1
0
Fork 0
mirror of synced 2025-09-23 12:18:44 +00:00

apply suggestions, remove mpc crate

This commit is contained in:
dark64 2021-12-02 18:38:18 +01:00
parent bd34950f3f
commit 7976774718
16 changed files with 29 additions and 1277 deletions

32
Cargo.lock generated
View file

@ -1513,6 +1513,22 @@ dependencies = [
"sha-1",
]
[[package]]
name = "phase2"
version = "0.2.2"
source = "git+https://github.com/Zokrates/phase2#dbdc1def651ebcef459888e813880e58fcc16f3f"
dependencies = [
"bellman_ce",
"blake2 0.9.2",
"blake2-rfc",
"byteorder",
"crossbeam",
"getrandom 0.2.3",
"num_cpus",
"pairing_ce",
"rand 0.4.6",
]
[[package]]
name = "pin-project-lite"
version = "0.2.7"
@ -2484,6 +2500,7 @@ dependencies = [
"hex 0.3.2",
"lazy_static",
"log",
"phase2",
"rand 0.4.6",
"regex 0.2.11",
"rust-crypto",
@ -2493,7 +2510,6 @@ dependencies = [
"zokrates_core",
"zokrates_field",
"zokrates_fs_resolver",
"zokrates_mpc",
]
[[package]]
@ -2603,20 +2619,6 @@ dependencies = [
"zokrates_common",
]
[[package]]
name = "zokrates_mpc"
version = "0.1.0"
dependencies = [
"bellman_ce",
"blake2-rfc",
"byteorder",
"crossbeam",
"getrandom 0.2.3",
"num_cpus",
"pairing_ce",
"rand 0.4.6",
]
[[package]]
name = "zokrates_parser"
version = "0.2.4"

View file

@ -7,7 +7,6 @@ members = [
"zokrates_fs_resolver",
"zokrates_stdlib",
"zokrates_embed",
"zokrates_mpc",
"zokrates_abi",
"zokrates_test",
"zokrates_core_test",

View file

@ -36,8 +36,9 @@ Initializing MPC...
Writing initial parameters to `mpc.params`
```
Using the `-r` flag we pass a path of the file that contains parameters for our circuit with a depth of `2^n` (`phase1radix2m{n}`).
The parameters for various circuit depths can be computed using the [phase2-bn254](https://github.com/kobigurk/phase2-bn254) utility by picking the latest response from [Perpetual Powers of Tau](https://github.com/weijiekoh/perpetualpowersoftau) and following the procedures explained in the mentioned repositories.
Using the `-r` flag, we pass a path to the file that contains the parameters for our circuit with depth `2^n` (`phase1radix2m{n}`).
The parameters for various circuit depths can be computed using [phase2-bn254](https://github.com/kobigurk/phase2-bn254) utility
by picking the latest response from [Perpetual Powers of Tau](https://github.com/weijiekoh/perpetualpowersoftau) and following the instructions in the mentioned repositories.
## Making a contribution

View file

@ -20,7 +20,6 @@ bincode = "0.8.0"
regex = "0.2"
zokrates_field = { version = "0.4", path = "../zokrates_field", default-features = false }
zokrates_abi = { version = "0.1", path = "../zokrates_abi" }
zokrates_mpc = { version = "0.1", path = "../zokrates_mpc" }
zokrates_core = { version = "0.6", path = "../zokrates_core", default-features = false }
zokrates_fs_resolver = { version = "0.5", path = "../zokrates_fs_resolver"}
serde_json = "1.0"
@ -31,6 +30,7 @@ rust-crypto = "0.2.36"
rand = "0.4"
hex = "0.3.1"
blake2 = "0.8.1"
phase2 = { git = "https://github.com/Zokrates/phase2" }
[dev-dependencies]
glob = "0.2.11"

View file

@ -1,10 +1,10 @@
use crate::constants::MPC_DEFAULT_PATH;
use clap::{App, Arg, ArgMatches, SubCommand};
use phase2::MPCParameters;
use std::fs::File;
use std::io::{BufReader, BufWriter};
use std::path::Path;
use zokrates_field::{BellmanFieldExtensions, Bn128Field};
use zokrates_mpc::groth16::parameters::MPCParameters;
pub fn subcommand() -> App<'static, 'static> {
SubCommand::with_name("beacon")

View file

@ -1,10 +1,10 @@
use crate::constants::MPC_DEFAULT_PATH;
use clap::{App, Arg, ArgMatches, SubCommand};
use phase2::MPCParameters;
use std::fs::File;
use std::io::{BufReader, BufWriter};
use std::path::Path;
use zokrates_field::{BellmanFieldExtensions, Bn128Field};
use zokrates_mpc::groth16::parameters::MPCParameters;
pub fn subcommand() -> App<'static, 'static> {
SubCommand::with_name("contribute")

View file

@ -1,11 +1,11 @@
use crate::constants::{MPC_DEFAULT_PATH, PROVING_KEY_DEFAULT_PATH, VERIFICATION_KEY_DEFAULT_PATH};
use clap::{App, Arg, ArgMatches, SubCommand};
use phase2::MPCParameters;
use std::fs::File;
use std::io::{BufReader, Write};
use std::path::Path;
use zokrates_core::proof_system::bellman::groth16::serialization::parameters_to_verification_key;
use zokrates_field::{BellmanFieldExtensions, Bn128Field};
use zokrates_mpc::groth16::parameters::MPCParameters;
pub fn subcommand() -> App<'static, 'static> {
SubCommand::with_name("export")

View file

@ -1,5 +1,6 @@
use crate::constants::{FLATTENED_CODE_DEFAULT_PATH, MPC_DEFAULT_PATH};
use clap::{App, Arg, ArgMatches, SubCommand};
use phase2::MPCParameters;
use std::fs::File;
use std::io::{BufReader, BufWriter};
use std::path::Path;
@ -7,7 +8,6 @@ use zokrates_core::ir;
use zokrates_core::ir::ProgEnum;
use zokrates_core::proof_system::bellman::Computation;
use zokrates_field::Bn128Field;
use zokrates_mpc::groth16::parameters::MPCParameters;
pub fn subcommand() -> App<'static, 'static> {
SubCommand::with_name("init")
@ -67,7 +67,7 @@ fn cli_mpc_init(ir_prog: ir::Prog<Bn128Field>, sub_matches: &ArgMatches) -> Resu
let mut radix_reader = BufReader::with_capacity(1024 * 1024, radix_file);
let circuit = Computation::without_witness(ir_prog);
let params = MPCParameters::new(circuit, true, &mut radix_reader).unwrap();
let params = MPCParameters::new(circuit, &mut radix_reader).unwrap();
let output_path = Path::new(sub_matches.value_of("output").unwrap());
let output_file = File::create(&output_path)

View file

@ -1,5 +1,6 @@
use crate::constants::{FLATTENED_CODE_DEFAULT_PATH, MPC_DEFAULT_PATH};
use clap::{App, Arg, ArgMatches, SubCommand};
use phase2::MPCParameters;
use std::fs::File;
use std::io::BufReader;
use std::path::Path;
@ -7,7 +8,6 @@ use zokrates_core::ir;
use zokrates_core::ir::ProgEnum;
use zokrates_core::proof_system::bellman::Computation;
use zokrates_field::Bn128Field;
use zokrates_mpc::groth16::parameters::MPCParameters;
pub fn subcommand() -> App<'static, 'static> {
SubCommand::with_name("verify")
@ -36,7 +36,7 @@ pub fn subcommand() -> App<'static, 'static> {
Arg::with_name("radix-path")
.short("r")
.long("radix-dir")
.help("Path to the radix file containing parameters for 2^n circuit depth (phase1radix2m{n})")
.help("Path to the radix file containing parameters for a circuit depth of 2^n (phase1radix2m{n})")
.value_name("PATH")
.takes_value(true)
.required(true),
@ -77,7 +77,7 @@ fn cli_mpc_verify(ir_prog: ir::Prog<Bn128Field>, sub_matches: &ArgMatches) -> Re
let circuit = Computation::without_witness(ir_prog);
let result = params
.verify(circuit, true, &mut radix_reader)
.verify(circuit, &mut radix_reader)
.map_err(|_| "Verification failed".to_string())?;
let contribution_count = result.len();

View file

@ -1,17 +0,0 @@
[package]
name = "zokrates_mpc"
version = "0.1.0"
authors = ["dark64 <darem966@gmail.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
num_cpus = "1"
rand = "0.4"
byteorder = "1"
crossbeam = "0.7.3"
blake2-rfc = "0.2"
bellman_ce = { version = "^0.3", default-features = false }
pairing_ce = { version = "^0.21" }
getrandom = { version = "0.2", features = ["js"] }

View file

@ -1,118 +0,0 @@
extern crate bellman_ce;
use bellman_ce::pairing::{CurveAffine, EncodedPoint, Engine};
use std::io::{self, Read, Write};
/// This needs to be destroyed by at least one participant
/// for the final parameters to be secure.
pub struct PrivateKey<E: Engine> {
    /// The contributor's secret exponent, applied to the delta parameter.
    pub delta: E::Fr,
}
/// This allows others to verify that you contributed. The hash produced
/// by `MPCParameters::contribute` is just a BLAKE2b hash of this object.
#[derive(Clone)]
pub struct PublicKey<E: Engine> {
    /// This is the delta (in G1) after the transformation, kept so that we
    /// can check correctness of the public keys without having the entire
    /// interstitial parameters for each contribution.
    pub delta_after: E::G1Affine,
    /// Random element chosen by the contributor.
    pub s: E::G1Affine,
    /// That element, taken to the contributor's secret delta.
    pub s_delta: E::G1Affine,
    /// r is H(last_pubkey | s | s_delta), r_delta proves knowledge of delta
    pub r_delta: E::G2Affine,
    /// Hash of the transcript (used for mapping to r)
    pub transcript: [u8; 64],
}
impl<E: Engine> PublicKey<E> {
    /// Serialize this public key: the four group elements in uncompressed
    /// form (delta_after, s, s_delta, r_delta), followed by the 64-byte
    /// transcript hash. `read` expects exactly this order.
    pub fn write<W: Write>(&self, mut writer: W) -> io::Result<()> {
        writer.write_all(self.delta_after.into_uncompressed().as_ref())?;
        writer.write_all(self.s.into_uncompressed().as_ref())?;
        writer.write_all(self.s_delta.into_uncompressed().as_ref())?;
        writer.write_all(self.r_delta.into_uncompressed().as_ref())?;
        writer.write_all(&self.transcript)?;

        Ok(())
    }

    /// Deserialize a public key previously produced by `write`. Each point is
    /// validated by `into_affine`, and the point at infinity is rejected with
    /// an `InvalidData` error.
    pub fn read<R: Read>(mut reader: R) -> io::Result<PublicKey<E>> {
        // Reusable uncompressed-point buffers for G1 and G2 reads.
        let mut g1_repr = <<E as Engine>::G1Affine as CurveAffine>::Uncompressed::empty();
        let mut g2_repr = <<E as Engine>::G2Affine as CurveAffine>::Uncompressed::empty();

        reader.read_exact(g1_repr.as_mut())?;
        let delta_after = g1_repr
            .into_affine()
            .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;

        if delta_after.is_zero() {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "point at infinity",
            ));
        }

        reader.read_exact(g1_repr.as_mut())?;
        let s = g1_repr
            .into_affine()
            .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;

        if s.is_zero() {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "point at infinity",
            ));
        }

        reader.read_exact(g1_repr.as_mut())?;
        let s_delta = g1_repr
            .into_affine()
            .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;

        if s_delta.is_zero() {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "point at infinity",
            ));
        }

        reader.read_exact(g2_repr.as_mut())?;
        let r_delta = g2_repr
            .into_affine()
            .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;

        if r_delta.is_zero() {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "point at infinity",
            ));
        }

        let mut transcript = [0u8; 64];
        reader.read_exact(&mut transcript)?;

        Ok(PublicKey {
            delta_after,
            s,
            s_delta,
            r_delta,
            transcript,
        })
    }
}
impl<E: Engine> PartialEq for PublicKey<E> {
    /// Two public keys are equal when every group element matches and the
    /// transcript hashes are byte-for-byte identical.
    fn eq(&self, other: &PublicKey<E>) -> bool {
        let group_elements_equal = self.delta_after == other.delta_after
            && self.s == other.s
            && self.s_delta == other.s_delta
            && self.r_delta == other.r_delta;

        group_elements_equal && self.transcript[..] == other.transcript[..]
    }
}

View file

@ -1,120 +0,0 @@
extern crate bellman_ce;
use bellman_ce::pairing::Engine;
use bellman_ce::{ConstraintSystem, Index, LinearCombination, SynthesisError, Variable};
/// This is our assembly structure that we'll use to synthesize the
/// circuit into a QAP.
pub struct KeypairAssembly<E: Engine> {
    // Number of input (public) variables allocated so far.
    pub num_inputs: usize,
    // Number of auxiliary (private) variables allocated so far.
    pub num_aux: usize,
    // Number of constraints enforced so far.
    pub num_constraints: usize,
    // Per-input-variable coefficient rows of the A/B/C QAP polynomials:
    // each entry is a list of (coefficient, constraint index) pairs.
    pub at_inputs: Vec<Vec<(E::Fr, usize)>>,
    pub bt_inputs: Vec<Vec<(E::Fr, usize)>>,
    pub ct_inputs: Vec<Vec<(E::Fr, usize)>>,
    // Same as above, for auxiliary variables.
    pub at_aux: Vec<Vec<(E::Fr, usize)>>,
    pub bt_aux: Vec<Vec<(E::Fr, usize)>>,
    pub ct_aux: Vec<Vec<(E::Fr, usize)>>,
}
impl<E: Engine> ConstraintSystem<E> for KeypairAssembly<E> {
    type Root = Self;

    /// Allocate an auxiliary (private) variable. The assignment closure is
    /// never invoked — only the coefficient bookkeeping rows are created.
    fn alloc<F, A, AR>(&mut self, _: A, _: F) -> Result<Variable, SynthesisError>
    where
        F: FnOnce() -> Result<E::Fr, SynthesisError>,
        A: FnOnce() -> AR,
        AR: Into<String>,
    {
        // There is no assignment, so we don't even invoke the
        // function for obtaining one.
        let index = self.num_aux;
        self.num_aux += 1;

        self.at_aux.push(vec![]);
        self.bt_aux.push(vec![]);
        self.ct_aux.push(vec![]);

        Ok(Variable::new_unchecked(Index::Aux(index)))
    }

    /// Allocate an input (public) variable. As with `alloc`, no assignment
    /// is computed; only the coefficient rows are tracked.
    fn alloc_input<F, A, AR>(&mut self, _: A, _: F) -> Result<Variable, SynthesisError>
    where
        F: FnOnce() -> Result<E::Fr, SynthesisError>,
        A: FnOnce() -> AR,
        AR: Into<String>,
    {
        // There is no assignment, so we don't even invoke the
        // function for obtaining one.
        let index = self.num_inputs;
        self.num_inputs += 1;

        self.at_inputs.push(vec![]);
        self.bt_inputs.push(vec![]);
        self.ct_inputs.push(vec![]);

        Ok(Variable::new_unchecked(Index::Input(index)))
    }

    /// Record the constraint a * b = c by appending each linear combination's
    /// coefficients to the rows of the variables it touches, tagged with the
    /// current constraint index.
    fn enforce<A, AR, LA, LB, LC>(&mut self, _: A, a: LA, b: LB, c: LC)
    where
        A: FnOnce() -> AR,
        AR: Into<String>,
        LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
        LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
        LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
    {
        // Scatter one linear combination into the per-variable rows,
        // routing input vs auxiliary variables to their own tables.
        fn eval<E: Engine>(
            l: LinearCombination<E>,
            inputs: &mut [Vec<(E::Fr, usize)>],
            aux: &mut [Vec<(E::Fr, usize)>],
            this_constraint: usize,
        ) {
            for &(var, coeff) in l.as_ref() {
                match var.get_unchecked() {
                    Index::Input(id) => inputs[id].push((coeff, this_constraint)),
                    Index::Aux(id) => aux[id].push((coeff, this_constraint)),
                }
            }
        }

        eval(
            a(LinearCombination::zero()),
            &mut self.at_inputs,
            &mut self.at_aux,
            self.num_constraints,
        );
        eval(
            b(LinearCombination::zero()),
            &mut self.bt_inputs,
            &mut self.bt_aux,
            self.num_constraints,
        );
        eval(
            c(LinearCombination::zero()),
            &mut self.ct_inputs,
            &mut self.ct_aux,
            self.num_constraints,
        );

        self.num_constraints += 1;
    }

    fn push_namespace<NR, N>(&mut self, _: N)
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        // Do nothing; we don't care about namespaces in this context.
    }

    fn pop_namespace(&mut self) {
        // Do nothing; we don't care about namespaces in this context.
    }

    fn get_root(&mut self) -> &mut Self::Root {
        self
    }
}

View file

@ -1,3 +0,0 @@
mod keypair;
mod keypair_assembly;
pub mod parameters;

View file

@ -1,937 +0,0 @@
#![allow(clippy::redundant_clone)]
extern crate bellman_ce;
extern crate byteorder;
extern crate crossbeam;
extern crate num_cpus;
extern crate rand;
use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
use std::{
io::{self, Read, Write},
sync::Arc,
};
use bellman_ce::{
groth16::{Parameters, VerifyingKey},
pairing::{
ff::{Field, PrimeField},
CurveAffine, CurveProjective, EncodedPoint, Engine, Wnaf,
},
worker::Worker,
Circuit, ConstraintSystem, Index, SynthesisError, Variable,
};
use rand::{ChaChaRng, Rand, Rng, SeedableRng};
use super::keypair::*;
use super::keypair_assembly::*;
use crate::hash_writer::HashWriter;
/// MPC parameters are just like bellman `Parameters` except, when serialized,
/// they contain a transcript of contributions at the end, which can be verified.
#[derive(Clone)]
pub struct MPCParameters<E: Engine> {
    // The underlying Groth16 parameters being built up by the ceremony.
    params: Parameters<E>,
    // BLAKE2b hash of the serialized pristine parameters for this circuit,
    // computed in `new()` and checked again in `verify()`.
    cs_hash: [u8; 64],
    // One public key per contribution, in the order they were made.
    contributions: Vec<PublicKey<E>>,
}
impl<E: Engine> PartialEq for MPCParameters<E> {
    /// Parameter sets are equal when the Groth16 parameters, the circuit
    /// hash, and the full contribution transcript all match.
    fn eq(&self, other: &MPCParameters<E>) -> bool {
        if self.params != other.params {
            return false;
        }

        self.cs_hash[..] == other.cs_hash[..] && self.contributions == other.contributions
    }
}
impl<E: Engine> MPCParameters<E> {
    /// Create new Groth16 parameters for a given circuit. The resulting parameters are unsafe to
    /// use until there are contributions (see `contribute()`).
    ///
    /// `phase1_radix` is a reader over a Powers of Tau radix file
    /// (`phase1radix2m{n}`) large enough for the circuit's evaluation domain.
    /// When `should_filter_points_at_infinity` is set, zero elements are
    /// dropped from the A/B query vectors before the parameters are built.
    #[cfg(not(target_arch = "wasm32"))]
    pub fn new<C, R: Read>(
        circuit: C,
        should_filter_points_at_infinity: bool,
        phase1_radix: &mut R,
    ) -> Result<MPCParameters<E>, SynthesisError>
    where
        C: Circuit<E>,
    {
        let mut assembly = KeypairAssembly {
            num_inputs: 0,
            num_aux: 0,
            num_constraints: 0,
            at_inputs: vec![],
            bt_inputs: vec![],
            ct_inputs: vec![],
            at_aux: vec![],
            bt_aux: vec![],
            ct_aux: vec![],
        };

        // Allocate the "one" input variable
        assembly.alloc_input(|| "", || Ok(E::Fr::one()))?;

        // Synthesize the circuit.
        circuit.synthesize(&mut assembly)?;

        // Input constraints to ensure full density of IC query
        // x * 0 = 0
        for i in 0..assembly.num_inputs {
            assembly.enforce(
                || "",
                |lc| lc + Variable::new_unchecked(Index::Input(i)),
                |lc| lc,
                |lc| lc,
            );
        }

        // Compute the size of our evaluation domain
        // (smallest power of two >= the number of constraints)
        let mut m = 1;
        let mut exp = 0;
        while m < assembly.num_constraints {
            m *= 2;
            exp += 1;

            // Powers of Tau ceremony can't support more than 2^28
            if exp > 28 {
                return Err(SynthesisError::PolynomialDegreeTooLarge);
            }
        }

        // Reads one uncompressed G1 point from the radix file. Curve-subgroup
        // checks are skipped (`into_affine_unchecked`), but the point at
        // infinity is rejected.
        let read_g1 = |reader: &mut R| -> io::Result<E::G1Affine> {
            let mut repr = <<E as Engine>::G1Affine as CurveAffine>::Uncompressed::empty();
            reader.read_exact(repr.as_mut())?;

            repr.into_affine_unchecked()
                .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
                .and_then(|e| {
                    if e.is_zero() {
                        Err(io::Error::new(
                            io::ErrorKind::InvalidData,
                            "point at infinity",
                        ))
                    } else {
                        Ok(e)
                    }
                })
        };

        // Same as `read_g1`, for G2 points.
        let read_g2 = |reader: &mut R| -> io::Result<E::G2Affine> {
            let mut repr = <<E as Engine>::G2Affine as CurveAffine>::Uncompressed::empty();
            reader.read_exact(repr.as_mut())?;

            repr.into_affine_unchecked()
                .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
                .and_then(|e| {
                    if e.is_zero() {
                        Err(io::Error::new(
                            io::ErrorKind::InvalidData,
                            "point at infinity",
                        ))
                    } else {
                        Ok(e)
                    }
                })
        };

        // Fixed preamble of the radix file: alpha, beta (in both groups).
        let alpha = read_g1(phase1_radix)?;
        let beta_g1 = read_g1(phase1_radix)?;
        let beta_g2 = read_g2(phase1_radix)?;

        // Lagrange-basis coefficient tables, one entry per domain point.
        let mut coeffs_g1 = Vec::with_capacity(m);
        for _ in 0..m {
            coeffs_g1.push(read_g1(phase1_radix)?);
        }
        let mut coeffs_g2 = Vec::with_capacity(m);
        for _ in 0..m {
            coeffs_g2.push(read_g2(phase1_radix)?);
        }
        let mut alpha_coeffs_g1 = Vec::with_capacity(m);
        for _ in 0..m {
            alpha_coeffs_g1.push(read_g1(phase1_radix)?);
        }
        let mut beta_coeffs_g1 = Vec::with_capacity(m);
        for _ in 0..m {
            beta_coeffs_g1.push(read_g1(phase1_radix)?);
        }

        // These are `Arc` so that later it'll be easier
        // to use multiexp during QAP evaluation (which requires a futures-based API)
        let coeffs_g1 = Arc::new(coeffs_g1);
        let coeffs_g2 = Arc::new(coeffs_g2);
        let alpha_coeffs_g1 = Arc::new(alpha_coeffs_g1);
        let beta_coeffs_g1 = Arc::new(beta_coeffs_g1);

        let mut h = Vec::with_capacity(m - 1);
        for _ in 0..m - 1 {
            h.push(read_g1(phase1_radix)?);
        }

        let mut ic = vec![E::G1::zero(); assembly.num_inputs];
        let mut l = vec![E::G1::zero(); assembly.num_aux];
        let mut a_g1 = vec![E::G1::zero(); assembly.num_inputs + assembly.num_aux];
        let mut b_g1 = vec![E::G1::zero(); assembly.num_inputs + assembly.num_aux];
        let mut b_g2 = vec![E::G2::zero(); assembly.num_inputs + assembly.num_aux];

        // Evaluates the QAP polynomials over the Lagrange coefficient tables,
        // splitting the per-variable work across worker threads.
        #[allow(clippy::too_many_arguments)]
        fn eval<E: Engine>(
            // Lagrange coefficients for tau
            coeffs_g1: Arc<Vec<E::G1Affine>>,
            coeffs_g2: Arc<Vec<E::G2Affine>>,
            alpha_coeffs_g1: Arc<Vec<E::G1Affine>>,
            beta_coeffs_g1: Arc<Vec<E::G1Affine>>,
            // QAP polynomials
            at: &[Vec<(E::Fr, usize)>],
            bt: &[Vec<(E::Fr, usize)>],
            ct: &[Vec<(E::Fr, usize)>],
            // Resulting evaluated QAP polynomials
            a_g1: &mut [E::G1],
            b_g1: &mut [E::G1],
            b_g2: &mut [E::G2],
            ext: &mut [E::G1],
            // Worker
            worker: &Worker,
        ) {
            // Sanity check
            assert_eq!(a_g1.len(), at.len());
            assert_eq!(a_g1.len(), bt.len());
            assert_eq!(a_g1.len(), ct.len());
            assert_eq!(a_g1.len(), b_g1.len());
            assert_eq!(a_g1.len(), b_g2.len());
            assert_eq!(a_g1.len(), ext.len());

            // Evaluate polynomials in multiple threads
            worker.scope(a_g1.len(), |scope, chunk| {
                for ((((((a_g1, b_g1), b_g2), ext), at), bt), ct) in a_g1
                    .chunks_mut(chunk)
                    .zip(b_g1.chunks_mut(chunk))
                    .zip(b_g2.chunks_mut(chunk))
                    .zip(ext.chunks_mut(chunk))
                    .zip(at.chunks(chunk))
                    .zip(bt.chunks(chunk))
                    .zip(ct.chunks(chunk))
                {
                    let coeffs_g1 = coeffs_g1.clone();
                    let coeffs_g2 = coeffs_g2.clone();
                    let alpha_coeffs_g1 = alpha_coeffs_g1.clone();
                    let beta_coeffs_g1 = beta_coeffs_g1.clone();

                    scope.spawn(move |_| {
                        for ((((((a_g1, b_g1), b_g2), ext), at), bt), ct) in a_g1
                            .iter_mut()
                            .zip(b_g1.iter_mut())
                            .zip(b_g2.iter_mut())
                            .zip(ext.iter_mut())
                            .zip(at.iter())
                            .zip(bt.iter())
                            .zip(ct.iter())
                        {
                            for &(coeff, lag) in at {
                                a_g1.add_assign(&coeffs_g1[lag].mul(coeff));
                                ext.add_assign(&beta_coeffs_g1[lag].mul(coeff));
                            }
                            for &(coeff, lag) in bt {
                                b_g1.add_assign(&coeffs_g1[lag].mul(coeff));
                                b_g2.add_assign(&coeffs_g2[lag].mul(coeff));
                                ext.add_assign(&alpha_coeffs_g1[lag].mul(coeff));
                            }
                            for &(coeff, lag) in ct {
                                ext.add_assign(&coeffs_g1[lag].mul(coeff));
                            }
                        }

                        // Batch normalize
                        E::G1::batch_normalization(a_g1);
                        E::G1::batch_normalization(b_g1);
                        E::G2::batch_normalization(b_g2);
                        E::G1::batch_normalization(ext);
                    });
                }
            });
        }

        let worker = Worker::new();

        // Evaluate for inputs.
        eval::<E>(
            coeffs_g1.clone(),
            coeffs_g2.clone(),
            alpha_coeffs_g1.clone(),
            beta_coeffs_g1.clone(),
            &assembly.at_inputs,
            &assembly.bt_inputs,
            &assembly.ct_inputs,
            &mut a_g1[0..assembly.num_inputs],
            &mut b_g1[0..assembly.num_inputs],
            &mut b_g2[0..assembly.num_inputs],
            &mut ic,
            &worker,
        );

        // Evaluate for auxiliary variables.
        eval::<E>(
            coeffs_g1.clone(),
            coeffs_g2.clone(),
            alpha_coeffs_g1.clone(),
            beta_coeffs_g1.clone(),
            &assembly.at_aux,
            &assembly.bt_aux,
            &assembly.ct_aux,
            &mut a_g1[assembly.num_inputs..],
            &mut b_g1[assembly.num_inputs..],
            &mut b_g2[assembly.num_inputs..],
            &mut l,
            &worker,
        );

        // Don't allow any elements be unconstrained, so that
        // the L query is always fully dense.
        for e in l.iter() {
            if e.is_zero() {
                return Err(SynthesisError::UnconstrainedVariable);
            }
        }

        // delta starts as the identity (one); contributions transform it.
        let vk = VerifyingKey {
            alpha_g1: alpha,
            beta_g1,
            beta_g2,
            gamma_g2: E::G2Affine::one(),
            delta_g1: E::G1Affine::one(),
            delta_g2: E::G2Affine::one(),
            ic: ic.into_iter().map(|e| e.into_affine()).collect(),
        };

        let params = if should_filter_points_at_infinity {
            Parameters {
                vk,
                h: Arc::new(h),
                l: Arc::new(l.into_iter().map(|e| e.into_affine()).collect()),
                // Filter points at infinity away from A/B queries
                a: Arc::new(
                    a_g1.into_iter()
                        .filter(|e| !e.is_zero())
                        .map(|e| e.into_affine())
                        .collect(),
                ),
                b_g1: Arc::new(
                    b_g1.into_iter()
                        .filter(|e| !e.is_zero())
                        .map(|e| e.into_affine())
                        .collect(),
                ),
                b_g2: Arc::new(
                    b_g2.into_iter()
                        .filter(|e| !e.is_zero())
                        .map(|e| e.into_affine())
                        .collect(),
                ),
            }
        } else {
            Parameters {
                vk,
                h: Arc::new(h),
                l: Arc::new(l.into_iter().map(|e| e.into_affine()).collect()),
                a: Arc::new(a_g1.into_iter().map(|e| e.into_affine()).collect()),
                b_g1: Arc::new(b_g1.into_iter().map(|e| e.into_affine()).collect()),
                b_g2: Arc::new(b_g2.into_iter().map(|e| e.into_affine()).collect()),
            }
        };

        // `cs_hash` pins these pristine parameters: the hash of their
        // serialized form, later re-derived and checked by `verify()`.
        let h = {
            let sink = io::sink();
            let mut sink = HashWriter::new(sink);
            params.write(&mut sink).unwrap();
            sink.into_hash()
        };

        let mut cs_hash = [0; 64];
        cs_hash.copy_from_slice(h.as_ref());

        Ok(MPCParameters {
            params,
            cs_hash,
            contributions: vec![],
        })
    }

    /// Generates a random keypair for a contribution: the secret `delta`
    /// plus the public evidence (s-pair and transcript hash) that `verify`
    /// later uses to check the contribution.
    fn generate_keypair<R: Rng>(&self, rng: &mut R) -> (PublicKey<E>, PrivateKey<E>) {
        // Sample random delta
        let delta: E::Fr = rng.gen();

        // Compute delta s-pair in G1
        let s = E::G1::rand(rng).into_affine();
        let s_delta = s.mul(delta).into_affine();

        // H(cs_hash | <previous pubkeys> | s | s_delta)
        let h = {
            let sink = io::sink();
            let mut sink = HashWriter::new(sink);

            sink.write_all(&self.cs_hash[..]).unwrap();
            for pubkey in &self.contributions {
                pubkey.write(&mut sink).unwrap();
            }
            sink.write_all(s.into_uncompressed().as_ref()).unwrap();
            sink.write_all(s_delta.into_uncompressed().as_ref())
                .unwrap();

            sink.into_hash()
        };

        // This avoids making a weird assumption about the hash into the group.
        let mut transcript = [0; 64];
        transcript.copy_from_slice(h.as_ref());

        // Compute delta s-pair in G2
        let r = hash_to_g2::<E>(h.as_ref()).into_affine();
        let r_delta = r.mul(delta).into_affine();

        (
            PublicKey {
                delta_after: self.params.vk.delta_g1.mul(delta).into_affine(),
                s,
                s_delta,
                r_delta,
                transcript,
            },
            PrivateKey { delta },
        )
    }

    /// Contributes some randomness to the parameters. Only one
    /// contributor needs to be honest for the parameters to be
    /// secure.
    ///
    /// This function returns a "hash" that is bound to the
    /// contribution. Contributors can use this hash to make
    /// sure their contribution is in the final parameters, by
    /// checking to see if it appears in the output of
    /// `MPCParameters::verify`.
    pub fn contribute<R: Rng>(&mut self, rng: &mut R) -> [u8; 64] {
        // Generate a keypair
        let (pubkey, privkey) = self.generate_keypair(rng);

        // Multiplies every base point by `coeff`, spreading the wNAF work
        // across all available cores.
        #[cfg(not(target_arch = "wasm32"))]
        fn batch_exp<C: CurveAffine>(bases: &mut [C], coeff: C::Scalar) {
            let coeff = coeff.into_repr();

            let mut projective = vec![C::Projective::zero(); bases.len()];
            let cpus = num_cpus::get();
            let chunk_size = if bases.len() < cpus {
                1
            } else {
                bases.len() / cpus
            };

            // Perform wNAF over multiple cores, placing results into `projective`.
            crossbeam::scope(|scope| {
                for (bases, projective) in bases
                    .chunks_mut(chunk_size)
                    .zip(projective.chunks_mut(chunk_size))
                {
                    scope.spawn(move |_| {
                        let mut wnaf = Wnaf::new();
                        for (base, projective) in bases.iter_mut().zip(projective.iter_mut()) {
                            *projective = wnaf.base(base.into_projective(), 1).scalar(coeff);
                        }
                    });
                }
            })
            .unwrap();

            // Perform batch normalization
            crossbeam::scope(|scope| {
                for projective in projective.chunks_mut(chunk_size) {
                    scope.spawn(move |_| {
                        C::Projective::batch_normalization(projective);
                    });
                }
            })
            .unwrap();

            // Turn it all back into affine points
            for (projective, affine) in projective.iter().zip(bases.iter_mut()) {
                *affine = projective.into_affine();
            }
        }

        // Single-threaded variant for wasm targets (no thread support).
        #[cfg(target_arch = "wasm32")]
        fn batch_exp<C: CurveAffine>(bases: &mut [C], coeff: C::Scalar) {
            let coeff = coeff.into_repr();

            let mut projective = vec![C::Projective::zero(); bases.len()];

            // Perform wNAF, placing results into `projective`.
            let mut wnaf = Wnaf::new();
            for (base, projective) in bases.iter_mut().zip(projective.iter_mut()) {
                *projective = wnaf.base(base.into_projective(), 1).scalar(coeff);
            }

            // Perform batch normalization
            C::Projective::batch_normalization(&mut projective);

            // Turn it all back into affine points
            for (projective, affine) in projective.iter().zip(bases.iter_mut()) {
                *affine = projective.into_affine();
            }
        }

        // Scale the H and L queries by delta^-1, and delta_g1/delta_g2 by
        // delta itself.
        let delta_inv = privkey.delta.inverse().expect("nonzero");
        let mut l = (&self.params.l[..]).to_vec();
        let mut h = (&self.params.h[..]).to_vec();
        batch_exp(&mut l, delta_inv);
        batch_exp(&mut h, delta_inv);
        self.params.l = Arc::new(l);
        self.params.h = Arc::new(h);

        self.params.vk.delta_g1 = self.params.vk.delta_g1.mul(privkey.delta).into_affine();
        self.params.vk.delta_g2 = self.params.vk.delta_g2.mul(privkey.delta).into_affine();

        self.contributions.push(pubkey.clone());

        // Calculate the hash of the public key and return it
        {
            let sink = io::sink();
            let mut sink = HashWriter::new(sink);
            pubkey.write(&mut sink).unwrap();
            let h = sink.into_hash();
            let mut response = [0u8; 64];
            response.copy_from_slice(h.as_ref());
            response
        }
    }

    /// Verify the correctness of the parameters, given a circuit instance.
    /// This will return all of the hashes that contributors obtained when
    /// they ran `MPCParameters::contribute`.
    /// For ensuring that contributions exist in the final parameters.
    #[cfg(not(target_arch = "wasm32"))]
    #[allow(clippy::result_unit_err)]
    pub fn verify<C: Circuit<E>, R: Read>(
        &self,
        circuit: C,
        should_filter_points_at_infinity: bool,
        phase1_radix: &mut R,
    ) -> Result<Vec<[u8; 64]>, ()> {
        // Re-derive the pristine parameters from scratch and compare against
        // them: everything a contribution must not touch has to match.
        let initial_params =
            MPCParameters::new(circuit, should_filter_points_at_infinity, phase1_radix)
                .map_err(|_| ())?;

        // H/L will change, but should have same length
        if initial_params.params.h.len() != self.params.h.len() {
            return Err(());
        }
        if initial_params.params.l.len() != self.params.l.len() {
            return Err(());
        }

        // A/B_G1/B_G2 doesn't change at all
        if initial_params.params.a != self.params.a {
            return Err(());
        }
        if initial_params.params.b_g1 != self.params.b_g1 {
            return Err(());
        }
        if initial_params.params.b_g2 != self.params.b_g2 {
            return Err(());
        }

        // alpha/beta/gamma don't change
        if initial_params.params.vk.alpha_g1 != self.params.vk.alpha_g1 {
            return Err(());
        }
        if initial_params.params.vk.beta_g1 != self.params.vk.beta_g1 {
            return Err(());
        }
        if initial_params.params.vk.beta_g2 != self.params.vk.beta_g2 {
            return Err(());
        }
        if initial_params.params.vk.gamma_g2 != self.params.vk.gamma_g2 {
            return Err(());
        }

        // IC shouldn't change, as gamma doesn't change
        if initial_params.params.vk.ic != self.params.vk.ic {
            return Err(());
        }

        // cs_hash should be the same
        if initial_params.cs_hash[..] != self.cs_hash[..] {
            return Err(());
        }

        // Replay the transcript, checking each contribution in order.
        let sink = io::sink();
        let mut sink = HashWriter::new(sink);
        sink.write_all(&initial_params.cs_hash[..]).unwrap();

        let mut current_delta = E::G1Affine::one();
        let mut result = vec![];

        for pubkey in &self.contributions {
            let mut our_sink = sink.clone();
            our_sink
                .write_all(pubkey.s.into_uncompressed().as_ref())
                .unwrap();
            our_sink
                .write_all(pubkey.s_delta.into_uncompressed().as_ref())
                .unwrap();

            pubkey.write(&mut sink).unwrap();

            let h = our_sink.into_hash();

            // The transcript must be consistent
            if &pubkey.transcript[..] != h.as_ref() {
                return Err(());
            }

            let r = hash_to_g2::<E>(h.as_ref()).into_affine();

            // Check the signature of knowledge
            if !same_ratio((r, pubkey.r_delta), (pubkey.s, pubkey.s_delta)) {
                return Err(());
            }

            // Check the change from the old delta is consistent
            if !same_ratio((current_delta, pubkey.delta_after), (r, pubkey.r_delta)) {
                return Err(());
            }

            current_delta = pubkey.delta_after;

            {
                let sink = io::sink();
                let mut sink = HashWriter::new(sink);
                pubkey.write(&mut sink).unwrap();
                let h = sink.into_hash();
                let mut response = [0u8; 64];
                response.copy_from_slice(h.as_ref());
                result.push(response);
            }
        }

        // Current parameters should have consistent delta in G1
        if current_delta != self.params.vk.delta_g1 {
            return Err(());
        }

        // Current parameters should have consistent delta in G2
        if !same_ratio(
            (E::G1Affine::one(), current_delta),
            (E::G2Affine::one(), self.params.vk.delta_g2),
        ) {
            return Err(());
        }

        // H and L queries should be updated with delta^-1
        if !same_ratio(
            merge_pairs(&initial_params.params.h, &self.params.h),
            (self.params.vk.delta_g2, E::G2Affine::one()), // reversed for inverse
        ) {
            return Err(());
        }
        if !same_ratio(
            merge_pairs(&initial_params.params.l, &self.params.l),
            (self.params.vk.delta_g2, E::G2Affine::one()), // reversed for inverse
        ) {
            return Err(());
        }

        Ok(result)
    }

    /// Serialize these parameters. The serialized parameters
    /// can be read by bellman as Groth16 `Parameters`.
    pub fn write<W: Write>(&self, mut writer: W) -> io::Result<()> {
        self.params.write(&mut writer)?;
        writer.write_all(&self.cs_hash)?;

        writer.write_u32::<BigEndian>(self.contributions.len() as u32)?;
        for pubkey in &self.contributions {
            pubkey.write(&mut writer)?;
        }

        Ok(())
    }

    /// Deserialize these parameters. If `checked` is false, curve validity and
    /// group order checks are not performed.
    pub fn read<R: Read>(mut reader: R, checked: bool) -> io::Result<MPCParameters<E>> {
        let params = Parameters::read(&mut reader, checked)?;

        let mut cs_hash = [0u8; 64];
        reader.read_exact(&mut cs_hash)?;

        let contributions_len = reader.read_u32::<BigEndian>()? as usize;
        let mut contributions = vec![];
        for _ in 0..contributions_len {
            contributions.push(PublicKey::read(&mut reader)?);
        }

        Ok(MPCParameters {
            params,
            cs_hash,
            contributions,
        })
    }

    /// Get the underlying Groth16 `Parameters`
    pub fn get_params(&self) -> &Parameters<E> {
        &self.params
    }
}
/// Verify a contribution, given the old parameters and
/// the new parameters. Returns the hash of the contribution.
///
/// Unlike `MPCParameters::verify`, this checks exactly one step of the
/// ceremony (`before` -> `after`) without re-synthesizing the circuit.
#[allow(clippy::result_unit_err)]
pub fn verify_contribution<E: Engine>(
    before: &MPCParameters<E>,
    after: &MPCParameters<E>,
) -> Result<[u8; 64], ()> {
    // Transformation involves a single new object
    if after.contributions.len() != (before.contributions.len() + 1) {
        return Err(());
    }

    // None of the previous transformations should change
    if before.contributions[..] != after.contributions[0..before.contributions.len()] {
        return Err(());
    }

    // H/L will change, but should have same length
    if before.params.h.len() != after.params.h.len() {
        return Err(());
    }
    if before.params.l.len() != after.params.l.len() {
        return Err(());
    }

    // A/B_G1/B_G2 doesn't change at all
    if before.params.a != after.params.a {
        return Err(());
    }
    if before.params.b_g1 != after.params.b_g1 {
        return Err(());
    }
    if before.params.b_g2 != after.params.b_g2 {
        return Err(());
    }

    // alpha/beta/gamma don't change
    if before.params.vk.alpha_g1 != after.params.vk.alpha_g1 {
        return Err(());
    }
    if before.params.vk.beta_g1 != after.params.vk.beta_g1 {
        return Err(());
    }
    if before.params.vk.beta_g2 != after.params.vk.beta_g2 {
        return Err(());
    }
    if before.params.vk.gamma_g2 != after.params.vk.gamma_g2 {
        return Err(());
    }

    // IC shouldn't change, as gamma doesn't change
    if before.params.vk.ic != after.params.vk.ic {
        return Err(());
    }

    // cs_hash should be the same
    if before.cs_hash[..] != after.cs_hash[..] {
        return Err(());
    }

    // Rebuild the transcript hash the contributor must have committed to:
    // H(cs_hash | <previous pubkeys> | s | s_delta).
    let sink = io::sink();
    let mut sink = HashWriter::new(sink);
    sink.write_all(&before.cs_hash[..]).unwrap();
    for pubkey in &before.contributions {
        pubkey.write(&mut sink).unwrap();
    }

    let pubkey = after.contributions.last().unwrap();
    sink.write_all(pubkey.s.into_uncompressed().as_ref())
        .unwrap();
    sink.write_all(pubkey.s_delta.into_uncompressed().as_ref())
        .unwrap();

    let h = sink.into_hash();

    // The transcript must be consistent
    if &pubkey.transcript[..] != h.as_ref() {
        return Err(());
    }

    let r = hash_to_g2::<E>(h.as_ref()).into_affine();

    // Check the signature of knowledge
    if !same_ratio((r, pubkey.r_delta), (pubkey.s, pubkey.s_delta)) {
        return Err(());
    }

    // Check the change from the old delta is consistent
    if !same_ratio(
        (before.params.vk.delta_g1, pubkey.delta_after),
        (r, pubkey.r_delta),
    ) {
        return Err(());
    }

    // Current parameters should have consistent delta in G1
    if pubkey.delta_after != after.params.vk.delta_g1 {
        return Err(());
    }

    // Current parameters should have consistent delta in G2
    if !same_ratio(
        (E::G1Affine::one(), pubkey.delta_after),
        (E::G2Affine::one(), after.params.vk.delta_g2),
    ) {
        return Err(());
    }

    // H and L queries should be updated with delta^-1
    if !same_ratio(
        merge_pairs(&before.params.h, &after.params.h),
        (after.params.vk.delta_g2, before.params.vk.delta_g2), // reversed for inverse
    ) {
        return Err(());
    }
    if !same_ratio(
        merge_pairs(&before.params.l, &after.params.l),
        (after.params.vk.delta_g2, before.params.vk.delta_g2), // reversed for inverse
    ) {
        return Err(());
    }

    // Everything checks out — return the hash of this contribution's
    // public key, matching what `contribute()` returned.
    let sink = io::sink();
    let mut sink = HashWriter::new(sink);
    pubkey.write(&mut sink).unwrap();
    let h = sink.into_hash();
    let mut response = [0u8; 64];
    response.copy_from_slice(h.as_ref());

    Ok(response)
}
/// Checks if pairs have the same ratio.
///
/// Returns `false` when any element is the identity, since the ratio is
/// undefined in that case; otherwise compares the cross pairings
/// `e(g1.0, g2.1)` and `e(g1.1, g2.0)`.
fn same_ratio<G1: CurveAffine>(g1: (G1, G1), g2: (G1::Pair, G1::Pair)) -> bool {
    let (a, b) = g1;
    let (c, d) = g2;
    if a.is_zero() || b.is_zero() || c.is_zero() || d.is_zero() {
        return false;
    }
    a.pairing_with(&d) == b.pairing_with(&c)
}
/// Computes a random linear combination of two equal-length point vectors.
///
/// Returns `(sum(rho_i * v1_i), sum(rho_i * v2_i))` for per-element random
/// scalars `rho_i`; feeding the result to `same_ratio` then checks that all
/// pairs `(v1_i, v2_i)` share a single discrete-log ratio. Work is split
/// across all logical CPUs using crossbeam scoped threads.
#[cfg(not(target_arch = "wasm32"))]
fn merge_pairs<G: CurveAffine>(v1: &[G], v2: &[G]) -> (G, G) {
    use rand::thread_rng;
    use std::sync::Mutex;

    assert_eq!(v1.len(), v2.len());

    // One chunk per logical CPU; +1 keeps chunk non-zero for short inputs.
    let chunk = (v1.len() / num_cpus::get()) + 1;

    // Shared accumulators for the two combined sums.
    let s = Arc::new(Mutex::new(G::Projective::zero()));
    let sx = Arc::new(Mutex::new(G::Projective::zero()));

    crossbeam::scope(|scope| {
        for (v1, v2) in v1.chunks(chunk).zip(v2.chunks(chunk)) {
            let s = s.clone();
            let sx = sx.clone();
            scope.spawn(move |_| {
                // We do not need to be overly cautious of the RNG
                // used for this check.
                let rng = &mut thread_rng();

                let mut wnaf = Wnaf::new();
                let mut local_s = G::Projective::zero();
                let mut local_sx = G::Projective::zero();

                for (v1, v2) in v1.iter().zip(v2.iter()) {
                    // The same random scalar multiplies both points so the
                    // per-pair ratio is preserved in the sums.
                    let rho = G::Scalar::rand(rng);
                    let mut wnaf = wnaf.scalar(rho.into_repr());
                    let v1 = wnaf.base(v1.into_projective());
                    let v2 = wnaf.base(v2.into_projective());

                    local_s.add_assign(&v1);
                    local_sx.add_assign(&v2);
                }

                // Fold the thread-local sums into the shared totals; the
                // locks are only contended once per chunk.
                s.lock().unwrap().add_assign(&local_s);
                sx.lock().unwrap().add_assign(&local_sx);
            });
        }
    })
    .unwrap();

    let s = s.lock().unwrap().into_affine();
    let sx = sx.lock().unwrap().into_affine();

    (s, sx)
}
/// Computes a random linear combination of two equal-length point vectors.
///
/// Single-threaded wasm variant of `merge_pairs`: returns
/// `(sum(rho_i * v1_i), sum(rho_i * v2_i))` for per-element random scalars
/// `rho_i`, seeding a fresh ChaCha RNG from OS randomness for each pair.
#[cfg(target_arch = "wasm32")]
fn merge_pairs<G: CurveAffine>(v1: &[G], v2: &[G]) -> (G, G) {
    assert_eq!(v1.len(), v2.len());

    let mut wnaf = Wnaf::new();
    let mut s = G::Projective::zero();
    let mut sx = G::Projective::zero();

    for (v1, v2) in v1.iter().zip(v2.iter()) {
        // We do not need to be overly cautious of the RNG
        // used for this check.
        let mut seed = [0u8; 32];
        getrandom::getrandom(&mut seed).expect("could not get random seed");

        // Repack the 32 random bytes into the 8 u32 words ChaChaRng expects.
        // This replaces the previous `mem::transmute`, which was unnecessary
        // `unsafe` (and endianness-dependent); the seed is random, so the
        // word order is immaterial.
        let mut seed_words = [0u32; 8];
        for (word, bytes) in seed_words.iter_mut().zip(seed.chunks(4)) {
            *word = u32::from_le_bytes([bytes[0], bytes[1], bytes[2], bytes[3]]);
        }
        let mut rng = ChaChaRng::from_seed(&seed_words);

        // Same scalar for both points, preserving the per-pair ratio.
        let rho = G::Scalar::rand(&mut rng);
        let mut wnaf = wnaf.scalar(rho.into_repr());
        let v1 = wnaf.base(v1.into_projective());
        let v2 = wnaf.base(v2.into_projective());

        s.add_assign(&v1);
        sx.add_assign(&v2);
    }

    (s.into_affine(), sx.into_affine())
}
/// Hashes to G2 using the first 32 bytes of `digest`. Panics if `digest` is less
/// than 32 bytes. The input must be random.
fn hash_to_g2<E: Engine>(mut digest: &[u8]) -> E::G2 {
    assert!(digest.len() >= 32);

    // Consume the digest as eight big-endian u32 words to seed ChaCha,
    // then sample a G2 element from the seeded RNG.
    let seed: Vec<u32> = (0..8)
        .map(|_| {
            digest
                .read_u32::<BigEndian>()
                .expect("assertion above guarantees this to work")
        })
        .collect();

    ChaChaRng::from_seed(&seed).gen()
}

View file

@ -1,53 +0,0 @@
extern crate blake2_rfc;
use blake2_rfc::blake2b::Blake2b;
use std::io;
use std::io::Write;
/// Abstraction over a writer which hashes the data being written.
pub struct HashWriter<W: Write> {
    // Underlying writer that receives the bytes unchanged.
    writer: W,
    // Blake2b state, updated with every byte the writer accepts.
    hasher: Blake2b,
}
impl Clone for HashWriter<io::Sink> {
    /// Duplicates the hash state; the writer is a fresh `io::sink()`,
    /// which is stateless, so the clone behaves identically.
    fn clone(&self) -> HashWriter<io::Sink> {
        let hasher = self.hasher.clone();
        HashWriter {
            writer: io::sink(),
            hasher,
        }
    }
}
impl<W: Write> HashWriter<W> {
    /// Construct a new `HashWriter` given an existing `writer` by value.
    pub fn new(writer: W) -> Self {
        let hasher = Blake2b::new(64);
        HashWriter { writer, hasher }
    }

    /// Destroy this writer and return the hash of what was written.
    pub fn into_hash(self) -> [u8; 64] {
        let digest = self.hasher.finalize();
        let mut out = [0u8; 64];
        out.copy_from_slice(digest.as_ref());
        out
    }
}
impl<W: Write> Write for HashWriter<W> {
    /// Forward the buffer to the inner writer, feeding only the bytes it
    /// actually accepted into the hasher.
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let accepted = self.writer.write(buf)?;
        if accepted > 0 {
            self.hasher.update(&buf[..accepted]);
        }
        Ok(accepted)
    }

    /// Flush the inner writer; the hash state itself needs no flushing.
    fn flush(&mut self) -> io::Result<()> {
        self.writer.flush()
    }
}

View file

@ -1,2 +0,0 @@
// Groth16 MPC ("phase 2") parameter generation and contribution verification.
pub mod groth16;
// Writer wrapper that Blake2b-hashes everything written through it
// (used by groth16 to build contribution transcripts).
mod hash_writer;