Split phase2 into modules

This commit is contained in:
poma 2020-01-09 19:42:16 +07:00
parent b6945b6029
commit 9bdbe85480
No known key found for this signature in database
GPG Key ID: BA20CB01FE165657
14 changed files with 1405 additions and 1952 deletions

@ -19,7 +19,6 @@ blake2-rfc = "0.2"
blake2 = "0.6.1"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
memmap = "0.7"
num-bigint = "0.2.3"
num-traits = "0.2.8"
itertools = "0.8.1"

@ -1,6 +1,5 @@
extern crate rand;
extern crate phase2;
extern crate memmap;
extern crate num_bigint;
extern crate num_traits;
extern crate blake2;
@ -15,6 +14,7 @@ use itertools::Itertools;
use std::fs::File;
use std::fs::OpenOptions;
use phase2::parameters::MPCParameters;
fn main() {
let args: Vec<String> = std::env::args().collect();
@ -82,7 +82,7 @@ fn main() {
.read(true)
.open(in_params_filename)
.expect("unable to open.");
let mut params = phase2::MPCParameters::read(reader, disallow_points_at_infinity, true).expect("unable to read params");
let mut params = MPCParameters::read(reader, disallow_points_at_infinity, true).expect("unable to read params");
println!("Contributing to {}...", in_params_filename);
let hash = params.contribute(&mut rng);

@ -1,6 +1,5 @@
extern crate rand;
extern crate phase2;
extern crate memmap;
extern crate num_bigint;
extern crate num_traits;
extern crate blake2;
@ -13,6 +12,8 @@ use itertools::Itertools;
use std::fs::File;
use std::fs::OpenOptions;
use phase2::parameters::MPCParameters;
fn main() {
let args: Vec<String> = std::env::args().collect();
if args.len() != 4 {
@ -62,7 +63,7 @@ fn main() {
.read(true)
.open(in_params_filename)
.expect("unable to open.");
let mut params = phase2::MPCParameters::read(reader, disallow_points_at_infinity, true).expect("unable to read params");
let mut params = MPCParameters::read(reader, disallow_points_at_infinity, true).expect("unable to read params");
println!("Contributing to {}...", in_params_filename);
let hash = params.contribute(&mut rng);

@ -1,7 +1,6 @@
extern crate bellman_ce;
extern crate rand;
extern crate phase2;
extern crate memmap;
extern crate num_bigint;
extern crate num_traits;
extern crate exitcode;
@ -13,9 +12,10 @@ use serde::{Deserialize, Serialize};
use num_bigint::BigUint;
use num_traits::Num;
use std::fs;
use std::fs::OpenOptions;
use std::io::Write;
use std::ops::DerefMut;
use phase2::parameters::MPCParameters;
#[derive(Serialize, Deserialize)]
struct ProvingKeyJson {
@ -89,7 +89,7 @@ fn main() {
.read(true)
.open(params_filename)
.expect("unable to open.");
let params = phase2::MPCParameters::read(reader, disallow_points_at_infinity, true).expect("unable to read params");
let params = MPCParameters::read(reader, disallow_points_at_infinity, true).expect("unable to read params");
let params = params.get_params();
let mut proving_key = ProvingKeyJson {
@ -203,17 +203,11 @@ fn main() {
verification_key.vk_gamma_2 = p2_to_vec(&vk_gamma_2);
verification_key.vk_delta_2 = p2_to_vec(&vk_delta_2);
let pk_file = OpenOptions::new().read(true).write(true).create_new(true).open(pk_filename).unwrap();
let pk_json = serde_json::to_string(&proving_key).unwrap();
pk_file.set_len(pk_json.len() as u64).expect("unable to write pk file");
let mut mmap = unsafe { memmap::Mmap::map(&pk_file) }.unwrap().make_mut().unwrap();
mmap.deref_mut().write_all(pk_json.as_bytes()).unwrap();
fs::write(pk_filename, pk_json.as_bytes()).unwrap();
let vk_file = OpenOptions::new().read(true).write(true).create_new(true).open(vk_filename).unwrap();
let vk_json = serde_json::to_string(&verification_key).unwrap();
vk_file.set_len(vk_json.len() as u64).expect("unable to write vk file");
let mut mmap = unsafe { memmap::Mmap::map(&vk_file) }.unwrap().make_mut().unwrap();
mmap.deref_mut().write_all(vk_json.as_bytes()).unwrap();
fs::write(vk_filename, vk_json.as_bytes()).unwrap();
println!("Created {} and {}.", pk_filename, vk_filename);
}

@ -1,445 +0,0 @@
extern crate bellman_ce;
extern crate rand;
extern crate phase2;
extern crate num_bigint;
extern crate num_traits;
#[macro_use]
extern crate serde;
extern crate serde_json;
use num_bigint::BigUint;
use num_traits::Num;
use std::ops::DerefMut;
use std::io::Write;
use std::sync::Arc;
use serde::{Deserialize, Serialize};
// For randomness (during paramgen and proof generation)
use rand::{thread_rng, Rng};
// For benchmarking
use std::time::{Duration, Instant};
// Bring in some tools for using pairing-friendly curves
use bellman_ce::pairing::{
Engine,
CurveAffine,
ff::{Field, PrimeField},
};
// We're going to use the BLS12-381 pairing-friendly elliptic curve.
use bellman_ce::pairing::bn256::{
Bn256
};
// We'll use these interfaces to construct our circuit.
use bellman_ce::{
Circuit,
ConstraintSystem,
SynthesisError
};
// We're going to use the Groth16 proving system.
use bellman_ce::groth16::{
Proof,
prepare_verifying_key,
create_random_proof,
verify_proof,
};
use std::fs::File;
use std::fs::{OpenOptions, remove_file};
// JSON shape of the exported proving key. Field names (including the
// "vk_alfa_1" spelling) follow the snarkjs/websnark conventions —
// NOTE(review): confirm against the consumer's expected schema.
#[derive(Serialize, Deserialize)]
struct ProvingKeyJson {
#[serde(rename = "A")]
pub a: Vec<Vec<String>>,
#[serde(rename = "B1")]
pub b1: Vec<Vec<String>>,
#[serde(rename = "B2")]
pub b2: Vec<Vec<Vec<String>>>,
// C query: `None` entries pad the public-input slots (see main), then
// one point per auxiliary variable.
#[serde(rename = "C")]
pub c: Vec<Option<Vec<String>>>,
pub vk_alfa_1: Vec<String>,
pub vk_beta_1: Vec<String>,
pub vk_delta_1: Vec<String>,
pub vk_beta_2: Vec<Vec<String>>,
pub vk_delta_2: Vec<Vec<String>>,
#[serde(rename = "hExps")]
pub h: Vec<Vec<String>>,
}
// JSON shape of the exported verification key; a subset of the proving
// key fields plus the IC (public-input commitment) points.
#[derive(Serialize, Deserialize)]
struct VerifyingKeyJson {
#[serde(rename = "IC")]
pub ic: Vec<Vec<String>>,
pub vk_alfa_1: Vec<String>,
pub vk_beta_2: Vec<Vec<String>>,
pub vk_gamma_2: Vec<Vec<String>>,
pub vk_delta_2: Vec<Vec<String>>,
}
const MIMC_ROUNDS: usize = 322;
/// This is an implementation of MiMC, specifically a
/// variant named `LongsightF322p3` for BLS12-381.
/// See http://eprint.iacr.org/2016/492 for more
/// information about this construction.
///
/// ```
/// function LongsightF322p3(xL ⦂ Fp, xR ⦂ Fp) {
/// for i from 0 up to 321 {
/// xL, xR := xR + (xL + Ci)^3, xL
/// }
/// return xL
/// }
/// ```
fn mimc<E: Engine>(
    mut xl: E::Fr,
    mut xr: E::Fr,
    constants: &[E::Fr]
) -> E::Fr
{
    assert_eq!(constants.len(), MIMC_ROUNDS);
    // One Feistel round per constant: (xL, xR) <- (xR + (xL + Ci)^3, xL).
    for c in constants {
        // base = xL + Ci
        let mut base = xl;
        base.add_assign(c);
        // new_xl = (xL + Ci)^3 + xR
        let mut new_xl = base;
        new_xl.square();
        new_xl.mul_assign(&base);
        new_xl.add_assign(&xr);
        xr = xl;
        xl = new_xl;
    }
    xl
}
/// This is our demo circuit for proving knowledge of the
/// preimage of a MiMC hash invocation.
struct MiMCDemo<'a, E: Engine> {
// Left half of the preimage; `None` during parameter generation.
xl: Option<E::Fr>,
// Right half of the preimage; `None` during parameter generation.
xr: Option<E::Fr>,
// The MIMC_ROUNDS round constants shared by prover and verifier.
constants: &'a [E::Fr]
}
/// Our demo circuit implements this `Circuit` trait which
/// is used during paramgen and proving in order to
/// synthesize the constraint system.
impl<'a, E: Engine> Circuit<E> for MiMCDemo<'a, E> {
// Emits 2 constraints per round (a squaring and a cubing/update
// constraint); the final round's xL is exposed as the public image.
fn synthesize<CS: ConstraintSystem<E>>(
self,
cs: &mut CS
) -> Result<(), SynthesisError>
{
assert_eq!(self.constants.len(), MIMC_ROUNDS);
// Allocate the first component of the preimage.
let mut xl_value = self.xl;
let mut xl = cs.alloc(|| "preimage xl", || {
xl_value.ok_or(SynthesisError::AssignmentMissing)
})?;
// Allocate the second component of the preimage.
let mut xr_value = self.xr;
let mut xr = cs.alloc(|| "preimage xr", || {
xr_value.ok_or(SynthesisError::AssignmentMissing)
})?;
for i in 0..MIMC_ROUNDS {
// xL, xR := xR + (xL + Ci)^3, xL
let cs = &mut cs.namespace(|| format!("round {}", i));
// tmp = (xL + Ci)^2
let mut tmp_value = xl_value.map(|mut e| {
e.add_assign(&self.constants[i]);
e.square();
e
});
let mut tmp = cs.alloc(|| "tmp", || {
tmp_value.ok_or(SynthesisError::AssignmentMissing)
})?;
cs.enforce(
|| "tmp = (xL + Ci)^2",
|lc| lc + xl + (self.constants[i], CS::one()),
|lc| lc + xl + (self.constants[i], CS::one()),
|lc| lc + tmp
);
// new_xL = xR + (xL + Ci)^3
// new_xL = xR + tmp * (xL + Ci)
// new_xL - xR = tmp * (xL + Ci)
let mut new_xl_value = xl_value.map(|mut e| {
e.add_assign(&self.constants[i]);
e.mul_assign(&tmp_value.unwrap());
e.add_assign(&xr_value.unwrap());
e
});
let mut new_xl = if i == (MIMC_ROUNDS-1) {
// This is the last round, xL is our image and so
// we allocate a public input.
cs.alloc_input(|| "image", || {
new_xl_value.ok_or(SynthesisError::AssignmentMissing)
})?
} else {
cs.alloc(|| "new_xl", || {
new_xl_value.ok_or(SynthesisError::AssignmentMissing)
})?
};
cs.enforce(
|| "new_xL = xR + (xL + Ci)^3",
|lc| lc + tmp,
|lc| lc + xl + (self.constants[i], CS::one()),
|lc| lc + new_xl - xr
);
// xR = xL
xr = xl;
xr_value = xl_value;
// xL = new_xL
xl = new_xl;
xl_value = new_xl_value;
}
Ok(())
}
}
// End-to-end demo: runs a two-contribution MPC for the MiMC circuit,
// verifies the transcript, and exports the resulting Groth16 keys as
// pk.json / vk.json (mmap-backed writes) plus raw mimc.params.
fn main() {
// This may not be cryptographically safe, use
// `OsRng` (for example) in production software.
let rng = &mut thread_rng();
// Generate the MiMC round constants
let constants = (0..MIMC_ROUNDS).map(|_| rng.gen()).collect::<Vec<_>>();
println!("Creating parameters...");
let should_filter_points_at_infinity = false;
// Create parameters for our circuit
let mut params = {
let c = MiMCDemo::<Bn256> {
xl: None,
xr: None,
constants: &constants
};
phase2::MPCParameters::new(c, should_filter_points_at_infinity).unwrap()
};
// Simulate two MPC participants; each contribution is checked against
// the parameter set it started from.
let old_params = params.clone();
params.contribute(rng);
let first_contrib = phase2::verify_contribution(&old_params, &params).expect("should verify");
let old_params = params.clone();
params.contribute(rng);
let second_contrib = phase2::verify_contribution(&old_params, &params).expect("should verify");
// Full verification against a fresh synthesis of the same circuit.
let verification_result = params.verify(MiMCDemo::<Bn256> {
xl: None,
xr: None,
constants: &constants
}, should_filter_points_at_infinity).unwrap();
assert!(phase2::contains_contribution(&verification_result, &first_contrib));
assert!(phase2::contains_contribution(&verification_result, &second_contrib));
let params = params.get_params();
let mut f = File::create("mimc.params").unwrap();
// NOTE(review): the Result of `write` appears to be discarded here — a
// failed write would go unnoticed; confirm and handle.
params.write(&mut f);
let mut proving_key = ProvingKeyJson {
a: vec![],
b1: vec![],
b2: vec![],
c: vec![],
vk_alfa_1: vec![],
vk_beta_1: vec![],
vk_delta_1: vec![],
vk_beta_2: vec![],
vk_delta_2: vec![],
h: vec![],
};
// Field-element repr (hex Display with a 2-char prefix) -> decimal string.
let repr_to_big = |r| {
BigUint::from_str_radix(&format!("{}", r)[2..], 16).unwrap().to_str_radix(10)
};
// G1 point -> ["x", "y", flag] where flag is "0" at infinity, else "1".
let p1_to_vec = |p : &<Bn256 as Engine>::G1Affine| {
let mut v = vec![];
let x = repr_to_big(p.get_x().into_repr());
v.push(x);
let y = repr_to_big(p.get_y().into_repr());
v.push(y);
if p.is_zero() {
v.push("0".to_string());
} else {
v.push("1".to_string());
}
v
};
// G2 point -> [[x.c0, x.c1], [y.c0, y.c1], [flag, "0"]].
let p2_to_vec = |p : &<Bn256 as Engine>::G2Affine| {
let mut v = vec![];
let x = p.get_x();
let mut x_v = vec![];
x_v.push(repr_to_big(x.c0.into_repr()));
x_v.push(repr_to_big(x.c1.into_repr()));
v.push(x_v);
let y = p.get_y();
let mut y_v = vec![];
y_v.push(repr_to_big(y.c0.into_repr()));
y_v.push(repr_to_big(y.c1.into_repr()));
v.push(y_v);
if p.is_zero() {
v.push(["0".to_string(), "0".to_string()].to_vec());
} else {
v.push(["1".to_string(), "0".to_string()].to_vec());
}
v
};
let a = params.a.clone();
for e in a.iter() {
proving_key.a.push(p1_to_vec(e));
}
let b1 = params.b_g1.clone();
for e in b1.iter() {
proving_key.b1.push(p1_to_vec(e));
}
let b2 = params.b_g2.clone();
for e in b2.iter() {
proving_key.b2.push(p2_to_vec(e));
}
// C query: `None` placeholders for the IC (public input) slots, then
// the L-query points for auxiliary variables.
let c = params.l.clone();
for _ in 0..params.vk.ic.len() {
proving_key.c.push(None);
}
for e in c.iter() {
proving_key.c.push(Some(p1_to_vec(e)));
}
let vk_alfa_1 = params.vk.alpha_g1.clone();
proving_key.vk_alfa_1 = p1_to_vec(&vk_alfa_1);
let vk_beta_1 = params.vk.beta_g1.clone();
proving_key.vk_beta_1 = p1_to_vec(&vk_beta_1);
let vk_delta_1 = params.vk.delta_g1.clone();
proving_key.vk_delta_1 = p1_to_vec(&vk_delta_1);
let vk_beta_2 = params.vk.beta_g2.clone();
proving_key.vk_beta_2 = p2_to_vec(&vk_beta_2);
let vk_delta_2 = params.vk.delta_g2.clone();
proving_key.vk_delta_2 = p2_to_vec(&vk_delta_2);
let h = params.h.clone();
for e in h.iter() {
proving_key.h.push(p1_to_vec(e));
}
let mut verification_key = VerifyingKeyJson {
ic: vec![],
vk_alfa_1: vec![],
vk_beta_2: vec![],
vk_gamma_2: vec![],
vk_delta_2: vec![],
};
let ic = params.vk.ic.clone();
for e in ic.iter() {
verification_key.ic.push(p1_to_vec(e));
}
verification_key.vk_alfa_1 = p1_to_vec(&vk_alfa_1);
verification_key.vk_beta_2 = p2_to_vec(&vk_beta_2);
let vk_gamma_2 = params.vk.gamma_g2.clone();
verification_key.vk_gamma_2 = p2_to_vec(&vk_gamma_2);
verification_key.vk_delta_2 = p2_to_vec(&vk_delta_2);
// Write the JSON keys: size the file, then copy through a mutable mmap.
let mut pk_file = OpenOptions::new().read(true).write(true).create_new(true).open("pk.json").unwrap();
let pk_json = serde_json::to_string(&proving_key).unwrap();
// NOTE(review): set_len's Result looks ignored (the exporter binary uses
// expect() here); a failure would surface later as a short mmap — confirm.
pk_file.set_len(pk_json.len() as u64);
let mut mmap = unsafe { memmap::Mmap::map(&pk_file) }.unwrap().make_mut().unwrap();
mmap.deref_mut().write_all(pk_json.as_bytes()).unwrap();
let mut vk_file = OpenOptions::new().read(true).write(true).create_new(true).open("vk.json").unwrap();
let vk_json = serde_json::to_string(&verification_key).unwrap();
vk_file.set_len(vk_json.len() as u64);
let mut mmap = unsafe { memmap::Mmap::map(&vk_file) }.unwrap().make_mut().unwrap();
mmap.deref_mut().write_all(vk_json.as_bytes()).unwrap();
/*
// Prepare the verification key (for proof verification)
let pvk = prepare_verifying_key(&params.vk);
println!("Creating proofs...");
// Let's benchmark stuff!
const SAMPLES: u32 = 50;
let mut total_proving = Duration::new(0, 0);
let mut total_verifying = Duration::new(0, 0);
// Just a place to put the proof data, so we can
// benchmark deserialization.
let mut proof_vec = vec![];
for _ in 0..SAMPLES {
// Generate a random preimage and compute the image
let xl = rng.gen();
let xr = rng.gen();
let image = mimc::<Bn256>(xl, xr, &constants);
proof_vec.truncate(0);
let start = Instant::now();
{
// Create an instance of our circuit (with the
// witness)
let c = MiMCDemo {
xl: Some(xl),
xr: Some(xr),
constants: &constants
};
// Create a groth16 proof with our parameters.
let proof = create_random_proof(c, params, rng).unwrap();
proof.write(&mut proof_vec).unwrap();
}
total_proving += start.elapsed();
let start = Instant::now();
let proof = Proof::read(&proof_vec[..]).unwrap();
// Check the proof
assert!(verify_proof(
&pvk,
&proof,
&[image]
).unwrap());
total_verifying += start.elapsed();
}
let proving_avg = total_proving / SAMPLES;
let proving_avg = proving_avg.subsec_nanos() as f64 / 1_000_000_000f64
+ (proving_avg.as_secs() as f64);
let verifying_avg = total_verifying / SAMPLES;
let verifying_avg = verifying_avg.subsec_nanos() as f64 / 1_000_000_000f64
+ (verifying_avg.as_secs() as f64);
println!("Average proving time: {:?} seconds", proving_avg);
println!("Average verifying time: {:?} seconds", verifying_avg);
*/
}

@ -3,6 +3,8 @@ extern crate phase2;
extern crate exitcode;
use std::fs::File;
use phase2::parameters::MPCParameters;
use phase2::circom_circuit::CircomCircuit;
fn main() {
let args: Vec<String> = std::env::args().collect();
@ -18,10 +20,10 @@ fn main() {
// Import the circuit and create the initial parameters using phase 1
println!("Creating initial parameters for {}...", circuit_filename);
let params = {
let c = phase2::CircomCircuit {
let c = CircomCircuit {
file_name: &circuit_filename,
};
phase2::MPCParameters::new(c, should_filter_points_at_infinity).unwrap()
MPCParameters::new(c, should_filter_points_at_infinity).unwrap()
};
println!("Writing initial parameters to {}.", params_filename);

@ -3,6 +3,9 @@ extern crate exitcode;
use std::fs::OpenOptions;
use phase2::parameters::*;
use phase2::circom_circuit::CircomCircuit;
fn main() {
let args: Vec<String> = std::env::args().collect();
if args.len() != 4 {
@ -19,21 +22,21 @@ fn main() {
.read(true)
.open(old_params_filename)
.expect("unable to open old params");
let old_params = phase2::MPCParameters::read(old_reader, disallow_points_at_infinity, true).expect("unable to read old params");
let old_params = MPCParameters::read(old_reader, disallow_points_at_infinity, true).expect("unable to read old params");
let new_reader = OpenOptions::new()
.read(true)
.open(new_params_filename)
.expect("unable to open new params");
let new_params = phase2::MPCParameters::read(new_reader, disallow_points_at_infinity, true).expect("unable to read new params");
let new_params = MPCParameters::read(new_reader, disallow_points_at_infinity, true).expect("unable to read new params");
println!("Checking contribution {}...", new_params_filename);
let contribution = phase2::verify_contribution(&old_params, &new_params).expect("should verify");
let contribution = verify_contribution(&old_params, &new_params).expect("should verify");
let should_filter_points_at_infinity = false;
let verification_result = new_params.verify(phase2::CircomCircuit {
let verification_result = new_params.verify(CircomCircuit {
file_name: &circuit_filename,
}, should_filter_points_at_infinity).unwrap();
assert!(phase2::contains_contribution(&verification_result, &contribution));
assert!(contains_contribution(&verification_result, &contribution));
println!("Contribution {} verified.", new_params_filename);
}

@ -0,0 +1,101 @@
#![allow(unused_imports)]
extern crate bellman_ce;
use std::str;
use std::fs;
use std::collections::BTreeMap;
use bellman_ce::pairing::{
Engine,
ff::{
PrimeField,
},
};
use bellman_ce::{
Circuit,
SynthesisError,
Variable,
Index,
ConstraintSystem,
LinearCombination,
};
// Deserialized form of a circom-generated circuit JSON file:
// the constraint list plus variable counts.
#[derive(Serialize, Deserialize)]
struct CircuitJson {
// Each constraint is a triple [A, B, C] of linear combinations (A*B = C),
// each given as a map from variable index (stringified) to coefficient.
pub constraints: Vec<Vec<BTreeMap<String, String>>>,
#[serde(rename = "nPubInputs")]
pub num_inputs: usize,
#[serde(rename = "nOutputs")]
pub num_outputs: usize,
#[serde(rename = "nVars")]
pub num_variables: usize,
}
/// A bellman circuit whose R1CS description is loaded, at synthesis
/// time, from a circom-generated JSON file.
pub struct CircomCircuit<'a> {
// Path of the circuit JSON file to read during `synthesize`.
pub file_name: &'a str,
}
/// Synthesizes the constraint system described by the circom-generated
/// JSON file at `self.file_name`.
///
/// Every witness value is stubbed with `1`: this implementation exists
/// for parameter generation, where only the constraint *structure*
/// matters, not a satisfying assignment.
impl<'a, E: Engine> Circuit<E> for CircomCircuit<'a> {
    fn synthesize<CS: ConstraintSystem<E>>(
        self,
        cs: &mut CS
    ) -> Result<(), SynthesisError>
    {
        let content = fs::read_to_string(self.file_name)?;
        let circuit_json: CircuitJson = serde_json::from_str(&content).unwrap();
        // Variable 0 is the constant "one"; outputs and public inputs follow.
        let num_public_inputs = circuit_json.num_inputs + circuit_json.num_outputs + 1;
        // Allocate every circuit variable (dummy assignment of 1).
        for i in 1..circuit_json.num_variables {
            if i < num_public_inputs {
                cs.alloc_input(|| format!("variable {}", i), || {
                    Ok(E::Fr::from_str("1").unwrap())
                })?;
            } else {
                cs.alloc(|| format!("variable {}", i), || {
                    Ok(E::Fr::from_str("1").unwrap())
                })?;
            }
        }
        // Track which variables appear in at least one constraint.
        let mut constrained: BTreeMap<usize, bool> = BTreeMap::new();
        for (i, constraint) in circuit_json.constraints.iter().enumerate() {
            // Each constraint is [A, B, C] with A * B = C.
            let mut lcs = vec![];
            for lc_description in constraint {
                let mut lc = LinearCombination::<E>::zero();
                for (var_index_str, coefficient_str) in lc_description {
                    let var_index_num: usize = var_index_str.parse().unwrap();
                    let var_index = if var_index_num < num_public_inputs {
                        Index::Input(var_index_num)
                    } else {
                        Index::Aux(var_index_num - num_public_inputs)
                    };
                    constrained.insert(var_index_num, true);
                    // NOTE(review): the original had an `if i == 2` branch here
                    // whose two arms were byte-identical; collapsed to one.
                    lc = lc + (E::Fr::from_str(coefficient_str).unwrap(), Variable::new_unchecked(var_index));
                }
                lcs.push(lc);
            }
            // The separate `constraint_num` counter always equaled `i`; use `i`.
            cs.enforce(|| format!("constraint {}", i), |_| lcs[0].clone(), |_| lcs[1].clone(), |_| lcs[2].clone());
        }
        println!("constraints: {}", circuit_json.constraints.len());
        // Report unconstrained variables, in ascending index order, as a
        // sanity check on the input file.
        for i in 0..circuit_json.num_variables {
            if !constrained.contains_key(&i) {
                println!("variable {} is unconstrained", i);
            }
        }
        Ok(())
    }
}

53
phase2/src/hash_writer.rs Normal file

@ -0,0 +1,53 @@
extern crate blake2_rfc;
use std::io;
use std::io::Write;
use blake2_rfc::blake2b::Blake2b;
/// Abstraction over a writer which hashes the data being written.
pub struct HashWriter<W: Write> {
// Destination that actually receives the bytes.
writer: W,
// BLAKE2b state, fed every byte the inner writer accepts.
hasher: Blake2b
}
/// Cloning is only provided for the sink-backed variant: the hash state
/// is duplicated while the writer side stays a fresh `io::sink()`.
impl Clone for HashWriter<io::Sink> {
    fn clone(&self) -> HashWriter<io::Sink> {
        let hasher = self.hasher.clone();
        HashWriter { writer: io::sink(), hasher }
    }
}
impl<W: Write> HashWriter<W> {
    /// Wrap `writer`, hashing everything subsequently written through it.
    pub fn new(writer: W) -> Self {
        let hasher = Blake2b::new(64);
        HashWriter { writer, hasher }
    }

    /// Consume the writer and return the 64-byte BLAKE2b digest of all
    /// bytes written so far.
    pub fn into_hash(self) -> [u8; 64] {
        let mut digest = [0u8; 64];
        digest.copy_from_slice(self.hasher.finalize().as_ref());
        digest
    }
}
impl<W: Write> Write for HashWriter<W> {
    /// Forward to the inner writer, folding whatever it accepted into the
    /// hash (only the bytes actually written, never the whole buffer).
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let written = self.writer.write(buf)?;
        if written > 0 {
            self.hasher.update(&buf[..written]);
        }
        Ok(written)
    }

    fn flush(&mut self) -> io::Result<()> {
        self.writer.flush()
    }
}

116
phase2/src/keypair.rs Normal file

@ -0,0 +1,116 @@
extern crate bellman_ce;
use std::io::{
self,
Read,
Write,
};
use bellman_ce::pairing::{
EncodedPoint,
CurveAffine,
bn256::{
Fr,
G1Affine,
G1Uncompressed,
G2Affine,
G2Uncompressed
}
};
/// This needs to be destroyed by at least one participant
/// for the final parameters to be secure.
pub struct PrivateKey {
// The contributor's secret `delta` scalar (see `PublicKey` field docs).
pub delta: Fr
}
/// This allows others to verify that you contributed. The hash produced
/// by `MPCParameters::contribute` is just a BLAKE2b hash of this object.
// Serialized by `PublicKey::write` as four uncompressed points followed
// by the 64-byte transcript hash.
#[derive(Clone)]
pub struct PublicKey {
/// This is the delta (in G1) after the transformation, kept so that we
/// can check correctness of the public keys without having the entire
/// interstitial parameters for each contribution.
pub delta_after: G1Affine,
/// Random element chosen by the contributor.
pub s: G1Affine,
/// That element, taken to the contributor's secret delta.
pub s_delta: G1Affine,
/// r is H(last_pubkey | s | s_delta), r_delta proves knowledge of delta
pub r_delta: G2Affine,
/// Hash of the transcript (used for mapping to r)
pub transcript: [u8; 64],
}
impl PublicKey {
pub fn write<W: Write>(
&self,
mut writer: W
) -> io::Result<()>
{
writer.write_all(self.delta_after.into_uncompressed().as_ref())?;
writer.write_all(self.s.into_uncompressed().as_ref())?;
writer.write_all(self.s_delta.into_uncompressed().as_ref())?;
writer.write_all(self.r_delta.into_uncompressed().as_ref())?;
writer.write_all(&self.transcript)?;
Ok(())
}
pub fn read<R: Read>(
mut reader: R
) -> io::Result<PublicKey>
{
let mut g1_repr = G1Uncompressed::empty();
let mut g2_repr = G2Uncompressed::empty();
reader.read_exact(g1_repr.as_mut())?;
let delta_after = g1_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
if delta_after.is_zero() {
return Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"));
}
reader.read_exact(g1_repr.as_mut())?;
let s = g1_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
if s.is_zero() {
return Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"));
}
reader.read_exact(g1_repr.as_mut())?;
let s_delta = g1_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
if s_delta.is_zero() {
return Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"));
}
reader.read_exact(g2_repr.as_mut())?;
let r_delta = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
if r_delta.is_zero() {
return Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"));
}
let mut transcript = [0u8; 64];
reader.read_exact(&mut transcript)?;
Ok(PublicKey {
delta_after, s, s_delta, r_delta, transcript
})
}
}
impl PartialEq for PublicKey {
    fn eq(&self, other: &PublicKey) -> bool {
        // The transcript is compared as a slice: `[u8; 64]` has no
        // `PartialEq` impl on older compilers (arrays > 32 elements).
        self.delta_after == other.delta_after
            && self.s == other.s
            && self.s_delta == other.s_delta
            && self.r_delta == other.r_delta
            && self.transcript[..] == other.transcript[..]
    }
}

@ -0,0 +1,118 @@
extern crate bellman_ce;
use bellman_ce::pairing::Engine;
use bellman_ce::{
SynthesisError,
Variable,
Index,
ConstraintSystem,
LinearCombination,
};
/// This is our assembly structure that we'll use to synthesize the
/// circuit into a QAP.
pub struct KeypairAssembly<E: Engine> {
// Counts of public inputs, auxiliary variables and constraints seen so far.
pub num_inputs: usize,
pub num_aux: usize,
pub num_constraints: usize,
// Sparse QAP matrices: for each variable, the (coefficient, constraint
// index) pairs in which it occurs, for the A, B and C matrices
// respectively, split by input vs auxiliary variables.
pub at_inputs: Vec<Vec<(E::Fr, usize)>>,
pub bt_inputs: Vec<Vec<(E::Fr, usize)>>,
pub ct_inputs: Vec<Vec<(E::Fr, usize)>>,
pub at_aux: Vec<Vec<(E::Fr, usize)>>,
pub bt_aux: Vec<Vec<(E::Fr, usize)>>,
pub ct_aux: Vec<Vec<(E::Fr, usize)>>
}
// A `ConstraintSystem` that never evaluates witnesses: it only records
// the constraint structure (variable counts + sparse matrix rows).
impl<E: Engine> ConstraintSystem<E> for KeypairAssembly<E> {
type Root = Self;
// Allocate an auxiliary (private) variable; extends the aux-side
// matrix rows with empty entries.
fn alloc<F, A, AR>(
&mut self,
_: A,
_: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
{
// There is no assignment, so we don't even invoke the
// function for obtaining one.
let index = self.num_aux;
self.num_aux += 1;
self.at_aux.push(vec![]);
self.bt_aux.push(vec![]);
self.ct_aux.push(vec![]);
Ok(Variable::new_unchecked(Index::Aux(index)))
}
// Allocate a public input variable; mirrors `alloc` but extends the
// input-side matrix rows.
fn alloc_input<F, A, AR>(
&mut self,
_: A,
_: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
{
// There is no assignment, so we don't even invoke the
// function for obtaining one.
let index = self.num_inputs;
self.num_inputs += 1;
self.at_inputs.push(vec![]);
self.bt_inputs.push(vec![]);
self.ct_inputs.push(vec![]);
Ok(Variable::new_unchecked(Index::Input(index)))
}
// Record constraint a * b = c by scattering each linear combination's
// terms into the per-variable (coefficient, constraint-index) lists.
fn enforce<A, AR, LA, LB, LC>(
&mut self,
_: A,
a: LA,
b: LB,
c: LC
)
where A: FnOnce() -> AR, AR: Into<String>,
LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>
{
// Scatter one linear combination into the input/aux rows, tagged
// with the index of the constraint being added.
fn eval<E: Engine>(
l: LinearCombination<E>,
inputs: &mut [Vec<(E::Fr, usize)>],
aux: &mut [Vec<(E::Fr, usize)>],
this_constraint: usize
)
{
for &(var, coeff) in l.as_ref() {
match var.get_unchecked() {
Index::Input(id) => inputs[id].push((coeff, this_constraint)),
Index::Aux(id) => aux[id].push((coeff, this_constraint))
}
}
}
eval(a(LinearCombination::zero()), &mut self.at_inputs, &mut self.at_aux, self.num_constraints);
eval(b(LinearCombination::zero()), &mut self.bt_inputs, &mut self.bt_aux, self.num_constraints);
eval(c(LinearCombination::zero()), &mut self.ct_inputs, &mut self.ct_aux, self.num_constraints);
self.num_constraints += 1;
}
fn push_namespace<NR, N>(&mut self, _: N)
where NR: Into<String>, N: FnOnce() -> NR
{
// Do nothing; we don't care about namespaces in this context.
}
fn pop_namespace(&mut self)
{
// Do nothing; we don't care about namespaces in this context.
}
fn get_root(&mut self) -> &mut Self::Root {
self
}
}

File diff suppressed because it is too large Load Diff

870
phase2/src/parameters.rs Normal file

@ -0,0 +1,870 @@
extern crate bellman_ce;
extern crate rand;
extern crate byteorder;
extern crate num_cpus;
extern crate crossbeam;
use byteorder::{
BigEndian,
ReadBytesExt,
WriteBytesExt
};
use std::{
io::{
self,
Read,
Write,
BufReader
},
fs::{
File
},
sync::{
Arc
}
};
use bellman_ce::pairing::{
ff::{
PrimeField,
Field,
},
EncodedPoint,
CurveAffine,
CurveProjective,
Wnaf,
bn256::{
Bn256,
Fr,
G1,
G2,
G1Affine,
G1Uncompressed,
G2Affine,
G2Uncompressed
}
};
pub use bellman_ce::multicore::*;
use bellman_ce::{
Circuit,
SynthesisError,
Variable,
Index,
ConstraintSystem,
groth16::{
Parameters,
VerifyingKey
},
};
use rand::{
Rng,
Rand,
ChaChaRng,
SeedableRng
};
use super::hash_writer::*;
use super::keypair_assembly::*;
use super::keypair::*;
use super::utils::*;
/// MPC parameters are just like bellman `Parameters` except, when serialized,
/// they contain a transcript of contributions at the end, which can be verified.
#[derive(Clone)]
pub struct MPCParameters {
// The underlying Groth16 parameters over bn256.
params: Parameters<Bn256>,
// 64-byte hash identifying the constraint system these parameters were
// built for — NOTE(review): confirm exact derivation in `new`/`contribute`.
cs_hash: [u8; 64],
// One public key per contribution, in contribution order.
contributions: Vec<PublicKey>
}
impl PartialEq for MPCParameters {
    fn eq(&self, other: &MPCParameters) -> bool {
        // `cs_hash` is a `[u8; 64]`; compare through slices since arrays
        // larger than 32 elements lack `PartialEq` on older compilers.
        self.params == other.params
            && self.cs_hash[..] == other.cs_hash[..]
            && self.contributions == other.contributions
    }
}
impl MPCParameters {
/// Create new Groth16 parameters (compatible with bellman) for a
/// given circuit. The resulting parameters are unsafe to use
/// until there are contributions (see `contribute()`).
pub fn new<C>(
circuit: C,
should_filter_points_at_infinity: bool,
) -> Result<MPCParameters, SynthesisError>
where C: Circuit<Bn256>
{
let mut assembly = KeypairAssembly {
num_inputs: 0,
num_aux: 0,
num_constraints: 0,
at_inputs: vec![],
bt_inputs: vec![],
ct_inputs: vec![],
at_aux: vec![],
bt_aux: vec![],
ct_aux: vec![]
};
// Allocate the "one" input variable
assembly.alloc_input(|| "", || Ok(Fr::one()))?;
// Synthesize the circuit.
circuit.synthesize(&mut assembly)?;
// Input constraints to ensure full density of IC query
// x * 0 = 0
for i in 0..assembly.num_inputs {
assembly.enforce(|| "",
|lc| lc + Variable::new_unchecked(Index::Input(i)),
|lc| lc,
|lc| lc,
);
}
// Compute the size of our evaluation domain
let mut m = 1;
let mut exp = 0;
while m < assembly.num_constraints {
m *= 2;
exp += 1;
// Powers of Tau ceremony can't support more than 2^28
if exp > 28 {
return Err(SynthesisError::PolynomialDegreeTooLarge)
}
}
// Try to load "phase1radix2m{}"
let f = match File::open(format!("phase1radix2m{}", exp)) {
Ok(f) => f,
Err(e) => {
panic!("Couldn't load phase1radix2m{}: {:?}", exp, e);
}
};
let f = &mut BufReader::with_capacity(1024 * 1024, f);
let read_g1 = |reader: &mut BufReader<File>| -> io::Result<G1Affine> {
let mut repr = G1Uncompressed::empty();
reader.read_exact(repr.as_mut())?;
repr.into_affine_unchecked()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
.and_then(|e| if e.is_zero() {
Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
} else {
Ok(e)
})
};
let read_g2 = |reader: &mut BufReader<File>| -> io::Result<G2Affine> {
let mut repr = G2Uncompressed::empty();
reader.read_exact(repr.as_mut())?;
repr.into_affine_unchecked()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
.and_then(|e| if e.is_zero() {
Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
} else {
Ok(e)
})
};
let alpha = read_g1(f)?;
let beta_g1 = read_g1(f)?;
let beta_g2 = read_g2(f)?;
let mut coeffs_g1 = Vec::with_capacity(m);
for _ in 0..m {
coeffs_g1.push(read_g1(f)?);
}
let mut coeffs_g2 = Vec::with_capacity(m);
for _ in 0..m {
coeffs_g2.push(read_g2(f)?);
}
let mut alpha_coeffs_g1 = Vec::with_capacity(m);
for _ in 0..m {
alpha_coeffs_g1.push(read_g1(f)?);
}
let mut beta_coeffs_g1 = Vec::with_capacity(m);
for _ in 0..m {
beta_coeffs_g1.push(read_g1(f)?);
}
// These are `Arc` so that later it'll be easier
// to use multiexp during QAP evaluation (which
// requires a futures-based API)
let coeffs_g1 = Arc::new(coeffs_g1);
let coeffs_g2 = Arc::new(coeffs_g2);
let alpha_coeffs_g1 = Arc::new(alpha_coeffs_g1);
let beta_coeffs_g1 = Arc::new(beta_coeffs_g1);
let mut h = Vec::with_capacity(m-1);
for _ in 0..m-1 {
h.push(read_g1(f)?);
}
let mut ic = vec![G1::zero(); assembly.num_inputs];
let mut l = vec![G1::zero(); assembly.num_aux];
let mut a_g1 = vec![G1::zero(); assembly.num_inputs + assembly.num_aux];
let mut b_g1 = vec![G1::zero(); assembly.num_inputs + assembly.num_aux];
let mut b_g2 = vec![G2::zero(); assembly.num_inputs + assembly.num_aux];
fn eval(
// Lagrange coefficients for tau
coeffs_g1: Arc<Vec<G1Affine>>,
coeffs_g2: Arc<Vec<G2Affine>>,
alpha_coeffs_g1: Arc<Vec<G1Affine>>,
beta_coeffs_g1: Arc<Vec<G1Affine>>,
// QAP polynomials
at: &[Vec<(Fr, usize)>],
bt: &[Vec<(Fr, usize)>],
ct: &[Vec<(Fr, usize)>],
// Resulting evaluated QAP polynomials
a_g1: &mut [G1],
b_g1: &mut [G1],
b_g2: &mut [G2],
ext: &mut [G1],
// Worker
worker: &Worker
)
{
// Sanity check
assert_eq!(a_g1.len(), at.len());
assert_eq!(a_g1.len(), bt.len());
assert_eq!(a_g1.len(), ct.len());
assert_eq!(a_g1.len(), b_g1.len());
assert_eq!(a_g1.len(), b_g2.len());
assert_eq!(a_g1.len(), ext.len());
// Evaluate polynomials in multiple threads
worker.scope(a_g1.len(), |scope, chunk| {
for ((((((a_g1, b_g1), b_g2), ext), at), bt), ct) in
a_g1.chunks_mut(chunk)
.zip(b_g1.chunks_mut(chunk))
.zip(b_g2.chunks_mut(chunk))
.zip(ext.chunks_mut(chunk))
.zip(at.chunks(chunk))
.zip(bt.chunks(chunk))
.zip(ct.chunks(chunk))
{
let coeffs_g1 = coeffs_g1.clone();
let coeffs_g2 = coeffs_g2.clone();
let alpha_coeffs_g1 = alpha_coeffs_g1.clone();
let beta_coeffs_g1 = beta_coeffs_g1.clone();
scope.spawn(move |_| {
for ((((((a_g1, b_g1), b_g2), ext), at), bt), ct) in
a_g1.iter_mut()
.zip(b_g1.iter_mut())
.zip(b_g2.iter_mut())
.zip(ext.iter_mut())
.zip(at.iter())
.zip(bt.iter())
.zip(ct.iter())
{
for &(coeff, lag) in at {
a_g1.add_assign(&coeffs_g1[lag].mul(coeff));
ext.add_assign(&beta_coeffs_g1[lag].mul(coeff));
}
for &(coeff, lag) in bt {
b_g1.add_assign(&coeffs_g1[lag].mul(coeff));
b_g2.add_assign(&coeffs_g2[lag].mul(coeff));
ext.add_assign(&alpha_coeffs_g1[lag].mul(coeff));
}
for &(coeff, lag) in ct {
ext.add_assign(&coeffs_g1[lag].mul(coeff));
}
}
// Batch normalize
G1::batch_normalization(a_g1);
G1::batch_normalization(b_g1);
G2::batch_normalization(b_g2);
G1::batch_normalization(ext);
});
}
});
}
let worker = Worker::new();
// Evaluate for inputs.
eval(
coeffs_g1.clone(),
coeffs_g2.clone(),
alpha_coeffs_g1.clone(),
beta_coeffs_g1.clone(),
&assembly.at_inputs,
&assembly.bt_inputs,
&assembly.ct_inputs,
&mut a_g1[0..assembly.num_inputs],
&mut b_g1[0..assembly.num_inputs],
&mut b_g2[0..assembly.num_inputs],
&mut ic,
&worker
);
// Evaluate for auxillary variables.
eval(
coeffs_g1.clone(),
coeffs_g2.clone(),
alpha_coeffs_g1.clone(),
beta_coeffs_g1.clone(),
&assembly.at_aux,
&assembly.bt_aux,
&assembly.ct_aux,
&mut a_g1[assembly.num_inputs..],
&mut b_g1[assembly.num_inputs..],
&mut b_g2[assembly.num_inputs..],
&mut l,
&worker
);
// Don't allow any elements be unconstrained, so that
// the L query is always fully dense.
for e in l.iter() {
if e.is_zero() {
return Err(SynthesisError::UnconstrainedVariable);
}
}
let vk = VerifyingKey {
alpha_g1: alpha,
beta_g1: beta_g1,
beta_g2: beta_g2,
gamma_g2: G2Affine::one(),
delta_g1: G1Affine::one(),
delta_g2: G2Affine::one(),
ic: ic.into_iter().map(|e| e.into_affine()).collect()
};
let params = if should_filter_points_at_infinity {
Parameters {
vk: vk,
h: Arc::new(h),
l: Arc::new(l.into_iter().map(|e| e.into_affine()).collect()),
// Filter points at infinity away from A/B queries
a: Arc::new(a_g1.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect()),
b_g1: Arc::new(b_g1.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect()),
b_g2: Arc::new(b_g2.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect())
}
} else {
Parameters {
vk: vk,
h: Arc::new(h),
l: Arc::new(l.into_iter().map(|e| e.into_affine()).collect()),
a: Arc::new(a_g1.into_iter().map(|e| e.into_affine()).collect()),
b_g1: Arc::new(b_g1.into_iter().map(|e| e.into_affine()).collect()),
b_g2: Arc::new(b_g2.into_iter().map(|e| e.into_affine()).collect())
}
};
let h = {
let sink = io::sink();
let mut sink = HashWriter::new(sink);
params.write(&mut sink).unwrap();
sink.into_hash()
};
let mut cs_hash = [0; 64];
cs_hash.copy_from_slice(h.as_ref());
Ok(MPCParameters {
params: params,
cs_hash: cs_hash,
contributions: vec![]
})
}
/// Get the underlying Groth16 `Parameters`
///
/// Returns a borrow of the wrapped `Parameters<Bn256>`; the caller
/// can clone it if an owned copy is needed (e.g. for proving).
pub fn get_params(&self) -> &Parameters<Bn256> {
    &self.params
}
/// Contributes some randomness to the parameters. Only one
/// contributor needs to be honest for the parameters to be
/// secure.
///
/// This function returns a "hash" that is bound to the
/// contribution. Contributors can use this hash to make
/// sure their contribution is in the final parameters, by
/// checking to see if it appears in the output of
/// `MPCParameters::verify`.
pub fn contribute<R: Rng>(
    &mut self,
    rng: &mut R
) -> [u8; 64]
{
    // Generate a keypair
    let (pubkey, privkey) = keypair(rng, self);

    // Multiplies every point in `bases` by `coeff` in place,
    // splitting the work across all available CPU cores.
    fn batch_exp<C: CurveAffine>(bases: &mut [C], coeff: C::Scalar) {
        let coeff = coeff.into_repr();

        let mut projective = vec![C::Projective::zero(); bases.len()];
        let cpus = num_cpus::get();
        // Ensure a non-zero chunk size even when there are fewer
        // bases than cores.
        let chunk_size = if bases.len() < cpus {
            1
        } else {
            bases.len() / cpus
        };

        // Perform wNAF over multiple cores, placing results into `projective`.
        crossbeam::scope(|scope| {
            for (bases, projective) in bases.chunks_mut(chunk_size)
                                            .zip(projective.chunks_mut(chunk_size))
            {
                scope.spawn(move || {
                    let mut wnaf = Wnaf::new();

                    for (base, projective) in bases.iter_mut()
                                                   .zip(projective.iter_mut())
                    {
                        *projective = wnaf.base(base.into_projective(), 1).scalar(coeff);
                    }
                });
            }
        });

        // Perform batch normalization
        crossbeam::scope(|scope| {
            for projective in projective.chunks_mut(chunk_size)
            {
                scope.spawn(move || {
                    C::Projective::batch_normalization(projective);
                });
            }
        });

        // Turn it all back into affine points
        for (projective, affine) in projective.iter().zip(bases.iter_mut()) {
            *affine = projective.into_affine();
        }
    }

    // Apply delta^-1 to the H and L queries, and delta to the
    // delta elements of the verifying key, which keeps the
    // parameters consistent while re-randomizing them.
    let delta_inv = privkey.delta.inverse().expect("nonzero");
    let mut l = (&self.params.l[..]).to_vec();
    let mut h = (&self.params.h[..]).to_vec();
    batch_exp(&mut l, delta_inv);
    batch_exp(&mut h, delta_inv);
    self.params.l = Arc::new(l);
    self.params.h = Arc::new(h);

    self.params.vk.delta_g1 = self.params.vk.delta_g1.mul(privkey.delta).into_affine();
    self.params.vk.delta_g2 = self.params.vk.delta_g2.mul(privkey.delta).into_affine();

    // Record this contribution's public key so that verifiers
    // can replay the transcript.
    self.contributions.push(pubkey.clone());

    // Calculate the hash of the public key and return it
    {
        let sink = io::sink();
        let mut sink = HashWriter::new(sink);
        pubkey.write(&mut sink).unwrap();
        let h = sink.into_hash();
        let mut response = [0u8; 64];
        response.copy_from_slice(h.as_ref());
        response
    }
}
/// Verify the correctness of the parameters, given a circuit
/// instance. This will return all of the hashes that
/// contributors obtained when they ran
/// `MPCParameters::contribute`, for ensuring that contributions
/// exist in the final parameters.
pub fn verify<C: Circuit<Bn256>>(
    &self,
    circuit: C,
    should_filter_points_at_infinity: bool,
) -> Result<Vec<[u8; 64]>, ()>
{
    // Re-run the deterministic phase-2 setup for this circuit;
    // everything that does not depend on delta must match it.
    let initial_params = MPCParameters::new(circuit, should_filter_points_at_infinity).map_err(|_| ())?;

    // H/L will change, but should have same length
    if initial_params.params.h.len() != self.params.h.len() {
        return Err(());
    }
    if initial_params.params.l.len() != self.params.l.len() {
        return Err(());
    }

    // A/B_G1/B_G2 doesn't change at all
    if initial_params.params.a != self.params.a {
        return Err(());
    }
    if initial_params.params.b_g1 != self.params.b_g1 {
        return Err(());
    }
    if initial_params.params.b_g2 != self.params.b_g2 {
        return Err(());
    }

    // alpha/beta/gamma don't change
    if initial_params.params.vk.alpha_g1 != self.params.vk.alpha_g1 {
        return Err(());
    }
    if initial_params.params.vk.beta_g1 != self.params.vk.beta_g1 {
        return Err(());
    }
    if initial_params.params.vk.beta_g2 != self.params.vk.beta_g2 {
        return Err(());
    }
    if initial_params.params.vk.gamma_g2 != self.params.vk.gamma_g2 {
        return Err(());
    }

    // IC shouldn't change, as gamma doesn't change
    if initial_params.params.vk.ic != self.params.vk.ic {
        return Err(());
    }

    // cs_hash should be the same
    if &initial_params.cs_hash[..] != &self.cs_hash[..] {
        return Err(());
    }

    // Seed the running transcript with the circuit hash, then
    // replay every contribution on top of it.
    let sink = io::sink();
    let mut sink = HashWriter::new(sink);
    sink.write_all(&initial_params.cs_hash[..]).unwrap();

    let mut current_delta = G1Affine::one();
    let mut result = vec![];

    for pubkey in &self.contributions {
        // Fork the transcript and absorb this pubkey's s-pair to
        // reproduce the digest the contributor committed to.
        let mut our_sink = sink.clone();
        our_sink.write_all(pubkey.s.into_uncompressed().as_ref()).unwrap();
        our_sink.write_all(pubkey.s_delta.into_uncompressed().as_ref()).unwrap();

        // Extend the main transcript with the full pubkey for the
        // next iteration.
        pubkey.write(&mut sink).unwrap();

        let h = our_sink.into_hash();

        // The transcript must be consistent
        if &pubkey.transcript[..] != h.as_ref() {
            return Err(());
        }

        let r = hash_to_g2(h.as_ref()).into_affine();

        // Check the signature of knowledge
        if !same_ratio((r, pubkey.r_delta), (pubkey.s, pubkey.s_delta)) {
            return Err(());
        }

        // Check the change from the old delta is consistent
        if !same_ratio(
            (current_delta, pubkey.delta_after),
            (r, pubkey.r_delta)
        ) {
            return Err(());
        }

        current_delta = pubkey.delta_after;

        // Record H(pubkey); contributors compare it against the
        // value that `contribute` returned to them.
        {
            let sink = io::sink();
            let mut sink = HashWriter::new(sink);
            pubkey.write(&mut sink).unwrap();
            let h = sink.into_hash();
            let mut response = [0u8; 64];
            response.copy_from_slice(h.as_ref());
            result.push(response);
        }
    }

    // Current parameters should have consistent delta in G1
    if current_delta != self.params.vk.delta_g1 {
        return Err(());
    }

    // Current parameters should have consistent delta in G2
    if !same_ratio(
        (G1Affine::one(), current_delta),
        (G2Affine::one(), self.params.vk.delta_g2)
    ) {
        return Err(());
    }

    // H and L queries should be updated with delta^-1
    if !same_ratio(
        merge_pairs(&initial_params.params.h, &self.params.h),
        (self.params.vk.delta_g2, G2Affine::one()) // reversed for inverse
    ) {
        return Err(());
    }
    if !same_ratio(
        merge_pairs(&initial_params.params.l, &self.params.l),
        (self.params.vk.delta_g2, G2Affine::one()) // reversed for inverse
    ) {
        return Err(());
    }

    Ok(result)
}
/// Serialize these parameters. The serialized parameters
/// can be read by bellman as Groth16 `Parameters`.
///
/// Layout: Groth16 params | 64-byte circuit hash |
/// big-endian u32 pubkey count | each contribution pubkey.
pub fn write<W: Write>(
    &self,
    mut writer: W
) -> io::Result<()>
{
    self.params.write(&mut writer)?;
    writer.write_all(&self.cs_hash)?;

    let num_contributions = self.contributions.len() as u32;
    writer.write_u32::<BigEndian>(num_contributions)?;

    // Append every contribution's public key in order.
    self.contributions
        .iter()
        .try_for_each(|pubkey| pubkey.write(&mut writer))
}
/// Deserialize these parameters. If `checked` is false,
/// we won't perform curve validity and group order
/// checks.
///
/// Reads the same layout that `write` produces: Groth16
/// params, 64-byte circuit hash, pubkey count, pubkeys.
pub fn read<R: Read>(
    mut reader: R,
    disallow_points_at_infinity: bool,
    checked: bool
) -> io::Result<MPCParameters>
{
    let params = Parameters::read(&mut reader, disallow_points_at_infinity, checked)?;

    let mut cs_hash = [0u8; 64];
    reader.read_exact(&mut cs_hash)?;

    // Read exactly `contributions_len` public keys, short-circuiting
    // on the first I/O error.
    let contributions_len = reader.read_u32::<BigEndian>()? as usize;
    let contributions = (0..contributions_len)
        .map(|_| PublicKey::read(&mut reader))
        .collect::<io::Result<Vec<_>>>()?;

    Ok(MPCParameters { params, cs_hash, contributions })
}
}
/// This is a cheap helper utility that exists purely
/// because Rust still doesn't have type-level integers
/// and so doesn't implement `PartialEq` for `[T; 64]`
///
/// Returns `true` if `my_contribution` appears anywhere in
/// `contributions`, comparing the 64-byte hashes as slices.
pub fn contains_contribution(
    contributions: &[[u8; 64]],
    my_contribution: &[u8; 64]
) -> bool
{
    // Compare via slices, which do implement `PartialEq`
    // regardless of length.
    contributions
        .iter()
        .any(|contrib| contrib[..] == my_contribution[..])
}
/// Verify a contribution, given the old parameters and
/// the new parameters. Returns the hash of the contribution.
pub fn verify_contribution(
    before: &MPCParameters,
    after: &MPCParameters
) -> Result<[u8; 64], ()>
{
    // Transformation involves a single new object
    if after.contributions.len() != (before.contributions.len() + 1) {
        return Err(());
    }

    // None of the previous transformations should change
    if &before.contributions[..] != &after.contributions[0..before.contributions.len()] {
        return Err(());
    }

    // H/L will change, but should have same length
    if before.params.h.len() != after.params.h.len() {
        return Err(());
    }
    if before.params.l.len() != after.params.l.len() {
        return Err(());
    }

    // A/B_G1/B_G2 doesn't change at all
    if before.params.a != after.params.a {
        return Err(());
    }
    if before.params.b_g1 != after.params.b_g1 {
        return Err(());
    }
    if before.params.b_g2 != after.params.b_g2 {
        return Err(());
    }

    // alpha/beta/gamma don't change
    if before.params.vk.alpha_g1 != after.params.vk.alpha_g1 {
        return Err(());
    }
    if before.params.vk.beta_g1 != after.params.vk.beta_g1 {
        return Err(());
    }
    if before.params.vk.beta_g2 != after.params.vk.beta_g2 {
        return Err(());
    }
    if before.params.vk.gamma_g2 != after.params.vk.gamma_g2 {
        return Err(());
    }

    // IC shouldn't change, as gamma doesn't change
    if before.params.vk.ic != after.params.vk.ic {
        return Err(());
    }

    // cs_hash should be the same
    if &before.cs_hash[..] != &after.cs_hash[..] {
        return Err(());
    }

    // Rebuild the transcript up to (but not including) the new
    // contribution: cs_hash followed by every prior pubkey.
    let sink = io::sink();
    let mut sink = HashWriter::new(sink);
    sink.write_all(&before.cs_hash[..]).unwrap();
    for pubkey in &before.contributions {
        pubkey.write(&mut sink).unwrap();
    }

    // The new contribution is always the last pubkey.
    let pubkey = after.contributions.last().unwrap();

    // Absorb its s-pair to reproduce the digest it committed to.
    sink.write_all(pubkey.s.into_uncompressed().as_ref()).unwrap();
    sink.write_all(pubkey.s_delta.into_uncompressed().as_ref()).unwrap();
    let h = sink.into_hash();

    // The transcript must be consistent
    if &pubkey.transcript[..] != h.as_ref() {
        return Err(());
    }

    let r = hash_to_g2(h.as_ref()).into_affine();

    // Check the signature of knowledge
    if !same_ratio((r, pubkey.r_delta), (pubkey.s, pubkey.s_delta)) {
        return Err(());
    }

    // Check the change from the old delta is consistent
    if !same_ratio(
        (before.params.vk.delta_g1, pubkey.delta_after),
        (r, pubkey.r_delta)
    ) {
        return Err(());
    }

    // Current parameters should have consistent delta in G1
    if pubkey.delta_after != after.params.vk.delta_g1 {
        return Err(());
    }

    // Current parameters should have consistent delta in G2
    if !same_ratio(
        (G1Affine::one(), pubkey.delta_after),
        (G2Affine::one(), after.params.vk.delta_g2)
    ) {
        return Err(());
    }

    // H and L queries should be updated with delta^-1
    if !same_ratio(
        merge_pairs(&before.params.h, &after.params.h),
        (after.params.vk.delta_g2, before.params.vk.delta_g2) // reversed for inverse
    ) {
        return Err(());
    }
    if !same_ratio(
        merge_pairs(&before.params.l, &after.params.l),
        (after.params.vk.delta_g2, before.params.vk.delta_g2) // reversed for inverse
    ) {
        return Err(());
    }

    // The contribution hash is H(pubkey), matching what
    // `contribute` returned to the participant.
    let sink = io::sink();
    let mut sink = HashWriter::new(sink);
    pubkey.write(&mut sink).unwrap();
    let h = sink.into_hash();
    let mut response = [0u8; 64];
    response.copy_from_slice(h.as_ref());

    Ok(response)
}
/// Compute a keypair, given the current parameters. Keypairs
/// cannot be reused for multiple contributions or contributions
/// in different parameters.
pub fn keypair<R: Rng>(
    rng: &mut R,
    current: &MPCParameters,
) -> (PublicKey, PrivateKey)
{
    // Sample the secret delta for this contribution.
    let delta: Fr = rng.gen();

    // Delta s-pair in G1: a random point and its delta multiple.
    let s = G1::rand(rng).into_affine();
    let s_delta = s.mul(delta).into_affine();

    // Bind the contribution to the transcript so far:
    // H(cs_hash | <previous pubkeys> | s | s_delta)
    let digest = {
        let mut hasher = HashWriter::new(io::sink());
        hasher.write_all(&current.cs_hash[..]).unwrap();
        for pubkey in &current.contributions {
            pubkey.write(&mut hasher).unwrap();
        }
        hasher.write_all(s.into_uncompressed().as_ref()).unwrap();
        hasher.write_all(s_delta.into_uncompressed().as_ref()).unwrap();
        hasher.into_hash()
    };

    // Keep the raw digest around; this avoids making a weird
    // assumption about the hash into the group.
    let mut transcript = [0; 64];
    transcript.copy_from_slice(digest.as_ref());

    // Delta s-pair in G2, derived from the transcript digest.
    let r = hash_to_g2(digest.as_ref()).into_affine();
    let r_delta = r.mul(delta).into_affine();

    let public = PublicKey {
        delta_after: current.params.vk.delta_g1.mul(delta).into_affine(),
        s,
        s_delta,
        r_delta,
        transcript,
    };

    (public, PrivateKey { delta })
}

117
phase2/src/utils.rs Normal file

@ -0,0 +1,117 @@
extern crate bellman_ce;
extern crate rand;
extern crate byteorder;
use byteorder::{
BigEndian,
ReadBytesExt,
};
use std::sync::Arc;
use bellman_ce::pairing::{
ff::{
PrimeField,
},
CurveAffine,
CurveProjective,
Wnaf,
bn256::{
G2,
}
};
use rand::{
Rng,
Rand,
ChaChaRng,
SeedableRng
};
/// Checks if pairs have the same ratio.
///
/// Given `g1 = (a, a^x)` and `g2 = (b, b^y)`, returns `true`
/// iff `x == y`, using a single pairing equality check.
pub fn same_ratio<G1: CurveAffine>(
    g1: (G1, G1),
    g2: (G1::Pair, G1::Pair)
) -> bool
{
    let (base, base_x) = g1;
    let (other, other_x) = g2;

    // e(base, other^y) == e(base^x, other)  <=>  x == y
    base.pairing_with(&other_x) == base_x.pairing_with(&other)
}
/// Computes a random linear combination over v1/v2.
///
/// Checking that many pairs of elements are exponentiated by
/// the same `x` can be achieved (with high probability) with
/// the following technique:
///
/// Given v1 = [a, b, c] and v2 = [as, bs, cs], compute
/// (a*r1 + b*r2 + c*r3, (as)*r1 + (bs)*r2 + (cs)*r3) for some
/// random r1, r2, r3. Given (g, g^s)...
///
/// e(g, (as)*r1 + (bs)*r2 + (cs)*r3) = e(g^s, a*r1 + b*r2 + c*r3)
///
/// ... with high probability.
pub fn merge_pairs<G: CurveAffine>(v1: &[G], v2: &[G]) -> (G, G)
{
    use std::sync::Mutex;
    use rand::{thread_rng};

    assert_eq!(v1.len(), v2.len());

    // Split the work across all cores; the +1 keeps the chunk
    // size non-zero for small inputs.
    let chunk = (v1.len() / num_cpus::get()) + 1;

    // Shared accumulators for the two linear combinations.
    let s = Arc::new(Mutex::new(G::Projective::zero()));
    let sx = Arc::new(Mutex::new(G::Projective::zero()));

    crossbeam::scope(|scope| {
        for (v1, v2) in v1.chunks(chunk).zip(v2.chunks(chunk)) {
            let s = s.clone();
            let sx = sx.clone();

            scope.spawn(move || {
                // We do not need to be overly cautious of the RNG
                // used for this check.
                let rng = &mut thread_rng();

                let mut wnaf = Wnaf::new();
                let mut local_s = G::Projective::zero();
                let mut local_sx = G::Projective::zero();

                for (v1, v2) in v1.iter().zip(v2.iter()) {
                    // The same random scalar is applied to both
                    // elements of the pair.
                    let rho = G::Scalar::rand(rng);
                    let mut wnaf = wnaf.scalar(rho.into_repr());
                    let v1 = wnaf.base(v1.into_projective());
                    let v2 = wnaf.base(v2.into_projective());

                    local_s.add_assign(&v1);
                    local_sx.add_assign(&v2);
                }

                // Fold the per-thread sums into the shared totals.
                s.lock().unwrap().add_assign(&local_s);
                sx.lock().unwrap().add_assign(&local_sx);
            });
        }
    });

    let s = s.lock().unwrap().into_affine();
    let sx = sx.lock().unwrap().into_affine();

    (s, sx)
}
/// Hashes to G2 using the first 32 bytes of `digest`. Panics if `digest` is less
/// than 32 bytes.
pub fn hash_to_g2(mut digest: &[u8]) -> G2
{
    assert!(digest.len() >= 32);

    // Derive eight big-endian u32 words from the first 32 bytes of
    // the digest, then use them to seed a deterministic RNG.
    let seed: Vec<u32> = (0..8)
        .map(|_| digest.read_u32::<BigEndian>().expect("assertion above guarantees this to work"))
        .collect();

    ChaChaRng::from_seed(&seed).gen()
}