From 32bbd5f35c6e25d497a616ae4da5ba56872f832d Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Wed, 12 Feb 2020 14:46:33 +0200 Subject: [PATCH] chore: cargo fmt + make clippy happy (#9) --- bellman/src/groth16/verifier.rs | 2 +- pairing/src/lib.rs | 2 +- powersoftau/Cargo.lock | 6 +- powersoftau/Cargo.toml | 2 +- powersoftau/src/accumulator.rs | 258 +++--- powersoftau/src/batched_accumulator.rs | 864 ++++++++++++------ powersoftau/src/bin/beacon_constrained.rs | 139 +-- powersoftau/src/bin/compute_constrained.rs | 159 ++-- powersoftau/src/bin/new.rs | 24 +- powersoftau/src/bin/new_constrained.rs | 69 +- powersoftau/src/bin/prepare_phase2.rs | 161 ++-- powersoftau/src/bin/reduce_powers.rs | 114 ++- powersoftau/src/bin/verify.rs | 317 ++++--- .../src/bin/verify_transform_constrained.rs | 140 ++- powersoftau/src/bn256/mod.rs | 182 ++-- powersoftau/src/keypair.rs | 147 ++- powersoftau/src/lib.rs | 6 +- powersoftau/src/parameters.rs | 41 +- powersoftau/src/utils.rs | 220 ++--- 19 files changed, 1645 insertions(+), 1208 deletions(-) diff --git a/bellman/src/groth16/verifier.rs b/bellman/src/groth16/verifier.rs index 03de2b9..eef3b88 100644 --- a/bellman/src/groth16/verifier.rs +++ b/bellman/src/groth16/verifier.rs @@ -62,6 +62,6 @@ pub fn verify_proof<'a, E: Engine>( (&proof.a.prepare(), &proof.b.prepare()), (&acc.into_affine().prepare(), &pvk.neg_gamma_g2), (&proof.c.prepare(), &pvk.neg_delta_g2) - ].into_iter()) + ].iter()) ).unwrap() == pvk.alpha_g1_beta_g2) } diff --git a/pairing/src/lib.rs b/pairing/src/lib.rs index 8c1a255..14987eb 100644 --- a/pairing/src/lib.rs +++ b/pairing/src/lib.rs @@ -104,7 +104,7 @@ pub trait Engine: ScalarEngine { G2: Into, { Self::final_exponentiation(&Self::miller_loop( - [(&(p.into().prepare()), &(q.into().prepare()))].into_iter(), + [(&(p.into().prepare()), &(q.into().prepare()))].iter(), )).unwrap() } } diff --git a/powersoftau/Cargo.lock b/powersoftau/Cargo.lock index 2e72aeb..3ce7bfb 100644 --- a/powersoftau/Cargo.lock +++ b/powersoftau/Cargo.lock @@ -216,7 +216,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "hex-literal" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "hex-literal-impl 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -317,7 +317,7 @@ dependencies = [ "crossbeam 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "exitcode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "generic-array 0.8.3 (registry+https://github.com/rust-lang/crates.io-index)", - "hex-literal 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "hex-literal 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -505,7 +505,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum gcc 0.3.55 (registry+https://github.com/rust-lang/crates.io-index)" = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2" "checksum generic-array 0.8.3 (registry+https://github.com/rust-lang/crates.io-index)" = "fceb69994e330afed50c93524be68c42fa898c2d9fd4ee8da03bd7363acd26f2" "checksum hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "805026a5d0141ffc30abb3be3173848ad46a1b1664fe632428479619a3644d77" -"checksum hex-literal 
0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "27455ce8b4a6666c87220e4b59c9a83995476bdadc10197905e61dbe906e36fa" +"checksum hex-literal 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "ddc2928beef125e519d69ae1baa8c37ea2e0d3848545217f6db0179c5eb1d639" "checksum hex-literal-impl 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1d340b6514f232f6db1bd16db65302a5278a04fef9ce867cb932e7e5fa21130a" "checksum itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5b8467d9c1cebe26feb08c640139247fac215782d35371ade9a2136ed6085358" "checksum lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bc5729f27f159ddd61f4df6228e827e86643d4d3e7c32183cb30a1c08f604a14" diff --git a/powersoftau/Cargo.toml b/powersoftau/Cargo.toml index 4df30c1..b43968f 100644 --- a/powersoftau/Cargo.toml +++ b/powersoftau/Cargo.toml @@ -18,7 +18,7 @@ blake2 = "0.6.1" generic-array = "0.8.3" typenum = "1.9.0" byteorder = "1.1.0" -hex-literal = "0.1" +hex-literal = "0.1.4" rust-crypto = "0.2" exitcode = "1.1.2" diff --git a/powersoftau/src/accumulator.rs b/powersoftau/src/accumulator.rs index f0054a0..1280240 100644 --- a/powersoftau/src/accumulator.rs +++ b/powersoftau/src/accumulator.rs @@ -25,33 +25,23 @@ //! After some time has elapsed for participants to contribute to the ceremony, a participant is //! simulated with a randomness beacon. The resulting `Accumulator` contains partial zk-SNARK //! public parameters for all circuits within a bounded size. -extern crate rand; -extern crate crossbeam; -extern crate num_cpus; -extern crate blake2; -extern crate generic_array; -extern crate typenum; -extern crate byteorder; -extern crate bellman_ce; -extern crate memmap; +use bellman_ce::pairing::{ + ff::{Field, PrimeField}, + CurveAffine, CurveProjective, EncodedPoint, Engine, Wnaf, +}; +use blake2::{Blake2b, Digest}; + +use generic_array::GenericArray; -use memmap::{Mmap, MmapMut}; -use bellman_ce::pairing::ff::{Field, PrimeField}; -use byteorder::{ReadBytesExt, BigEndian}; -use rand::{SeedableRng, Rng, Rand}; -use rand::chacha::ChaChaRng; -use bellman_ce::pairing::bn256::{Bn256}; -use bellman_ce::pairing::*; use std::io::{self, Read, Write}; use std::sync::{Arc, Mutex}; -use generic_array::GenericArray; use typenum::consts::U64; -use blake2::{Blake2b, Digest}; -use std::fmt; -use super::keypair::*; -use super::utils::*; -use super::parameters::*; +use super::keypair::{PrivateKey, PublicKey}; +use super::parameters::{ + CheckForCorrectness, DeserializationError, PowersOfTauParameters, UseCompression, +}; +use super::utils::{hash_to_g2, power_pairs, same_ratio, write_point}; /// The `Accumulator` is an object that participants of the ceremony contribute /// randomness to. 
This object contains powers of trapdoor `tau` in G1 and in G2 over @@ -73,20 +63,20 @@ pub struct Accumulator { /// beta pub beta_g2: E::G2Affine, /// Keep parameters here - pub parameters: P + pub parameters: P, } impl PartialEq for Accumulator { fn eq(&self, other: &Accumulator) -> bool { - self.tau_powers_g1.eq(&other.tau_powers_g1) && - self.tau_powers_g2.eq(&other.tau_powers_g2) && - self.alpha_tau_powers_g1.eq(&other.alpha_tau_powers_g1) && - self.beta_tau_powers_g1.eq(&other.beta_tau_powers_g1) && - self.beta_g2 == other.beta_g2 + self.tau_powers_g1.eq(&other.tau_powers_g1) + && self.tau_powers_g2.eq(&other.tau_powers_g2) + && self.alpha_tau_powers_g1.eq(&other.alpha_tau_powers_g1) + && self.beta_tau_powers_g1.eq(&other.beta_tau_powers_g1) + && self.beta_g2 == other.beta_g2 } } -impl Accumulator { +impl Accumulator { /// Constructs an "initial" accumulator with τ = 1, α = 1, β = 1. pub fn new(parameters: P) -> Self { Accumulator { @@ -95,7 +85,7 @@ impl Accumulator { alpha_tau_powers_g1: vec![E::G1Affine::one(); P::TAU_POWERS_LENGTH], beta_tau_powers_g1: vec![E::G1Affine::one(); P::TAU_POWERS_LENGTH], beta_g2: E::G2Affine::one(), - parameters: parameters + parameters, } } @@ -103,15 +93,13 @@ impl Accumulator { pub fn serialize( &self, writer: &mut W, - compression: UseCompression - ) -> io::Result<()> - { + compression: UseCompression, + ) -> io::Result<()> { fn write_all( writer: &mut W, c: &[C], - compression: UseCompression - ) -> io::Result<()> - { + compression: UseCompression, + ) -> io::Result<()> { for c in c { write_point(writer, c, compression)?; } @@ -135,22 +123,19 @@ impl Accumulator { reader: &mut R, compression: UseCompression, checked: CheckForCorrectness, - parameters: P - ) -> Result - { - fn read_all > ( + parameters: P, + ) -> Result { + fn read_all>( reader: &mut R, size: usize, compression: UseCompression, - checked: CheckForCorrectness - ) -> Result, DeserializationError> - { + checked: CheckForCorrectness, + ) -> Result, DeserializationError> { fn decompress_all( reader: &mut R, size: usize, - checked: CheckForCorrectness - ) -> Result, DeserializationError> - { + checked: CheckForCorrectness, + ) -> Result, DeserializationError> { // Read the encoded elements let mut res = vec![ENC::empty(); size]; @@ -171,7 +156,10 @@ impl Accumulator { let decoding_error = Arc::new(Mutex::new(None)); crossbeam::scope(|scope| { - for (source, target) in res.chunks(chunk_size).zip(res_affine.chunks_mut(chunk_size)) { + for (source, target) in res + .chunks(chunk_size) + .zip(res_affine.chunks_mut(chunk_size)) + { let decoding_error = decoding_error.clone(); scope.spawn(move || { @@ -185,21 +173,24 @@ impl Accumulator { match checked { CheckForCorrectness::Yes => { // Points at infinity are never expected in the accumulator - source.into_affine().map_err(|e| e.into()).and_then(|source| { - if source.is_zero() { - Err(DeserializationError::PointAtInfinity) - } else { - Ok(source) - } - }) - }, - CheckForCorrectness::No => source.into_affine_unchecked().map_err(|e| e.into()) + source.into_affine().map_err(|e| e.into()).and_then( + |source| { + if source.is_zero() { + Err(DeserializationError::PointAtInfinity) + } else { + Ok(source) + } + }, + ) + } + CheckForCorrectness::No => { + source.into_affine_unchecked().map_err(|e| e.into()) + } } - } - { + } { Ok(source) => { *target = source; - }, + } Err(e) => { *decoding_error.lock().unwrap() = Some(e); } @@ -209,41 +200,44 @@ impl Accumulator { } }); - match Arc::try_unwrap(decoding_error).unwrap().into_inner().unwrap() { - 
Some(e) => { - Err(e) - }, - None => { - Ok(res_affine) - } + match Arc::try_unwrap(decoding_error) + .unwrap() + .into_inner() + .unwrap() + { + Some(e) => Err(e), + None => Ok(res_affine), } } match compression { UseCompression::Yes => decompress_all::<_, C::Compressed>(reader, size, checked), - UseCompression::No => decompress_all::<_, C::Uncompressed>(reader, size, checked) + UseCompression::No => decompress_all::<_, C::Uncompressed>(reader, size, checked), } } - let tau_powers_g1 = read_all::(reader, P::TAU_POWERS_G1_LENGTH, compression, checked)?; - let tau_powers_g2 = read_all::(reader, P::TAU_POWERS_LENGTH, compression, checked)?; - let alpha_tau_powers_g1 = read_all::(reader, P::TAU_POWERS_LENGTH, compression, checked)?; - let beta_tau_powers_g1 = read_all::(reader, P::TAU_POWERS_LENGTH, compression, checked)?; + let tau_powers_g1 = + read_all::(reader, P::TAU_POWERS_G1_LENGTH, compression, checked)?; + let tau_powers_g2 = + read_all::(reader, P::TAU_POWERS_LENGTH, compression, checked)?; + let alpha_tau_powers_g1 = + read_all::(reader, P::TAU_POWERS_LENGTH, compression, checked)?; + let beta_tau_powers_g1 = + read_all::(reader, P::TAU_POWERS_LENGTH, compression, checked)?; let beta_g2 = read_all::(reader, 1, compression, checked)?[0]; Ok(Accumulator { - tau_powers_g1: tau_powers_g1, - tau_powers_g2: tau_powers_g2, - alpha_tau_powers_g1: alpha_tau_powers_g1, - beta_tau_powers_g1: beta_tau_powers_g1, - beta_g2: beta_g2, - parameters: parameters + tau_powers_g1, + tau_powers_g2, + alpha_tau_powers_g1, + beta_tau_powers_g1, + beta_g2, + parameters, }) } /// Transforms the accumulator with a private key. - pub fn transform(&mut self, key: &PrivateKey) - { + pub fn transform(&mut self, key: &PrivateKey) { // Construct the powers of tau let mut taupowers = vec![E::Fr::zero(); P::TAU_POWERS_G1_LENGTH]; let chunk_size = P::TAU_POWERS_G1_LENGTH / num_cpus::get(); @@ -264,30 +258,35 @@ impl Accumulator { /// Exponentiate a large number of points, with an optional coefficient to be applied to the /// exponent. - fn batch_exp >(bases: &mut [C], exp: &[C::Scalar], coeff: Option<&C::Scalar>) { + fn batch_exp>( + bases: &mut [C], + exp: &[C::Scalar], + coeff: Option<&C::Scalar>, + ) { assert_eq!(bases.len(), exp.len()); let mut projective = vec![C::Projective::zero(); bases.len()]; let chunk_size = bases.len() / num_cpus::get(); // Perform wNAF over multiple cores, placing results into `projective`. 
crossbeam::scope(|scope| { - for ((bases, exp), projective) in bases.chunks_mut(chunk_size) - .zip(exp.chunks(chunk_size)) - .zip(projective.chunks_mut(chunk_size)) + for ((bases, exp), projective) in bases + .chunks_mut(chunk_size) + .zip(exp.chunks(chunk_size)) + .zip(projective.chunks_mut(chunk_size)) { scope.spawn(move || { let mut wnaf = Wnaf::new(); - for ((base, exp), projective) in bases.iter_mut() - .zip(exp.iter()) - .zip(projective.iter_mut()) + for ((base, exp), projective) in + bases.iter_mut().zip(exp.iter()).zip(projective.iter_mut()) { let mut exp = *exp; if let Some(coeff) = coeff { exp.mul_assign(coeff); } - *projective = wnaf.base(base.into_projective(), 1).scalar(exp.into_repr()); + *projective = + wnaf.base(base.into_projective(), 1).scalar(exp.into_repr()); } }); } @@ -295,8 +294,7 @@ impl Accumulator { // Perform batch normalization crossbeam::scope(|scope| { - for projective in projective.chunks_mut(chunk_size) - { + for projective in projective.chunks_mut(chunk_size) { scope.spawn(move || { C::Projective::batch_normalization(projective); }); @@ -310,16 +308,32 @@ impl Accumulator { } batch_exp::(&mut self.tau_powers_g1, &taupowers[0..], None); - batch_exp::(&mut self.tau_powers_g2, &taupowers[0..P::TAU_POWERS_LENGTH], None); - batch_exp::(&mut self.alpha_tau_powers_g1, &taupowers[0..P::TAU_POWERS_LENGTH], Some(&key.alpha)); - batch_exp::(&mut self.beta_tau_powers_g1, &taupowers[0..P::TAU_POWERS_LENGTH], Some(&key.beta)); + batch_exp::( + &mut self.tau_powers_g2, + &taupowers[0..P::TAU_POWERS_LENGTH], + None, + ); + batch_exp::( + &mut self.alpha_tau_powers_g1, + &taupowers[0..P::TAU_POWERS_LENGTH], + Some(&key.alpha), + ); + batch_exp::( + &mut self.beta_tau_powers_g1, + &taupowers[0..P::TAU_POWERS_LENGTH], + Some(&key.beta), + ); self.beta_g2 = self.beta_g2.mul(key.beta).into_affine(); } } /// Verifies a transformation of the `Accumulator` with the `PublicKey`, given a 64-byte transcript `digest`. -pub fn verify_transform(before: &Accumulator, after: &Accumulator, key: &PublicKey, digest: &[u8]) -> bool -{ +pub fn verify_transform( + before: &Accumulator, + after: &Accumulator, + key: &PublicKey, + digest: &[u8], +) -> bool { assert_eq!(digest.len(), 64); let compute_g2_s = |g1_s: E::G1Affine, g1_s_x: E::G1Affine, personalization: u8| { @@ -336,7 +350,7 @@ pub fn verify_transform(before: &Accumulato let beta_g2_s = compute_g2_s(key.beta_g1.0, key.beta_g1.1, 2); // Check the proofs-of-knowledge for tau/alpha/beta - + // g1^s / g1^(s*x) = g2^s / g2^(s*x) if !same_ratio(key.tau_g1, (tau_g2_s, key.tau_g2)) { return false; @@ -357,54 +371,76 @@ pub fn verify_transform(before: &Accumulato } // Did the participant multiply the previous tau by the new one? - if !same_ratio((before.tau_powers_g1[1], after.tau_powers_g1[1]), (tau_g2_s, key.tau_g2)) { + if !same_ratio( + (before.tau_powers_g1[1], after.tau_powers_g1[1]), + (tau_g2_s, key.tau_g2), + ) { return false; } // Did the participant multiply the previous alpha by the new one? - if !same_ratio((before.alpha_tau_powers_g1[0], after.alpha_tau_powers_g1[0]), (alpha_g2_s, key.alpha_g2)) { + if !same_ratio( + (before.alpha_tau_powers_g1[0], after.alpha_tau_powers_g1[0]), + (alpha_g2_s, key.alpha_g2), + ) { return false; } // Did the participant multiply the previous beta by the new one? 
- if !same_ratio((before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (beta_g2_s, key.beta_g2)) { + if !same_ratio( + (before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), + (beta_g2_s, key.beta_g2), + ) { return false; } - if !same_ratio((before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (before.beta_g2, after.beta_g2)) { + if !same_ratio( + (before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), + (before.beta_g2, after.beta_g2), + ) { return false; } // Are the powers of tau correct? - if !same_ratio(power_pairs(&after.tau_powers_g1), (after.tau_powers_g2[0], after.tau_powers_g2[1])) { + if !same_ratio( + power_pairs(&after.tau_powers_g1), + (after.tau_powers_g2[0], after.tau_powers_g2[1]), + ) { return false; } - if !same_ratio(power_pairs(&after.tau_powers_g2), (after.tau_powers_g1[0], after.tau_powers_g1[1])) { + if !same_ratio( + power_pairs(&after.tau_powers_g2), + (after.tau_powers_g1[0], after.tau_powers_g1[1]), + ) { return false; } - if !same_ratio(power_pairs(&after.alpha_tau_powers_g1), (after.tau_powers_g2[0], after.tau_powers_g2[1])) { + if !same_ratio( + power_pairs(&after.alpha_tau_powers_g1), + (after.tau_powers_g2[0], after.tau_powers_g2[1]), + ) { return false; } - if !same_ratio(power_pairs(&after.beta_tau_powers_g1), (after.tau_powers_g2[0], after.tau_powers_g2[1])) { + if !same_ratio( + power_pairs(&after.beta_tau_powers_g1), + (after.tau_powers_g2[0], after.tau_powers_g2[1]), + ) { return false; } true } - - /// Abstraction over a reader which hashes the data being read. pub struct HashReader { reader: R, - hasher: Blake2b + hasher: Blake2b, } impl HashReader { /// Construct a new `HashReader` given an existing `reader` by value. pub fn new(reader: R) -> Self { HashReader { - reader: reader, - hasher: Blake2b::default() + reader, + hasher: Blake2b::default(), } } @@ -429,15 +465,15 @@ impl Read for HashReader { /// Abstraction over a writer which hashes the data being written. pub struct HashWriter { writer: W, - hasher: Blake2b + hasher: Blake2b, } impl HashWriter { /// Construct a new `HashWriter` given an existing `writer` by value. 
pub fn new(writer: W) -> Self { HashWriter { - writer: writer, - hasher: Blake2b::default() + writer, + hasher: Blake2b::default(), } } diff --git a/powersoftau/src/batched_accumulator.rs b/powersoftau/src/batched_accumulator.rs index a0984e8..4ff215d 100644 --- a/powersoftau/src/batched_accumulator.rs +++ b/powersoftau/src/batched_accumulator.rs @@ -1,37 +1,24 @@ /// Memory constrained accumulator that checks parts of the initial information in parts that fit to memory /// and then contributes to entropy in parts as well +use bellman_ce::pairing::ff::{Field, PrimeField}; +use bellman_ce::pairing::*; +use blake2::{Blake2b, Digest}; -extern crate rand; -extern crate crossbeam; -extern crate num_cpus; -extern crate blake2; -extern crate generic_array; -extern crate typenum; -extern crate byteorder; -extern crate bellman_ce; -extern crate memmap; -extern crate itertools; - +use generic_array::GenericArray; use itertools::Itertools; use memmap::{Mmap, MmapMut}; -use bellman_ce::pairing::ff::{Field, PrimeField}; -use byteorder::{ReadBytesExt, BigEndian}; -use rand::{SeedableRng, Rng, Rand}; -use rand::chacha::ChaChaRng; -use bellman_ce::pairing::bn256::{Bn256}; -use bellman_ce::pairing::*; + use std::io::{self, Read, Write}; use std::sync::{Arc, Mutex}; -use generic_array::GenericArray; use typenum::consts::U64; -use blake2::{Blake2b, Digest}; -use std::fmt; -use super::keypair::*; -use super::utils::*; -use super::parameters::*; +use super::keypair::{PrivateKey, PublicKey}; +use super::parameters::{ + CheckForCorrectness, DeserializationError, ElementType, PowersOfTauParameters, UseCompression, +}; +use super::utils::{blank_hash, compute_g2_s, power_pairs, same_ratio}; -pub enum AccumulatorState{ +pub enum AccumulatorState { Empty, NonEmpty, Transformed, @@ -61,14 +48,12 @@ pub struct BatchedAccumulator { marker: std::marker::PhantomData
<P>
, } -impl BatchedAccumulator { +impl BatchedAccumulator { /// Calculate the contribution hash from the resulting file. Original powers of tau implementation /// used a specially formed writer to write to the file and calculate a hash on the fly, but memory-constrained /// implementation now writes without a particular order, so plain recalculation at the end /// of the procedure is more efficient - pub fn calculate_hash( - input_map: &Mmap - ) -> GenericArray { + pub fn calculate_hash(input_map: &Mmap) -> GenericArray { let chunk_size = 1 << 30; // read by 1GB from map let mut hasher = Blake2b::default(); for chunk in input_map.chunks(chunk_size) { @@ -78,7 +63,7 @@ impl BatchedAccumulator { } } -impl BatchedAccumulator { +impl BatchedAccumulator { pub fn empty() -> Self { Self { tau_powers_g1: vec![], @@ -87,41 +72,33 @@ impl BatchedAccumulator { beta_tau_powers_g1: vec![], beta_g2: E::G2Affine::zero(), hash: blank_hash(), - marker: std::marker::PhantomData::

<P> {} + marker: std::marker::PhantomData::<P>

{}, } } } -impl BatchedAccumulator { +impl BatchedAccumulator { fn g1_size(compression: UseCompression) -> usize { match compression { - UseCompression::Yes => { - return P::G1_COMPRESSED_BYTE_SIZE; - }, - UseCompression::No => { - return P::G1_UNCOMPRESSED_BYTE_SIZE; - } + UseCompression::Yes => P::G1_COMPRESSED_BYTE_SIZE, + UseCompression::No => P::G1_UNCOMPRESSED_BYTE_SIZE, } } fn g2_size(compression: UseCompression) -> usize { match compression { - UseCompression::Yes => { - return P::G2_COMPRESSED_BYTE_SIZE; - }, - UseCompression::No => { - return P::G2_UNCOMPRESSED_BYTE_SIZE; - } + UseCompression::Yes => P::G2_COMPRESSED_BYTE_SIZE, + UseCompression::No => P::G2_UNCOMPRESSED_BYTE_SIZE, } } fn get_size(element_type: ElementType, compression: UseCompression) -> usize { - let size = match element_type { - ElementType::AlphaG1 | ElementType::BetaG1 | ElementType::TauG1 => { Self::g1_size(compression) }, - ElementType::BetaG2 | ElementType::TauG2 => { Self::g2_size(compression) } - }; - - size + match element_type { + ElementType::AlphaG1 | ElementType::BetaG1 | ElementType::TauG1 => { + Self::g1_size(compression) + } + ElementType::BetaG2 | ElementType::TauG2 => Self::g2_size(compression), + } } /// File expected structure @@ -133,7 +110,11 @@ impl BatchedAccumulator { /// One G2 point for beta /// Public key appended to the end of file, but it's irrelevant for an accumulator itself - fn calculate_mmap_position(index: usize, element_type: ElementType, compression: UseCompression) -> usize { + fn calculate_mmap_position( + index: usize, + element_type: ElementType, + compression: UseCompression, + ) -> usize { let g1_size = Self::g1_size(compression); let g2_size = Self::g2_size(compression); let required_tau_g1_power = P::TAU_POWERS_G1_LENGTH; @@ -142,37 +123,65 @@ impl BatchedAccumulator { ElementType::TauG1 => { let mut position = 0; position += g1_size * index; - assert!(index < P::TAU_POWERS_G1_LENGTH, format!("Index of TauG1 element written must not exceed {}, while it's {}", P::TAU_POWERS_G1_LENGTH, index)); + assert!( + index < P::TAU_POWERS_G1_LENGTH, + format!( + "Index of TauG1 element written must not exceed {}, while it's {}", + P::TAU_POWERS_G1_LENGTH, + index + ) + ); position - }, + } ElementType::TauG2 => { let mut position = 0; position += g1_size * required_tau_g1_power; - assert!(index < P::TAU_POWERS_LENGTH, format!("Index of TauG2 element written must not exceed {}, while it's {}", P::TAU_POWERS_LENGTH, index)); + assert!( + index < P::TAU_POWERS_LENGTH, + format!( + "Index of TauG2 element written must not exceed {}, while it's {}", + P::TAU_POWERS_LENGTH, + index + ) + ); position += g2_size * index; position - }, + } ElementType::AlphaG1 => { let mut position = 0; position += g1_size * required_tau_g1_power; position += g2_size * required_power; - assert!(index < P::TAU_POWERS_LENGTH, format!("Index of AlphaG1 element written must not exceed {}, while it's {}", P::TAU_POWERS_LENGTH, index)); + assert!( + index < P::TAU_POWERS_LENGTH, + format!( + "Index of AlphaG1 element written must not exceed {}, while it's {}", + P::TAU_POWERS_LENGTH, + index + ) + ); position += g1_size * index; position - }, + } ElementType::BetaG1 => { let mut position = 0; position += g1_size * required_tau_g1_power; position += g2_size * required_power; position += g1_size * required_power; - assert!(index < P::TAU_POWERS_LENGTH, format!("Index of BetaG1 element written must not exceed {}, while it's {}", P::TAU_POWERS_LENGTH, index)); + assert!( + index < P::TAU_POWERS_LENGTH, + format!( 
+ "Index of BetaG1 element written must not exceed {}, while it's {}", + P::TAU_POWERS_LENGTH, + index + ) + ); position += g1_size * index; position - }, + } ElementType::BetaG2 => { let mut position = 0; position += g1_size * required_tau_g1_power; @@ -189,8 +198,12 @@ impl BatchedAccumulator { } /// Verifies a transformation of the `Accumulator` with the `PublicKey`, given a 64-byte transcript `digest`. -pub fn verify_transform(before: &BatchedAccumulator, after: &BatchedAccumulator, key: &PublicKey, digest: &[u8]) -> bool -{ +pub fn verify_transform( + before: &BatchedAccumulator, + after: &BatchedAccumulator, + key: &PublicKey, + digest: &[u8], +) -> bool { assert_eq!(digest.len(), 64); let tau_g2_s = compute_g2_s::(&digest, &key.tau_g1.0, &key.tau_g1.1, 0); @@ -219,42 +232,67 @@ pub fn verify_transform(before: &BatchedAcc } // Did the participant multiply the previous tau by the new one? - if !same_ratio((before.tau_powers_g1[1], after.tau_powers_g1[1]), (tau_g2_s, key.tau_g2)) { + if !same_ratio( + (before.tau_powers_g1[1], after.tau_powers_g1[1]), + (tau_g2_s, key.tau_g2), + ) { return false; } // Did the participant multiply the previous alpha by the new one? - if !same_ratio((before.alpha_tau_powers_g1[0], after.alpha_tau_powers_g1[0]), (alpha_g2_s, key.alpha_g2)) { + if !same_ratio( + (before.alpha_tau_powers_g1[0], after.alpha_tau_powers_g1[0]), + (alpha_g2_s, key.alpha_g2), + ) { return false; } // Did the participant multiply the previous beta by the new one? - if !same_ratio((before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (beta_g2_s, key.beta_g2)) { + if !same_ratio( + (before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), + (beta_g2_s, key.beta_g2), + ) { return false; } - if !same_ratio((before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (before.beta_g2, after.beta_g2)) { + if !same_ratio( + (before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), + (before.beta_g2, after.beta_g2), + ) { return false; } // Are the powers of tau correct? - if !same_ratio(power_pairs(&after.tau_powers_g1), (after.tau_powers_g2[0], after.tau_powers_g2[1])) { + if !same_ratio( + power_pairs(&after.tau_powers_g1), + (after.tau_powers_g2[0], after.tau_powers_g2[1]), + ) { return false; } - if !same_ratio(power_pairs(&after.tau_powers_g2), (after.tau_powers_g1[0], after.tau_powers_g1[1])) { + if !same_ratio( + power_pairs(&after.tau_powers_g2), + (after.tau_powers_g1[0], after.tau_powers_g1[1]), + ) { return false; } - if !same_ratio(power_pairs(&after.alpha_tau_powers_g1), (after.tau_powers_g2[0], after.tau_powers_g2[1])) { + if !same_ratio( + power_pairs(&after.alpha_tau_powers_g1), + (after.tau_powers_g2[0], after.tau_powers_g2[1]), + ) { return false; } - if !same_ratio(power_pairs(&after.beta_tau_powers_g1), (after.tau_powers_g2[0], after.tau_powers_g2[1])) { + if !same_ratio( + power_pairs(&after.beta_tau_powers_g1), + (after.tau_powers_g2[0], after.tau_powers_g2[1]), + ) { return false; } true } -impl BatchedAccumulator { +impl BatchedAccumulator { /// Verifies a transformation of the `Accumulator` with the `PublicKey`, given a 64-byte transcript `digest`. 
+ #[allow(clippy::too_many_arguments)] pub fn verify_transformation( input_map: &Mmap, output_map: &Mmap, @@ -264,9 +302,8 @@ impl BatchedAccumulator { output_is_compressed: UseCompression, check_input_for_correctness: CheckForCorrectness, check_output_for_correctness: CheckForCorrectness, - ) -> bool - { - use itertools::MinMaxResult::{MinMax}; + ) -> bool { + use itertools::MinMaxResult::MinMax; assert_eq!(digest.len(), 64); let tau_g2_s = compute_g2_s::(&digest, &key.tau_g1.0, &key.tau_g1.1, 0); @@ -298,8 +335,24 @@ impl BatchedAccumulator { { let chunk_size = 2; - before.read_chunk(0, chunk_size, input_is_compressed, check_input_for_correctness, &input_map).expect("must read a first chunk from `challenge`"); - after.read_chunk(0, chunk_size, output_is_compressed, check_output_for_correctness, &output_map).expect("must read a first chunk from `response`"); + before + .read_chunk( + 0, + chunk_size, + input_is_compressed, + check_input_for_correctness, + &input_map, + ) + .expect("must read a first chunk from `challenge`"); + after + .read_chunk( + 0, + chunk_size, + output_is_compressed, + check_output_for_correctness, + &output_map, + ) + .expect("must read a first chunk from `response`"); // Check the correctness of the generators for tau powers if after.tau_powers_g1[0] != E::G1Affine::one() { @@ -312,59 +365,114 @@ impl BatchedAccumulator { } // Did the participant multiply the previous tau by the new one? - if !same_ratio((before.tau_powers_g1[1], after.tau_powers_g1[1]), (tau_g2_s, key.tau_g2)) { + if !same_ratio( + (before.tau_powers_g1[1], after.tau_powers_g1[1]), + (tau_g2_s, key.tau_g2), + ) { println!("Invalid ratio (before.tau_powers_g1[1], after.tau_powers_g1[1]), (tau_g2_s, key.tau_g2)"); return false; } // Did the participant multiply the previous alpha by the new one? - if !same_ratio((before.alpha_tau_powers_g1[0], after.alpha_tau_powers_g1[0]), (alpha_g2_s, key.alpha_g2)) { + if !same_ratio( + (before.alpha_tau_powers_g1[0], after.alpha_tau_powers_g1[0]), + (alpha_g2_s, key.alpha_g2), + ) { println!("Invalid ratio (before.alpha_tau_powers_g1[0], after.alpha_tau_powers_g1[0]), (alpha_g2_s, key.alpha_g2)"); return false; } // Did the participant multiply the previous beta by the new one? - if !same_ratio((before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (beta_g2_s, key.beta_g2)) { + if !same_ratio( + (before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), + (beta_g2_s, key.beta_g2), + ) { println!("Invalid ratio (before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (beta_g2_s, key.beta_g2)"); return false; } - if !same_ratio((before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (before.beta_g2, after.beta_g2)) { + if !same_ratio( + (before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), + (before.beta_g2, after.beta_g2), + ) { println!("Invalid ratio (before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (before.beta_g2, after.beta_g2)"); return false; } - } - let tau_powers_g2_0 = after.tau_powers_g2[0].clone(); - let tau_powers_g2_1 = after.tau_powers_g2[1].clone(); - let tau_powers_g1_0 = after.tau_powers_g1[0].clone(); - let tau_powers_g1_1 = after.tau_powers_g1[1].clone(); + let tau_powers_g2_0 = after.tau_powers_g2[0]; + let tau_powers_g2_1 = after.tau_powers_g2[1]; + let tau_powers_g1_0 = after.tau_powers_g1[0]; + let tau_powers_g1_1 = after.tau_powers_g1[1]; // Read by parts and just verify same ratios. 
Cause of two fixed variables above with tau_powers_g2_1 = tau_powers_g2_0 ^ s // one does not need to care about some overlapping let mut tau_powers_last_first_chunks = vec![E::G1Affine::zero(); 2]; - for chunk in &(0..P::TAU_POWERS_LENGTH).into_iter().chunks(P::EMPIRICAL_BATCH_SIZE) { + for chunk in &(0..P::TAU_POWERS_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE) { if let MinMax(start, end) = chunk.minmax() { // extra 1 to ensure intersection between chunks and ensure we don't overflow - let size = end - start + 1 + if end == P::TAU_POWERS_LENGTH - 1 { 0 } else { 1 }; - before.read_chunk(start, size, input_is_compressed, check_input_for_correctness, &input_map).expect(&format!("must read a chunk from {} to {} from `challenge`", start, end)); - after.read_chunk(start, size, output_is_compressed, check_output_for_correctness, &output_map).expect(&format!("must read a chunk from {} to {} from `response`", start, end)); + let size = end - start + + 1 + + if end == P::TAU_POWERS_LENGTH - 1 { + 0 + } else { + 1 + }; + before + .read_chunk( + start, + size, + input_is_compressed, + check_input_for_correctness, + &input_map, + ) + .unwrap_or_else(|_| { + panic!(format!( + "must read a chunk from {} to {} from `challenge`", + start, end + )) + }); + after + .read_chunk( + start, + size, + output_is_compressed, + check_output_for_correctness, + &output_map, + ) + .unwrap_or_else(|_| { + panic!(format!( + "must read a chunk from {} to {} from `response`", + start, end + )) + }); // Are the powers of tau correct? - if !same_ratio(power_pairs(&after.tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)) { + if !same_ratio( + power_pairs(&after.tau_powers_g1), + (tau_powers_g2_0, tau_powers_g2_1), + ) { println!("Invalid ratio power_pairs(&after.tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)"); return false; } - if !same_ratio(power_pairs(&after.tau_powers_g2), (tau_powers_g1_0, tau_powers_g1_1)) { + if !same_ratio( + power_pairs(&after.tau_powers_g2), + (tau_powers_g1_0, tau_powers_g1_1), + ) { println!("Invalid ratio power_pairs(&after.tau_powers_g2), (tau_powers_g1_0, tau_powers_g1_1)"); return false; } - if !same_ratio(power_pairs(&after.alpha_tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)) { + if !same_ratio( + power_pairs(&after.alpha_tau_powers_g1), + (tau_powers_g2_0, tau_powers_g2_1), + ) { println!("Invalid ratio power_pairs(&after.alpha_tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)"); return false; } - if !same_ratio(power_pairs(&after.beta_tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)) { + if !same_ratio( + power_pairs(&after.beta_tau_powers_g1), + (tau_powers_g2_0, tau_powers_g2_1), + ) { println!("Invalid ratio power_pairs(&after.beta_tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)"); return false; } @@ -377,18 +485,63 @@ impl BatchedAccumulator { } } - for chunk in &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).into_iter().chunks(P::EMPIRICAL_BATCH_SIZE) { + for chunk in + &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE) + { if let MinMax(start, end) = chunk.minmax() { // extra 1 to ensure intersection between chunks and ensure we don't overflow - let size = end - start + 1 + if end == P::TAU_POWERS_G1_LENGTH - 1 { 0 } else { 1 }; - before.read_chunk(start, size, input_is_compressed, check_input_for_correctness, &input_map).expect(&format!("must read a chunk from {} to {} from `challenge`", start, end)); - after.read_chunk(start, size, output_is_compressed, check_output_for_correctness, &output_map).expect(&format!("must read a chunk from {} 
to {} from `response`", start, end)); + let size = end - start + + 1 + + if end == P::TAU_POWERS_G1_LENGTH - 1 { + 0 + } else { + 1 + }; + before + .read_chunk( + start, + size, + input_is_compressed, + check_input_for_correctness, + &input_map, + ) + .unwrap_or_else(|_| { + panic!(format!( + "must read a chunk from {} to {} from `challenge`", + start, end + )) + }); + after + .read_chunk( + start, + size, + output_is_compressed, + check_output_for_correctness, + &output_map, + ) + .unwrap_or_else(|_| { + panic!(format!( + "must read a chunk from {} to {} from `response`", + start, end + )) + }); - assert_eq!(before.tau_powers_g2.len(), 0, "during rest of tau g1 generation tau g2 must be empty"); - assert_eq!(after.tau_powers_g2.len(), 0, "during rest of tau g1 generation tau g2 must be empty"); + assert_eq!( + before.tau_powers_g2.len(), + 0, + "during rest of tau g1 generation tau g2 must be empty" + ); + assert_eq!( + after.tau_powers_g2.len(), + 0, + "during rest of tau g1 generation tau g2 must be empty" + ); // Are the powers of tau correct? - if !same_ratio(power_pairs(&after.tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)) { + if !same_ratio( + power_pairs(&after.tau_powers_g1), + (tau_powers_g2_0, tau_powers_g2_1), + ) { println!("Invalid ratio power_pairs(&after.tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1) in extra TauG1 contribution"); return false; } @@ -401,7 +554,10 @@ impl BatchedAccumulator { } } - if !same_ratio(power_pairs(&tau_powers_last_first_chunks), (tau_powers_g2_0, tau_powers_g2_1)) { + if !same_ratio( + power_pairs(&tau_powers_last_first_chunks), + (tau_powers_g2_0, tau_powers_g2_1), + ) { println!("Invalid ratio power_pairs(&after.tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1) in TauG1 contribution intersection"); } true @@ -411,32 +567,70 @@ impl BatchedAccumulator { input_map: &Mmap, output_map: &mut MmapMut, check_input_for_correctness: CheckForCorrectness, - ) -> io::Result<()> - { - use itertools::MinMaxResult::{MinMax}; + ) -> io::Result<()> { + use itertools::MinMaxResult::MinMax; let mut accumulator = Self::empty(); - for chunk in &(0..P::TAU_POWERS_LENGTH).into_iter().chunks(P::EMPIRICAL_BATCH_SIZE) { + for chunk in &(0..P::TAU_POWERS_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE) { if let MinMax(start, end) = chunk.minmax() { let size = end - start + 1; - accumulator.read_chunk(start, size, UseCompression::Yes, check_input_for_correctness, &input_map).expect(&format!("must read a chunk from {} to {} from source of decompression", start, end)); + accumulator + .read_chunk( + start, + size, + UseCompression::Yes, + check_input_for_correctness, + &input_map, + ) + .unwrap_or_else(|_| { + panic!(format!( + "must read a chunk from {} to {} from source of decompression", + start, end + )) + }); accumulator.write_chunk(start, UseCompression::No, output_map)?; } else { panic!("Chunk does not have a min and max"); } } - for chunk in &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).into_iter().chunks(P::EMPIRICAL_BATCH_SIZE) { + for chunk in + &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE) + { if let MinMax(start, end) = chunk.minmax() { let size = end - start + 1; - accumulator.read_chunk(start, size, UseCompression::Yes, check_input_for_correctness, &input_map).expect(&format!("must read a chunk from {} to {} from source of decompression", start, end)); - assert_eq!(accumulator.tau_powers_g2.len(), 0, "during rest of tau g1 generation tau g2 must be empty"); - assert_eq!(accumulator.alpha_tau_powers_g1.len(), 0, "during 
rest of tau g1 generation alpha*tau in g1 must be empty"); - assert_eq!(accumulator.beta_tau_powers_g1.len(), 0, "during rest of tau g1 generation beta*tau in g1 must be empty"); + accumulator + .read_chunk( + start, + size, + UseCompression::Yes, + check_input_for_correctness, + &input_map, + ) + .unwrap_or_else(|_| { + panic!(format!( + "must read a chunk from {} to {} from source of decompression", + start, end + )) + }); + assert_eq!( + accumulator.tau_powers_g2.len(), + 0, + "during rest of tau g1 generation tau g2 must be empty" + ); + assert_eq!( + accumulator.alpha_tau_powers_g1.len(), + 0, + "during rest of tau g1 generation alpha*tau in g1 must be empty" + ); + assert_eq!( + accumulator.beta_tau_powers_g1.len(), + 0, + "during rest of tau g1 generation beta*tau in g1 must be empty" + ); accumulator.write_chunk(start, UseCompression::No, output_map)?; - } else { panic!("Chunk does not have a min and max"); } @@ -449,9 +643,8 @@ impl BatchedAccumulator { input_map: &Mmap, check_input_for_correctness: CheckForCorrectness, compression: UseCompression, - ) -> io::Result> - { - use itertools::MinMaxResult::{MinMax}; + ) -> io::Result> { + use itertools::MinMaxResult::MinMax; let mut accumulator = Self::empty(); @@ -461,10 +654,23 @@ impl BatchedAccumulator { let mut beta_tau_powers_g1 = vec![]; let mut beta_g2 = vec![]; - for chunk in &(0..P::TAU_POWERS_LENGTH).into_iter().chunks(P::EMPIRICAL_BATCH_SIZE) { + for chunk in &(0..P::TAU_POWERS_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE) { if let MinMax(start, end) = chunk.minmax() { let size = end - start + 1; - accumulator.read_chunk(start, size, compression, check_input_for_correctness, &input_map).expect(&format!("must read a chunk from {} to {} from source of decompression", start, end)); + accumulator + .read_chunk( + start, + size, + compression, + check_input_for_correctness, + &input_map, + ) + .unwrap_or_else(|_| { + panic!(format!( + "must read a chunk from {} to {} from source of decompression", + start, end + )) + }); tau_powers_g1.extend_from_slice(&accumulator.tau_powers_g1); tau_powers_g2.extend_from_slice(&accumulator.tau_powers_g2); alpha_tau_powers_g1.extend_from_slice(&accumulator.alpha_tau_powers_g1); @@ -477,13 +683,40 @@ impl BatchedAccumulator { } } - for chunk in &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).into_iter().chunks(P::EMPIRICAL_BATCH_SIZE) { + for chunk in + &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE) + { if let MinMax(start, end) = chunk.minmax() { let size = end - start + 1; - accumulator.read_chunk(start, size, compression, check_input_for_correctness, &input_map).expect(&format!("must read a chunk from {} to {} from source of decompression", start, end)); - assert_eq!(accumulator.tau_powers_g2.len(), 0, "during rest of tau g1 generation tau g2 must be empty"); - assert_eq!(accumulator.alpha_tau_powers_g1.len(), 0, "during rest of tau g1 generation alpha*tau in g1 must be empty"); - assert_eq!(accumulator.beta_tau_powers_g1.len(), 0, "during rest of tau g1 generation beta*tau in g1 must be empty"); + accumulator + .read_chunk( + start, + size, + compression, + check_input_for_correctness, + &input_map, + ) + .unwrap_or_else(|_| { + panic!(format!( + "must read a chunk from {} to {} from source of decompression", + start, end + )) + }); + assert_eq!( + accumulator.tau_powers_g2.len(), + 0, + "during rest of tau g1 generation tau g2 must be empty" + ); + assert_eq!( + accumulator.alpha_tau_powers_g1.len(), + 0, + "during rest of tau g1 generation alpha*tau in g1 must 
be empty" + ); + assert_eq!( + accumulator.beta_tau_powers_g1.len(), + 0, + "during rest of tau g1 generation beta*tau in g1 must be empty" + ); tau_powers_g1.extend_from_slice(&accumulator.tau_powers_g1); tau_powers_g2.extend_from_slice(&accumulator.tau_powers_g2); @@ -495,34 +728,33 @@ impl BatchedAccumulator { } Ok(BatchedAccumulator { - tau_powers_g1: tau_powers_g1, - tau_powers_g2: tau_powers_g2, - alpha_tau_powers_g1: alpha_tau_powers_g1, - beta_tau_powers_g1: beta_tau_powers_g1, + tau_powers_g1, + tau_powers_g2, + alpha_tau_powers_g1, + beta_tau_powers_g1, beta_g2: beta_g2[0], hash: blank_hash(), - marker: std::marker::PhantomData::

<P> {} + marker: std::marker::PhantomData::<P>

{}, }) } pub fn serialize( &mut self, output_map: &mut MmapMut, - compression: UseCompression - ) -> io::Result<()> - { - use itertools::MinMaxResult::{MinMax}; + compression: UseCompression, + ) -> io::Result<()> { + use itertools::MinMaxResult::MinMax; - for chunk in &(0..P::TAU_POWERS_LENGTH).into_iter().chunks(P::EMPIRICAL_BATCH_SIZE) { + for chunk in &(0..P::TAU_POWERS_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE) { if let MinMax(start, end) = chunk.minmax() { - let mut tmp_acc = BatchedAccumulator:: { - tau_powers_g1: (&self.tau_powers_g1[start..end+1]).to_vec(), - tau_powers_g2: (&self.tau_powers_g2[start..end+1]).to_vec(), - alpha_tau_powers_g1: (&self.alpha_tau_powers_g1[start..end+1]).to_vec(), - beta_tau_powers_g1: (&self.beta_tau_powers_g1[start..end+1]).to_vec(), - beta_g2: self.beta_g2.clone(), - hash: self.hash.clone(), - marker: std::marker::PhantomData::

<P> {} + let mut tmp_acc = BatchedAccumulator::<E, P> { + tau_powers_g1: (&self.tau_powers_g1[start..=end]).to_vec(), + tau_powers_g2: (&self.tau_powers_g2[start..=end]).to_vec(), + alpha_tau_powers_g1: (&self.alpha_tau_powers_g1[start..=end]).to_vec(), + beta_tau_powers_g1: (&self.beta_tau_powers_g1[start..=end]).to_vec(), + beta_g2: self.beta_g2, + hash: self.hash, + marker: std::marker::PhantomData::<P>

{}, }; tmp_acc.write_chunk(start, compression, output_map)?; } else { @@ -530,16 +762,18 @@ impl BatchedAccumulator { } } - for chunk in &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).into_iter().chunks(P::EMPIRICAL_BATCH_SIZE) { + for chunk in + &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE) + { if let MinMax(start, end) = chunk.minmax() { - let mut tmp_acc = BatchedAccumulator:: { - tau_powers_g1: (&self.tau_powers_g1[start..end+1]).to_vec(), + let mut tmp_acc = BatchedAccumulator:: { + tau_powers_g1: (&self.tau_powers_g1[start..=end]).to_vec(), tau_powers_g2: vec![], alpha_tau_powers_g1: vec![], beta_tau_powers_g1: vec![], - beta_g2: self.beta_g2.clone(), - hash: self.hash.clone(), - marker: std::marker::PhantomData::

<P> {} + beta_g2: self.beta_g2, + hash: self.hash, + marker: std::marker::PhantomData::<P>

{}, }; tmp_acc.write_chunk(start, compression, output_map)?; } else { @@ -549,69 +783,126 @@ impl BatchedAccumulator { Ok(()) } - } -impl BatchedAccumulator { - pub fn read_chunk ( +impl BatchedAccumulator { + pub fn read_chunk( &mut self, from: usize, size: usize, compression: UseCompression, checked: CheckForCorrectness, input_map: &Mmap, - ) -> Result<(), DeserializationError> - { + ) -> Result<(), DeserializationError> { self.tau_powers_g1 = match compression { - UseCompression::Yes => { - self.read_points_chunk::<::Compressed>(from, size, ElementType::TauG1, compression, checked, &input_map)? - }, - UseCompression::No => { - self.read_points_chunk::<::Uncompressed>(from, size, ElementType::TauG1, compression, checked, &input_map)? - }, - + UseCompression::Yes => self + .read_points_chunk::<::Compressed>( + from, + size, + ElementType::TauG1, + compression, + checked, + &input_map, + )?, + UseCompression::No => self + .read_points_chunk::<::Uncompressed>( + from, + size, + ElementType::TauG1, + compression, + checked, + &input_map, + )?, }; self.tau_powers_g2 = match compression { - UseCompression::Yes => { - self.read_points_chunk::<::Compressed>(from, size, ElementType::TauG2, compression, checked, &input_map)? - }, - UseCompression::No => { - self.read_points_chunk::<::Uncompressed>(from, size, ElementType::TauG2, compression, checked, &input_map)? - }, - + UseCompression::Yes => self + .read_points_chunk::<::Compressed>( + from, + size, + ElementType::TauG2, + compression, + checked, + &input_map, + )?, + UseCompression::No => self + .read_points_chunk::<::Uncompressed>( + from, + size, + ElementType::TauG2, + compression, + checked, + &input_map, + )?, }; self.alpha_tau_powers_g1 = match compression { - UseCompression::Yes => { - self.read_points_chunk::<::Compressed>(from, size, ElementType::AlphaG1, compression, checked, &input_map)? - }, - UseCompression::No => { - self.read_points_chunk::<::Uncompressed>(from, size, ElementType::AlphaG1, compression, checked, &input_map)? - }, - + UseCompression::Yes => self + .read_points_chunk::<::Compressed>( + from, + size, + ElementType::AlphaG1, + compression, + checked, + &input_map, + )?, + UseCompression::No => self + .read_points_chunk::<::Uncompressed>( + from, + size, + ElementType::AlphaG1, + compression, + checked, + &input_map, + )?, }; self.beta_tau_powers_g1 = match compression { - UseCompression::Yes => { - self.read_points_chunk::<::Compressed>(from, size, ElementType::BetaG1, compression, checked, &input_map)? - }, - UseCompression::No => { - self.read_points_chunk::<::Uncompressed>(from, size, ElementType::BetaG1, compression, checked, &input_map)? 
- }, + UseCompression::Yes => self + .read_points_chunk::<::Compressed>( + from, + size, + ElementType::BetaG1, + compression, + checked, + &input_map, + )?, + UseCompression::No => self + .read_points_chunk::<::Uncompressed>( + from, + size, + ElementType::BetaG1, + compression, + checked, + &input_map, + )?, }; self.beta_g2 = match compression { UseCompression::Yes => { - let points = self.read_points_chunk::<::Compressed>(0, 1, ElementType::BetaG2, compression, checked, &input_map)?; + let points = self.read_points_chunk::<::Compressed>( + 0, + 1, + ElementType::BetaG2, + compression, + checked, + &input_map, + )?; points[0] - }, + } UseCompression::No => { - let points = self.read_points_chunk::<::Uncompressed>(0, 1, ElementType::BetaG2, compression, checked, &input_map)?; + let points = self.read_points_chunk::<::Uncompressed>( + 0, + 1, + ElementType::BetaG2, + compression, + checked, + &input_map, + )?; points[0] - }, + } }; Ok(()) @@ -625,8 +916,7 @@ impl BatchedAccumulator { compression: UseCompression, checked: CheckForCorrectness, input_map: &Mmap, - ) -> Result, DeserializationError> - { + ) -> Result, DeserializationError> { // Read the encoded elements let mut res = vec![ENC::empty(); size]; @@ -637,8 +927,11 @@ impl BatchedAccumulator { if index >= P::TAU_POWERS_G1_LENGTH { return Ok(vec![]); } - }, - ElementType::AlphaG1 | ElementType::BetaG1 | ElementType::BetaG2 | ElementType::TauG2 => { + } + ElementType::AlphaG1 + | ElementType::BetaG1 + | ElementType::BetaG2 + | ElementType::TauG2 => { if index >= P::TAU_POWERS_LENGTH { return Ok(vec![]); } @@ -646,8 +939,10 @@ impl BatchedAccumulator { }; let position = Self::calculate_mmap_position(index, element_type, compression); let element_size = Self::get_size(element_type, compression); - let memory_slice = input_map.get(position..position+element_size).expect("must read point data from file"); - memory_slice.clone().read_exact(encoded.as_mut())?; + let mut memory_slice = input_map + .get(position..position + element_size) + .expect("must read point data from file"); + memory_slice.read_exact(encoded.as_mut())?; } // Allocate space for the deserialized elements @@ -663,7 +958,10 @@ impl BatchedAccumulator { let decoding_error = Arc::new(Mutex::new(None)); crossbeam::scope(|scope| { - for (source, target) in res.chunks(chunk_size).zip(res_affine.chunks_mut(chunk_size)) { + for (source, target) in res + .chunks(chunk_size) + .zip(res_affine.chunks_mut(chunk_size)) + { let decoding_error = decoding_error.clone(); scope.spawn(move || { @@ -678,21 +976,25 @@ impl BatchedAccumulator { match checked { CheckForCorrectness::Yes => { // Points at infinity are never expected in the accumulator - source.into_affine().map_err(|e| e.into()).and_then(|source| { - if source.is_zero() { - Err(DeserializationError::PointAtInfinity) - } else { - Ok(source) - } - }) - }, - CheckForCorrectness::No => source.into_affine_unchecked().map_err(|e| e.into()) + source + .into_affine() + .map_err(|e| e.into()) + .and_then(|source| { + if source.is_zero() { + Err(DeserializationError::PointAtInfinity) + } else { + Ok(source) + } + }) + } + CheckForCorrectness::No => { + source.into_affine_unchecked().map_err(|e| e.into()) + } } - } - { + } { Ok(source) => { *target = source; - }, + } Err(e) => { *decoding_error.lock().unwrap() = Some(e); } @@ -710,54 +1012,59 @@ impl BatchedAccumulator { } } - match Arc::try_unwrap(decoding_error).unwrap().into_inner().unwrap() { - Some(e) => { - Err(e) - }, - None => { - Ok(res_affine) - } + match 
Arc::try_unwrap(decoding_error) + .unwrap() + .into_inner() + .unwrap() + { + Some(e) => Err(e), + None => Ok(res_affine), } } } -impl BatchedAccumulator { +impl BatchedAccumulator { fn write_all( &mut self, chunk_start: usize, compression: UseCompression, element_type: ElementType, output_map: &mut MmapMut, - ) -> io::Result<()> - { + ) -> io::Result<()> { match element_type { ElementType::TauG1 => { for (i, c) in self.tau_powers_g1.clone().iter().enumerate() { let index = chunk_start + i; self.write_point(index, c, compression, element_type.clone(), output_map)?; } - }, + } ElementType::TauG2 => { for (i, c) in self.tau_powers_g2.clone().iter().enumerate() { let index = chunk_start + i; self.write_point(index, c, compression, element_type.clone(), output_map)?; } - }, + } ElementType::AlphaG1 => { for (i, c) in self.alpha_tau_powers_g1.clone().iter().enumerate() { let index = chunk_start + i; self.write_point(index, c, compression, element_type.clone(), output_map)?; } - }, + } ElementType::BetaG1 => { for (i, c) in self.beta_tau_powers_g1.clone().iter().enumerate() { let index = chunk_start + i; self.write_point(index, c, compression, element_type.clone(), output_map)?; } - }, + } ElementType::BetaG2 => { let index = chunk_start; - self.write_point(index, &self.beta_g2.clone(), compression, element_type.clone(), output_map)? + self.write_point( + index, + &self.beta_g2.clone(), + compression, + element_type.clone(), + output_map, + )? } }; @@ -774,15 +1081,19 @@ impl BatchedAccumulator { element_type: ElementType, output_map: &mut MmapMut, ) -> io::Result<()> - where C: CurveAffine + where + C: CurveAffine, { match element_type { ElementType::TauG1 => { if index >= P::TAU_POWERS_G1_LENGTH { return Ok(()); } - }, - ElementType::AlphaG1 | ElementType::BetaG1 | ElementType::BetaG2 | ElementType::TauG2 => { + } + ElementType::AlphaG1 + | ElementType::BetaG1 + | ElementType::BetaG2 + | ElementType::TauG2 => { if index >= P::TAU_POWERS_LENGTH { return Ok(()); } @@ -793,13 +1104,13 @@ impl BatchedAccumulator { UseCompression::Yes => { let position = Self::calculate_mmap_position(index, element_type, compression); // let size = self.get_size(element_type, compression); - (&mut output_map[position..]).write(p.into_compressed().as_ref())?; - }, + (&mut output_map[position..]).write_all(p.into_compressed().as_ref())?; + } UseCompression::No => { let position = Self::calculate_mmap_position(index, element_type, compression); // let size = self.get_size(element_type, compression); - (&mut output_map[position..]).write(p.into_uncompressed().as_ref())?; - }, + (&mut output_map[position..]).write_all(p.into_uncompressed().as_ref())?; + } }; Ok(()) @@ -810,9 +1121,8 @@ impl BatchedAccumulator { &mut self, chunk_start: usize, compression: UseCompression, - output_map: &mut MmapMut - ) -> io::Result<()> - { + output_map: &mut MmapMut, + ) -> io::Result<()> { self.write_all(chunk_start, compression, ElementType::TauG1, output_map)?; if chunk_start < P::TAU_POWERS_LENGTH { self.write_all(chunk_start, compression, ElementType::TauG2, output_map)?; @@ -823,10 +1133,9 @@ impl BatchedAccumulator { Ok(()) } - } -impl BatchedAccumulator { +impl BatchedAccumulator { /// Transforms the accumulator with a private key. /// Due to large amount of data in a previous accumulator even in the compressed form /// this function can now work on compressed input. 
Output can be made in any form @@ -839,36 +1148,39 @@ impl BatchedAccumulator { input_is_compressed: UseCompression, compress_the_output: UseCompression, check_input_for_correctness: CheckForCorrectness, - key: &PrivateKey - ) -> io::Result<()> - { - + key: &PrivateKey, + ) -> io::Result<()> { /// Exponentiate a large number of points, with an optional coefficient to be applied to the /// exponent. - fn batch_exp >(bases: &mut [C], exp: &[C::Scalar], coeff: Option<&C::Scalar>) { + fn batch_exp>( + bases: &mut [C], + exp: &[C::Scalar], + coeff: Option<&C::Scalar>, + ) { assert_eq!(bases.len(), exp.len()); let mut projective = vec![C::Projective::zero(); bases.len()]; let chunk_size = bases.len() / num_cpus::get(); // Perform wNAF over multiple cores, placing results into `projective`. crossbeam::scope(|scope| { - for ((bases, exp), projective) in bases.chunks_mut(chunk_size) - .zip(exp.chunks(chunk_size)) - .zip(projective.chunks_mut(chunk_size)) + for ((bases, exp), projective) in bases + .chunks_mut(chunk_size) + .zip(exp.chunks(chunk_size)) + .zip(projective.chunks_mut(chunk_size)) { scope.spawn(move || { let mut wnaf = Wnaf::new(); - for ((base, exp), projective) in bases.iter_mut() - .zip(exp.iter()) - .zip(projective.iter_mut()) + for ((base, exp), projective) in + bases.iter_mut().zip(exp.iter()).zip(projective.iter_mut()) { let mut exp = *exp; if let Some(coeff) = coeff { exp.mul_assign(coeff); } - *projective = wnaf.base(base.into_projective(), 1).scalar(exp.into_repr()); + *projective = + wnaf.base(base.into_projective(), 1).scalar(exp.into_repr()); } }); } @@ -876,8 +1188,7 @@ impl BatchedAccumulator { // Perform batch normalization crossbeam::scope(|scope| { - for projective in projective.chunks_mut(chunk_size) - { + for projective in projective.chunks_mut(chunk_size) { scope.spawn(move || { C::Projective::batch_normalization(projective); }); @@ -887,18 +1198,29 @@ impl BatchedAccumulator { // Turn it all back into affine points for (projective, affine) in projective.iter().zip(bases.iter_mut()) { *affine = projective.into_affine(); - assert!(!affine.is_zero(), "your contribution happened to produce a point at infinity, please re-run"); + assert!( + !affine.is_zero(), + "your contribution happened to produce a point at infinity, please re-run" + ); } } let mut accumulator = Self::empty(); - use itertools::MinMaxResult::{MinMax}; + use itertools::MinMaxResult::MinMax; - for chunk in &(0..P::TAU_POWERS_LENGTH).into_iter().chunks(P::EMPIRICAL_BATCH_SIZE) { + for chunk in &(0..P::TAU_POWERS_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE) { if let MinMax(start, end) = chunk.minmax() { let size = end - start + 1; - accumulator.read_chunk(start, size, input_is_compressed, check_input_for_correctness, &input_map).expect("must read a first chunk"); + accumulator + .read_chunk( + start, + size, + input_is_compressed, + check_input_for_correctness, + &input_map, + ) + .expect("must read a first chunk"); // Construct the powers of tau let mut taupowers = vec![E::Fr::zero(); size]; @@ -920,10 +1242,21 @@ impl BatchedAccumulator { batch_exp::(&mut accumulator.tau_powers_g1, &taupowers[0..], None); batch_exp::(&mut accumulator.tau_powers_g2, &taupowers[0..], None); - batch_exp::(&mut accumulator.alpha_tau_powers_g1, &taupowers[0..], Some(&key.alpha)); - batch_exp::(&mut accumulator.beta_tau_powers_g1, &taupowers[0..], Some(&key.beta)); + batch_exp::( + &mut accumulator.alpha_tau_powers_g1, + &taupowers[0..], + Some(&key.alpha), + ); + batch_exp::( + &mut accumulator.beta_tau_powers_g1, + 
&taupowers[0..], + Some(&key.beta), + ); accumulator.beta_g2 = accumulator.beta_g2.mul(key.beta).into_affine(); - assert!(!accumulator.beta_g2.is_zero(), "your contribution happened to produce a point at infinity, please re-run"); + assert!( + !accumulator.beta_g2.is_zero(), + "your contribution happened to produce a point at infinity, please re-run" + ); accumulator.write_chunk(start, compress_the_output, output_map)?; println!("Done processing {} powers of tau", end); } else { @@ -931,11 +1264,25 @@ impl BatchedAccumulator { } } - for chunk in &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).into_iter().chunks(P::EMPIRICAL_BATCH_SIZE) { + for chunk in + &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE) + { if let MinMax(start, end) = chunk.minmax() { let size = end - start + 1; - accumulator.read_chunk(start, size, input_is_compressed, check_input_for_correctness, &input_map).expect("must read a first chunk"); - assert_eq!(accumulator.tau_powers_g2.len(), 0, "during rest of tau g1 generation tau g2 must be empty"); + accumulator + .read_chunk( + start, + size, + input_is_compressed, + check_input_for_correctness, + &input_map, + ) + .expect("must read a first chunk"); + assert_eq!( + accumulator.tau_powers_g2.len(), + 0, + "during rest of tau g1 generation tau g2 must be empty" + ); // Construct the powers of tau let mut taupowers = vec![E::Fr::zero(); size]; @@ -970,16 +1317,15 @@ impl BatchedAccumulator { } } -impl BatchedAccumulator { +impl BatchedAccumulator { /// Transforms the accumulator with a private key. pub fn generate_initial( output_map: &mut MmapMut, compress_the_output: UseCompression, - ) -> io::Result<()> - { - use itertools::MinMaxResult::{MinMax}; + ) -> io::Result<()> { + use itertools::MinMaxResult::MinMax; - for chunk in &(0..P::TAU_POWERS_LENGTH).into_iter().chunks(P::EMPIRICAL_BATCH_SIZE) { + for chunk in &(0..P::TAU_POWERS_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE) { if let MinMax(start, end) = chunk.minmax() { let size = end - start + 1; let mut accumulator = Self { @@ -989,7 +1335,7 @@ impl BatchedAccumulator { beta_tau_powers_g1: vec![E::G1Affine::one(); size], beta_g2: E::G2Affine::one(), hash: blank_hash(), - marker: std::marker::PhantomData::
<P> {} + marker: std::marker::PhantomData::<P>
{}, }; accumulator.write_chunk(start, compress_the_output, output_map)?; @@ -999,7 +1345,9 @@ impl BatchedAccumulator { } } - for chunk in &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).into_iter().chunks(P::EMPIRICAL_BATCH_SIZE) { + for chunk in + &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE) + { if let MinMax(start, end) = chunk.minmax() { let size = end - start + 1; let mut accumulator = Self { @@ -1009,7 +1357,7 @@ impl BatchedAccumulator { beta_tau_powers_g1: vec![], beta_g2: E::G2Affine::one(), hash: blank_hash(), - marker: std::marker::PhantomData::
<P> {} + marker: std::marker::PhantomData::<P>
{}, }; accumulator.write_chunk(start, compress_the_output, output_map)?; diff --git a/powersoftau/src/bin/beacon_constrained.rs b/powersoftau/src/bin/beacon_constrained.rs index affd8dd..b858c95 100644 --- a/powersoftau/src/bin/beacon_constrained.rs +++ b/powersoftau/src/bin/beacon_constrained.rs @@ -1,20 +1,12 @@ -extern crate powersoftau; -extern crate bellman_ce; -extern crate memmap; -extern crate rand; -extern crate blake2; -extern crate byteorder; -extern crate crypto; +use powersoftau::bn256::Bn256CeremonyParameters; -use powersoftau::bn256::{Bn256CeremonyParameters}; +use powersoftau::batched_accumulator::BatchedAccumulator; +use powersoftau::keypair::keypair; +use powersoftau::parameters::{CheckForCorrectness, UseCompression}; -use powersoftau::batched_accumulator::{BatchedAccumulator}; -use powersoftau::keypair::{keypair}; -use powersoftau::parameters::{UseCompression, CheckForCorrectness}; - -use std::fs::OpenOptions; use bellman_ce::pairing::bn256::Bn256; use memmap::*; +use std::fs::OpenOptions; use std::io::Write; @@ -27,7 +19,7 @@ const INPUT_IS_COMPRESSED: UseCompression = UseCompression::No; const COMPRESS_THE_OUTPUT: UseCompression = UseCompression::Yes; const CHECK_INPUT_CORRECTNESS: CheckForCorrectness = CheckForCorrectness::No; - +#[allow(clippy::modulo_one)] fn main() { let args: Vec = std::env::args().collect(); if args.len() != 3 { @@ -37,29 +29,36 @@ fn main() { let challenge_filename = &args[1]; let response_filename = &args[2]; - println!("Will contribute a random beacon to accumulator for 2^{} powers of tau", Bn256CeremonyParameters::REQUIRED_POWER); - println!("In total will generate up to {} powers", Bn256CeremonyParameters::TAU_POWERS_G1_LENGTH); - + println!( + "Will contribute a random beacon to accumulator for 2^{} powers of tau", + Bn256CeremonyParameters::REQUIRED_POWER + ); + println!( + "In total will generate up to {} powers", + Bn256CeremonyParameters::TAU_POWERS_G1_LENGTH + ); + // Create an RNG based on the outcome of the random beacon let mut rng = { - use byteorder::{ReadBytesExt, BigEndian}; - use rand::{SeedableRng}; - use rand::chacha::ChaChaRng; - use crypto::sha2::Sha256; + use byteorder::{BigEndian, ReadBytesExt}; use crypto::digest::Digest; + use crypto::sha2::Sha256; + use rand::chacha::ChaChaRng; + use rand::SeedableRng; // Place block hash here (block number #564321) - let mut cur_hash: [u8; 32] = hex!("0000000000000000000a558a61ddc8ee4e488d647a747fe4dcc362fe2026c620"); + let mut cur_hash: [u8; 32] = + hex!("0000000000000000000a558a61ddc8ee4e488d647a747fe4dcc362fe2026c620"); // Performs 2^n hash iterations over it const N: u64 = 10; - for i in 0..(1u64<().expect("digest is large enough for this to work"); + for s in &mut seed { + *s = digest + .read_u32::() + .expect("digest is large enough for this to work"); } ChaChaRng::from_seed(&seed) @@ -97,22 +98,28 @@ fn main() { .expect("unable open challenge file in this directory"); { - let metadata = reader.metadata().expect("unable to get filesystem metadata for challenge file"); + let metadata = reader + .metadata() + .expect("unable to get filesystem metadata for challenge file"); let expected_challenge_length = match INPUT_IS_COMPRESSED { - UseCompression::Yes => { - Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE - }, - UseCompression::No => { - Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE - } + UseCompression::Yes => Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE, + UseCompression::No => Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE, }; if metadata.len() != 
(expected_challenge_length as u64) { - panic!("The size of challenge file should be {}, but it's {}, so something isn't right.", expected_challenge_length, metadata.len()); + panic!( + "The size of challenge file should be {}, but it's {}, so something isn't right.", + expected_challenge_length, + metadata.len() + ); } } - let readable_map = unsafe { MmapOptions::new().map(&reader).expect("unable to create a memory map for input") }; + let readable_map = unsafe { + MmapOptions::new() + .map(&reader) + .expect("unable to create a memory map for input") + }; // Create response file in this directory let writer = OpenOptions::new() @@ -123,21 +130,27 @@ fn main() { .expect("unable to create response file in this directory"); let required_output_length = match COMPRESS_THE_OUTPUT { - UseCompression::Yes => { - Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE - }, + UseCompression::Yes => Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE, UseCompression::No => { - Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE + Bn256CeremonyParameters::PUBLIC_KEY_SIZE + Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE + + Bn256CeremonyParameters::PUBLIC_KEY_SIZE } }; - writer.set_len(required_output_length as u64).expect("must make output file large enough"); + writer + .set_len(required_output_length as u64) + .expect("must make output file large enough"); + + let mut writable_map = unsafe { + MmapOptions::new() + .map_mut(&writer) + .expect("unable to create a memory map for output") + }; - let mut writable_map = unsafe { MmapOptions::new().map_mut(&writer).expect("unable to create a memory map for output") }; - println!("Calculating previous contribution hash..."); - let current_accumulator_hash = BatchedAccumulator::::calculate_hash(&readable_map); + let current_accumulator_hash = + BatchedAccumulator::::calculate_hash(&readable_map); { println!("Contributing on top of the hash:"); @@ -152,9 +165,13 @@ fn main() { println!(); } - (&mut writable_map[0..]).write(current_accumulator_hash.as_slice()).expect("unable to write a challenge hash to mmap"); + (&mut writable_map[0..]) + .write_all(current_accumulator_hash.as_slice()) + .expect("unable to write a challenge hash to mmap"); - writable_map.flush().expect("unable to write hash to response file"); + writable_map + .flush() + .expect("unable to write hash to response file"); } // Construct our keypair using the RNG we created above @@ -165,25 +182,33 @@ fn main() { // this computes a transformation and writes it BatchedAccumulator::::transform( - &readable_map, - &mut writable_map, - INPUT_IS_COMPRESSED, - COMPRESS_THE_OUTPUT, - CHECK_INPUT_CORRECTNESS, - &privkey - ).expect("must transform with the key"); + &readable_map, + &mut writable_map, + INPUT_IS_COMPRESSED, + COMPRESS_THE_OUTPUT, + CHECK_INPUT_CORRECTNESS, + &privkey, + ) + .expect("must transform with the key"); println!("Finishing writing your contribution to response file..."); // Write the public key - pubkey.write::(&mut writable_map, COMPRESS_THE_OUTPUT).expect("unable to write public key"); + pubkey + .write::(&mut writable_map, COMPRESS_THE_OUTPUT) + .expect("unable to write public key"); // Get the hash of the contribution, so the user can compare later - let output_readonly = writable_map.make_read_only().expect("must make a map readonly"); - let contribution_hash = BatchedAccumulator::::calculate_hash(&output_readonly); + let output_readonly = writable_map + .make_read_only() + .expect("must make a map readonly"); + let contribution_hash = + 
BatchedAccumulator::::calculate_hash(&output_readonly); - print!("Done!\n\n\ + print!( + "Done!\n\n\ Your contribution has been written to response file\n\n\ - The BLAKE2b hash of response file is:\n"); + The BLAKE2b hash of response file is:\n" + ); for line in contribution_hash.as_slice().chunks(16) { print!("\t"); diff --git a/powersoftau/src/bin/compute_constrained.rs b/powersoftau/src/bin/compute_constrained.rs index d62a8ae..255f9f7 100644 --- a/powersoftau/src/bin/compute_constrained.rs +++ b/powersoftau/src/bin/compute_constrained.rs @@ -1,19 +1,11 @@ -extern crate powersoftau; -extern crate bellman_ce; -extern crate memmap; -extern crate rand; -extern crate blake2; -extern crate byteorder; -extern crate exitcode; +use powersoftau::batched_accumulator::BatchedAccumulator; +use powersoftau::bn256::Bn256CeremonyParameters; +use powersoftau::keypair::keypair; +use powersoftau::parameters::{CheckForCorrectness, UseCompression}; -use powersoftau::bn256::{Bn256CeremonyParameters}; -use powersoftau::batched_accumulator::{BatchedAccumulator}; -use powersoftau::keypair::{keypair}; -use powersoftau::parameters::{UseCompression, CheckForCorrectness}; - -use std::fs::OpenOptions; use bellman_ce::pairing::bn256::Bn256; use memmap::*; +use std::fs::OpenOptions; use std::io::{Read, Write}; @@ -32,15 +24,21 @@ fn main() { let challenge_filename = &args[1]; let response_filename = &args[2]; - println!("Will contribute to accumulator for 2^{} powers of tau", Bn256CeremonyParameters::REQUIRED_POWER); - println!("In total will generate up to {} powers", Bn256CeremonyParameters::TAU_POWERS_G1_LENGTH); - + println!( + "Will contribute to accumulator for 2^{} powers of tau", + Bn256CeremonyParameters::REQUIRED_POWER + ); + println!( + "In total will generate up to {} powers", + Bn256CeremonyParameters::TAU_POWERS_G1_LENGTH + ); + // Create an RNG based on a mixture of system randomness and user provided randomness let mut rng = { - use byteorder::{ReadBytesExt, BigEndian}; use blake2::{Blake2b, Digest}; - use rand::{SeedableRng, Rng, OsRng}; + use byteorder::{BigEndian, ReadBytesExt}; use rand::chacha::ChaChaRng; + use rand::{OsRng, Rng, SeedableRng}; let h = { let mut system_rng = OsRng::new().unwrap(); @@ -55,7 +53,9 @@ fn main() { // Ask the user to provide some information for additional entropy let mut user_input = String::new(); println!("Type some random text and press [ENTER] to provide additional entropy..."); - std::io::stdin().read_line(&mut user_input).expect("expected to read some random text from the user"); + std::io::stdin() + .read_line(&mut user_input) + .expect("expected to read some random text from the user"); // Hash it all up to make a seed h.input(&user_input.as_bytes()); @@ -66,8 +66,10 @@ fn main() { // Interpret the first 32 bytes of the digest as 8 32-bit words let mut seed = [0u32; 8]; - for i in 0..8 { - seed[i] = digest.read_u32::().expect("digest is large enough for this to work"); + for s in &mut seed { + *s = digest + .read_u32::() + .expect("digest is large enough for this to work"); } ChaChaRng::from_seed(&seed) @@ -75,52 +77,67 @@ fn main() { // Try to load challenge file from disk. 
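Note (illustrative, not part of the patch): the hunk above derives the contributor's RNG by hashing OS randomness together with user-typed text using BLAKE2b and reading the first 32 bytes of the digest as eight big-endian u32 words to seed ChaCha. Below is a minimal standalone sketch of that derivation pattern, assuming the same blake2/byteorder/rand-0.4-era APIs this crate already depends on; the helper name rng_from_entropy is made up for illustration:

use blake2::{Blake2b, Digest};
use byteorder::{BigEndian, ReadBytesExt};
use rand::chacha::ChaChaRng;
use rand::SeedableRng;

// Illustrative only: turn an arbitrary entropy byte string into a ChaCha RNG
// following the same seed-derivation pattern as compute_constrained.rs above.
fn rng_from_entropy(entropy: &[u8]) -> ChaChaRng {
    let mut h = Blake2b::default();
    h.input(entropy);
    let digest = h.result();
    let mut digest = &digest[..];

    // Interpret the first 32 bytes of the digest as 8 big-endian 32-bit words.
    let mut seed = [0u32; 8];
    for s in &mut seed {
        *s = digest
            .read_u32::<BigEndian>()
            .expect("digest is large enough for this to work");
    }

    ChaChaRng::from_seed(&seed)
}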
let reader = OpenOptions::new() - .read(true) - .open(challenge_filename) - .expect("unable open challenge file"); + .read(true) + .open(challenge_filename) + .expect("unable open challenge file"); { - let metadata = reader.metadata().expect("unable to get filesystem metadata for challenge file"); + let metadata = reader + .metadata() + .expect("unable to get filesystem metadata for challenge file"); let expected_challenge_length = match INPUT_IS_COMPRESSED { - UseCompression::Yes => { - Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE - }, - UseCompression::No => { - Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE - } + UseCompression::Yes => Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE, + UseCompression::No => Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE, }; if metadata.len() != (expected_challenge_length as u64) { - panic!("The size of challenge file should be {}, but it's {}, so something isn't right.", expected_challenge_length, metadata.len()); + panic!( + "The size of challenge file should be {}, but it's {}, so something isn't right.", + expected_challenge_length, + metadata.len() + ); } } - let readable_map = unsafe { MmapOptions::new().map(&reader).expect("unable to create a memory map for input") }; + let readable_map = unsafe { + MmapOptions::new() + .map(&reader) + .expect("unable to create a memory map for input") + }; // Create response file in this directory let writer = OpenOptions::new() - .read(true) - .write(true) - .create_new(true) - .open(response_filename) - .expect("unable to create response file"); + .read(true) + .write(true) + .create_new(true) + .open(response_filename) + .expect("unable to create response file"); let required_output_length = match COMPRESS_THE_OUTPUT { - UseCompression::Yes => { - Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE - }, + UseCompression::Yes => Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE, UseCompression::No => { - Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE + Bn256CeremonyParameters::PUBLIC_KEY_SIZE + Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE + + Bn256CeremonyParameters::PUBLIC_KEY_SIZE } }; - writer.set_len(required_output_length as u64).expect("must make output file large enough"); + writer + .set_len(required_output_length as u64) + .expect("must make output file large enough"); + + let mut writable_map = unsafe { + MmapOptions::new() + .map_mut(&writer) + .expect("unable to create a memory map for output") + }; - let mut writable_map = unsafe { MmapOptions::new().map_mut(&writer).expect("unable to create a memory map for output") }; - println!("Calculating previous contribution hash..."); - assert!(UseCompression::No == INPUT_IS_COMPRESSED, "Hashing the compressed file in not yet defined"); - let current_accumulator_hash = BatchedAccumulator::::calculate_hash(&readable_map); + assert!( + UseCompression::No == INPUT_IS_COMPRESSED, + "Hashing the compressed file in not yet defined" + ); + let current_accumulator_hash = + BatchedAccumulator::::calculate_hash(&readable_map); { println!("`challenge` file contains decompressed points and has a hash:"); @@ -135,15 +152,23 @@ fn main() { println!(); } - (&mut writable_map[0..]).write(current_accumulator_hash.as_slice()).expect("unable to write a challenge hash to mmap"); + (&mut writable_map[0..]) + .write_all(current_accumulator_hash.as_slice()) + .expect("unable to write a challenge hash to mmap"); - writable_map.flush().expect("unable to write hash to response file"); + writable_map + .flush() + .expect("unable to write hash to response file"); } { let mut 
challenge_hash = [0; 64]; - let memory_slice = readable_map.get(0..64).expect("must read point data from file"); - memory_slice.clone().read_exact(&mut challenge_hash).expect("couldn't read hash of challenge file from response file"); + let mut memory_slice = readable_map + .get(0..64) + .expect("must read point data from file"); + memory_slice + .read_exact(&mut challenge_hash) + .expect("couldn't read hash of challenge file from response file"); println!("`challenge` file claims (!!! Must not be blindly trusted) that it was based on the original contribution with a hash:"); for line in challenge_hash.chunks(16) { @@ -166,28 +191,36 @@ fn main() { // this computes a transformation and writes it BatchedAccumulator::::transform( - &readable_map, - &mut writable_map, - INPUT_IS_COMPRESSED, - COMPRESS_THE_OUTPUT, - CHECK_INPUT_CORRECTNESS, - &privkey - ).expect("must transform with the key"); + &readable_map, + &mut writable_map, + INPUT_IS_COMPRESSED, + COMPRESS_THE_OUTPUT, + CHECK_INPUT_CORRECTNESS, + &privkey, + ) + .expect("must transform with the key"); println!("Finishing writing your contribution to response file..."); // Write the public key - pubkey.write::(&mut writable_map, COMPRESS_THE_OUTPUT).expect("unable to write public key"); + pubkey + .write::(&mut writable_map, COMPRESS_THE_OUTPUT) + .expect("unable to write public key"); writable_map.flush().expect("must flush a memory map"); // Get the hash of the contribution, so the user can compare later - let output_readonly = writable_map.make_read_only().expect("must make a map readonly"); - let contribution_hash = BatchedAccumulator::::calculate_hash(&output_readonly); + let output_readonly = writable_map + .make_read_only() + .expect("must make a map readonly"); + let contribution_hash = + BatchedAccumulator::::calculate_hash(&output_readonly); - print!("Done!\n\n\ + print!( + "Done!\n\n\ Your contribution has been written to response file\n\n\ - The BLAKE2b hash of response file is:\n"); + The BLAKE2b hash of response file is:\n" + ); for line in contribution_hash.as_slice().chunks(16) { print!("\t"); diff --git a/powersoftau/src/bin/new.rs b/powersoftau/src/bin/new.rs index 5ac888e..e1f6d91 100644 --- a/powersoftau/src/bin/new.rs +++ b/powersoftau/src/bin/new.rs @@ -1,14 +1,11 @@ -extern crate powersoftau; -extern crate bellman_ce; +use powersoftau::accumulator::Accumulator; +use powersoftau::bn256::Bn256CeremonyParameters; +use powersoftau::parameters::UseCompression; +use powersoftau::utils::blank_hash; -use powersoftau::bn256::{Bn256CeremonyParameters}; -use powersoftau::accumulator::{Accumulator}; -use powersoftau::utils::{blank_hash}; -use powersoftau::parameters::{UseCompression}; - -use std::fs::OpenOptions; -use std::io::{Write, BufWriter}; use bellman_ce::pairing::bn256::Bn256; +use std::fs::OpenOptions; +use std::io::{BufWriter, Write}; fn main() { let args: Vec = std::env::args().collect(); @@ -28,13 +25,16 @@ fn main() { let mut writer = BufWriter::new(file); // Write a blank BLAKE2b hash: - writer.write_all(&blank_hash().as_slice()).expect("unable to write blank hash to challenge file"); + writer + .write_all(&blank_hash().as_slice()) + .expect("unable to write blank hash to challenge file"); - let parameters = Bn256CeremonyParameters{}; + let parameters = Bn256CeremonyParameters {}; let acc: Accumulator = Accumulator::new(parameters); println!("Writing an empty accumulator to disk"); - acc.serialize(&mut writer, UseCompression::No).expect("unable to write fresh accumulator to challenge file"); + acc.serialize(&mut 
writer, UseCompression::No) + .expect("unable to write fresh accumulator to challenge file"); writer.flush().expect("unable to flush accumulator to disk"); println!("Wrote a fresh accumulator to challenge file"); diff --git a/powersoftau/src/bin/new_constrained.rs b/powersoftau/src/bin/new_constrained.rs index 7839ea0..181ce7b 100644 --- a/powersoftau/src/bin/new_constrained.rs +++ b/powersoftau/src/bin/new_constrained.rs @@ -1,17 +1,13 @@ -extern crate powersoftau; -extern crate bellman_ce; -extern crate memmap; +use powersoftau::bn256::Bn256CeremonyParameters; -use powersoftau::bn256::{Bn256CeremonyParameters}; +use powersoftau::batched_accumulator::BatchedAccumulator; +use powersoftau::parameters::UseCompression; +use powersoftau::utils::blank_hash; -use powersoftau::batched_accumulator::{BatchedAccumulator}; -use powersoftau::parameters::{UseCompression}; -use powersoftau::utils::{blank_hash}; - -use std::fs::OpenOptions; -use std::io::{Write}; use bellman_ce::pairing::bn256::Bn256; use memmap::*; +use std::fs::OpenOptions; +use std::io::Write; use powersoftau::parameters::PowersOfTauParameters; @@ -25,8 +21,14 @@ fn main() { } let challenge_filename = &args[1]; - println!("Will generate an empty accumulator for 2^{} powers of tau", Bn256CeremonyParameters::REQUIRED_POWER); - println!("In total will generate up to {} powers", Bn256CeremonyParameters::TAU_POWERS_G1_LENGTH); + println!( + "Will generate an empty accumulator for 2^{} powers of tau", + Bn256CeremonyParameters::REQUIRED_POWER + ); + println!( + "In total will generate up to {} powers", + Bn256CeremonyParameters::TAU_POWERS_G1_LENGTH + ); let file = OpenOptions::new() .read(true) @@ -34,24 +36,32 @@ fn main() { .create_new(true) .open(challenge_filename) .expect("unable to create challenge file"); - + let expected_challenge_length = match COMPRESS_NEW_CHALLENGE { UseCompression::Yes => { - Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE - Bn256CeremonyParameters::PUBLIC_KEY_SIZE - }, - UseCompression::No => { - Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE + Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE + - Bn256CeremonyParameters::PUBLIC_KEY_SIZE } + UseCompression::No => Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE, }; - file.set_len(expected_challenge_length as u64).expect("unable to allocate large enough file"); + file.set_len(expected_challenge_length as u64) + .expect("unable to allocate large enough file"); - let mut writable_map = unsafe { MmapOptions::new().map_mut(&file).expect("unable to create a memory map") }; + let mut writable_map = unsafe { + MmapOptions::new() + .map_mut(&file) + .expect("unable to create a memory map") + }; // Write a blank BLAKE2b hash: let hash = blank_hash(); - (&mut writable_map[0..]).write(hash.as_slice()).expect("unable to write a default hash to mmap"); - writable_map.flush().expect("unable to write blank hash to challenge file"); + (&mut writable_map[0..]) + .write_all(hash.as_slice()) + .expect("unable to write a default hash to mmap"); + writable_map + .flush() + .expect("unable to write blank hash to challenge file"); println!("Blank hash for an empty challenge:"); for line in hash.as_slice().chunks(16) { @@ -65,12 +75,21 @@ fn main() { println!(); } - BatchedAccumulator::::generate_initial(&mut writable_map, COMPRESS_NEW_CHALLENGE).expect("generation of initial accumulator is successful"); - writable_map.flush().expect("unable to flush memmap to disk"); + BatchedAccumulator::::generate_initial( + &mut writable_map, + COMPRESS_NEW_CHALLENGE, + ) + .expect("generation of 
initial accumulator is successful"); + writable_map + .flush() + .expect("unable to flush memmap to disk"); // Get the hash of the contribution, so the user can compare later - let output_readonly = writable_map.make_read_only().expect("must make a map readonly"); - let contribution_hash = BatchedAccumulator::::calculate_hash(&output_readonly); + let output_readonly = writable_map + .make_read_only() + .expect("must make a map readonly"); + let contribution_hash = + BatchedAccumulator::::calculate_hash(&output_readonly); println!("Empty contribution is formed with a hash:"); diff --git a/powersoftau/src/bin/prepare_phase2.rs b/powersoftau/src/bin/prepare_phase2.rs index b47e879..2acd491 100644 --- a/powersoftau/src/bin/prepare_phase2.rs +++ b/powersoftau/src/bin/prepare_phase2.rs @@ -1,27 +1,23 @@ -extern crate powersoftau; -extern crate rand; -extern crate blake2; -extern crate byteorder; -extern crate bellman_ce; - -use bellman_ce::pairing::{CurveAffine, CurveProjective}; use bellman_ce::pairing::bn256::Bn256; use bellman_ce::pairing::bn256::{G1, G2}; -use powersoftau::bn256::{Bn256CeremonyParameters}; +use bellman_ce::pairing::{CurveAffine, CurveProjective}; use powersoftau::batched_accumulator::*; +use powersoftau::bn256::Bn256CeremonyParameters; use powersoftau::*; use crate::parameters::*; -use bellman_ce::multicore::Worker; use bellman_ce::domain::{EvaluationDomain, Point}; +use bellman_ce::multicore::Worker; use std::fs::OpenOptions; use std::io::{BufWriter, Write}; use memmap::*; -const fn num_bits() -> usize { std::mem::size_of::() * 8 } +const fn num_bits() -> usize { + std::mem::size_of::() * 8 +} fn log_2(x: u64) -> u32 { assert!(x > 0); @@ -38,51 +34,63 @@ fn main() { // Try to load response file from disk. let reader = OpenOptions::new() - .read(true) - .open(response_filename) - .expect("unable open response file in this directory"); - let response_readable_map = unsafe { MmapOptions::new().map(&reader).expect("unable to create a memory map for input") }; + .read(true) + .open(response_filename) + .expect("unable open response file in this directory"); + let response_readable_map = unsafe { + MmapOptions::new() + .map(&reader) + .expect("unable to create a memory map for input") + }; let current_accumulator = BatchedAccumulator::::deserialize( &response_readable_map, CheckForCorrectness::Yes, UseCompression::Yes, - ).expect("unable to read uncompressed accumulator"); - + ) + .expect("unable to read uncompressed accumulator"); let worker = &Worker::new(); // Create the parameters for various 2^m circuit depths. 
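Note (illustrative, not part of the patch): in the loop below, max_degree is the base-2 logarithm of the number of tau powers in G2, and each phase1radix2m{m} file covers a radix-2 evaluation domain of size 2^m. The bound change from 0..max_degree + 1 to the clippy-preferred inclusive range 0..=max_degree visits exactly the same values of m; a small self-contained check, with 10 as a made-up stand-in for max_degree:

fn main() {
    // Both range forms visit m = 0, 1, ..., max_degree, so the derived domain
    // sizes 1 << m are identical before and after the formatting change.
    let max_degree = 10u32; // hypothetical value, not taken from the ceremony
    let old: Vec<u64> = (0..max_degree + 1).map(|m| 1u64 << m).collect();
    let new: Vec<u64> = (0..=max_degree).map(|m| 1u64 << m).collect();
    assert_eq!(old, new);
    assert_eq!(new.last(), Some(&1024)); // 2^10
}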
let max_degree = log_2(current_accumulator.tau_powers_g2.len() as u64); - for m in 0..max_degree+1 { + for m in 0..=max_degree { let paramname = format!("phase1radix2m{}", m); println!("Creating {}", paramname); let degree = 1 << m; let mut g1_coeffs = EvaluationDomain::from_coeffs( - current_accumulator.tau_powers_g1[0..degree].iter() + current_accumulator.tau_powers_g1[0..degree] + .iter() .map(|e| Point(e.into_projective())) - .collect() - ).unwrap(); + .collect(), + ) + .unwrap(); let mut g2_coeffs = EvaluationDomain::from_coeffs( - current_accumulator.tau_powers_g2[0..degree].iter() + current_accumulator.tau_powers_g2[0..degree] + .iter() .map(|e| Point(e.into_projective())) - .collect() - ).unwrap(); + .collect(), + ) + .unwrap(); let mut g1_alpha_coeffs = EvaluationDomain::from_coeffs( - current_accumulator.alpha_tau_powers_g1[0..degree].iter() + current_accumulator.alpha_tau_powers_g1[0..degree] + .iter() .map(|e| Point(e.into_projective())) - .collect() - ).unwrap(); + .collect(), + ) + .unwrap(); let mut g1_beta_coeffs = EvaluationDomain::from_coeffs( - current_accumulator.beta_tau_powers_g1[0..degree].iter() + current_accumulator.beta_tau_powers_g1[0..degree] + .iter() .map(|e| Point(e.into_projective())) - .collect() - ).unwrap(); + .collect(), + ) + .unwrap(); // This converts all of the elements into Lagrange coefficients // for later construction of interpolation polynomials @@ -103,21 +111,13 @@ fn main() { // Remove the Point() wrappers - let mut g1_coeffs = g1_coeffs.into_iter() - .map(|e| e.0) - .collect::>(); + let mut g1_coeffs = g1_coeffs.into_iter().map(|e| e.0).collect::>(); - let mut g2_coeffs = g2_coeffs.into_iter() - .map(|e| e.0) - .collect::>(); + let mut g2_coeffs = g2_coeffs.into_iter().map(|e| e.0).collect::>(); - let mut g1_alpha_coeffs = g1_alpha_coeffs.into_iter() - .map(|e| e.0) - .collect::>(); + let mut g1_alpha_coeffs = g1_alpha_coeffs.into_iter().map(|e| e.0).collect::>(); - let mut g1_beta_coeffs = g1_beta_coeffs.into_iter() - .map(|e| e.0) - .collect::>(); + let mut g1_beta_coeffs = g1_beta_coeffs.into_iter().map(|e| e.0).collect::>(); // Batch normalize G1::batch_normalization(&mut g1_coeffs); @@ -130,7 +130,7 @@ fn main() { // x^(i + m) - x^i for i in 0..=(m-2) // for radix2 evaluation domains let mut h = Vec::with_capacity(degree - 1); - for i in 0..(degree-1) { + for i in 0..(degree - 1) { let mut tmp = current_accumulator.tau_powers_g1[i + degree].into_projective(); let mut tmp2 = current_accumulator.tau_powers_g1[i].into_projective(); tmp2.negate(); @@ -144,39 +144,41 @@ fn main() { // Create the parameter file let writer = OpenOptions::new() - .read(false) - .write(true) - .create_new(true) - .open(paramname) - .expect("unable to create parameter file in this directory"); + .read(false) + .write(true) + .create_new(true) + .open(paramname) + .expect("unable to create parameter file in this directory"); let mut writer = BufWriter::new(writer); // Write alpha (in g1) // Needed by verifier for e(alpha, beta) // Needed by prover for A and C elements of proof - writer.write_all( - current_accumulator.alpha_tau_powers_g1[0] - .into_uncompressed() - .as_ref() - ).unwrap(); + writer + .write_all( + current_accumulator.alpha_tau_powers_g1[0] + .into_uncompressed() + .as_ref(), + ) + .unwrap(); // Write beta (in g1) // Needed by prover for C element of proof - writer.write_all( - current_accumulator.beta_tau_powers_g1[0] - .into_uncompressed() - .as_ref() - ).unwrap(); + writer + .write_all( + current_accumulator.beta_tau_powers_g1[0] + 
.into_uncompressed() + .as_ref(), + ) + .unwrap(); // Write beta (in g2) // Needed by verifier for e(alpha, beta) // Needed by prover for B element of proof - writer.write_all( - current_accumulator.beta_g2 - .into_uncompressed() - .as_ref() - ).unwrap(); + writer + .write_all(current_accumulator.beta_g2.into_uncompressed().as_ref()) + .unwrap(); // Lagrange coefficients in G1 (for constructing // LC/IC queries and precomputing polynomials for A) @@ -184,10 +186,9 @@ fn main() { // Was normalized earlier in parallel let coeff = coeff.into_affine(); - writer.write_all( - coeff.into_uncompressed() - .as_ref() - ).unwrap(); + writer + .write_all(coeff.into_uncompressed().as_ref()) + .unwrap(); } // Lagrange coefficients in G2 (for precomputing @@ -196,10 +197,9 @@ fn main() { // Was normalized earlier in parallel let coeff = coeff.into_affine(); - writer.write_all( - coeff.into_uncompressed() - .as_ref() - ).unwrap(); + writer + .write_all(coeff.into_uncompressed().as_ref()) + .unwrap(); } // Lagrange coefficients in G1 with alpha (for @@ -208,10 +208,9 @@ fn main() { // Was normalized earlier in parallel let coeff = coeff.into_affine(); - writer.write_all( - coeff.into_uncompressed() - .as_ref() - ).unwrap(); + writer + .write_all(coeff.into_uncompressed().as_ref()) + .unwrap(); } // Lagrange coefficients in G1 with beta (for @@ -220,10 +219,9 @@ fn main() { // Was normalized earlier in parallel let coeff = coeff.into_affine(); - writer.write_all( - coeff.into_uncompressed() - .as_ref() - ).unwrap(); + writer + .write_all(coeff.into_uncompressed().as_ref()) + .unwrap(); } // Bases for H polynomial computation @@ -231,10 +229,9 @@ fn main() { // Was normalized earlier in parallel let coeff = coeff.into_affine(); - writer.write_all( - coeff.into_uncompressed() - .as_ref() - ).unwrap(); + writer + .write_all(coeff.into_uncompressed().as_ref()) + .unwrap(); } } } diff --git a/powersoftau/src/bin/reduce_powers.rs b/powersoftau/src/bin/reduce_powers.rs index 2a7c0b6..86b3551 100644 --- a/powersoftau/src/bin/reduce_powers.rs +++ b/powersoftau/src/bin/reduce_powers.rs @@ -1,27 +1,18 @@ -extern crate powersoftau; -extern crate rand; -extern crate blake2; -extern crate byteorder; -extern crate bellman_ce; - use bellman_ce::pairing::bn256::Bn256; -use powersoftau::bn256::Bn256CeremonyParameters; -use powersoftau::batched_accumulator::*; -use powersoftau::parameters::UseCompression; -use powersoftau::utils::reduced_hash; -use powersoftau::*; - -use crate::parameters::*; +use powersoftau::{ + batched_accumulator::BatchedAccumulator, + bn256::Bn256CeremonyParameters, + parameters::{CheckForCorrectness, PowersOfTauParameters, UseCompression}, + utils::reduced_hash, +}; use std::fs::OpenOptions; use std::io::Write; -use memmap::*; +use memmap::MmapOptions; #[derive(Clone)] -pub struct Bn256ReducedCeremonyParameters { - -} +pub struct Bn256ReducedCeremonyParameters {} impl PowersOfTauParameters for Bn256ReducedCeremonyParameters { const REQUIRED_POWER: usize = 10; @@ -33,7 +24,9 @@ impl PowersOfTauParameters for Bn256ReducedCeremonyParameters { const G2_COMPRESSED_BYTE_SIZE: usize = 64; } -const fn num_bits() -> usize { std::mem::size_of::() * 8 } +const fn num_bits() -> usize { + std::mem::size_of::() * 8 +} pub fn log_2(x: u64) -> u32 { assert!(x > 0); @@ -43,40 +36,66 @@ pub fn log_2(x: u64) -> u32 { fn main() { // Try to load `./challenge` from disk. 
let reader = OpenOptions::new() - .read(true) - .open("challenge") - .expect("unable open `./challenge` in this directory"); - let challenge_readable_map = unsafe { MmapOptions::new().map(&reader).expect("unable to create a memory map for input") }; + .read(true) + .open("challenge") + .expect("unable open `./challenge` in this directory"); + let challenge_readable_map = unsafe { + MmapOptions::new() + .map(&reader) + .expect("unable to create a memory map for input") + }; let current_accumulator = BatchedAccumulator::::deserialize( &challenge_readable_map, CheckForCorrectness::Yes, UseCompression::No, - ).expect("unable to read compressed accumulator"); + ) + .expect("unable to read compressed accumulator"); - let mut reduced_accumulator = BatchedAccumulator::::empty(); - reduced_accumulator.tau_powers_g1 = current_accumulator.tau_powers_g1[..Bn256ReducedCeremonyParameters::TAU_POWERS_G1_LENGTH].to_vec(); - reduced_accumulator.tau_powers_g2 = current_accumulator.tau_powers_g2[..Bn256ReducedCeremonyParameters::TAU_POWERS_LENGTH].to_vec(); - reduced_accumulator.alpha_tau_powers_g1 = current_accumulator.alpha_tau_powers_g1[..Bn256ReducedCeremonyParameters::TAU_POWERS_LENGTH].to_vec(); - reduced_accumulator.beta_tau_powers_g1 = current_accumulator.beta_tau_powers_g1[..Bn256ReducedCeremonyParameters::TAU_POWERS_LENGTH].to_vec(); + let mut reduced_accumulator = + BatchedAccumulator::::empty(); + reduced_accumulator.tau_powers_g1 = current_accumulator.tau_powers_g1 + [..Bn256ReducedCeremonyParameters::TAU_POWERS_G1_LENGTH] + .to_vec(); + reduced_accumulator.tau_powers_g2 = current_accumulator.tau_powers_g2 + [..Bn256ReducedCeremonyParameters::TAU_POWERS_LENGTH] + .to_vec(); + reduced_accumulator.alpha_tau_powers_g1 = current_accumulator.alpha_tau_powers_g1 + [..Bn256ReducedCeremonyParameters::TAU_POWERS_LENGTH] + .to_vec(); + reduced_accumulator.beta_tau_powers_g1 = current_accumulator.beta_tau_powers_g1 + [..Bn256ReducedCeremonyParameters::TAU_POWERS_LENGTH] + .to_vec(); reduced_accumulator.beta_g2 = current_accumulator.beta_g2; let writer = OpenOptions::new() - .read(true) - .write(true) - .create_new(true) - .open("reduced_challenge").expect("unable to create `./reduced_challenge` in this directory"); - - + .read(true) + .write(true) + .create_new(true) + .open("reduced_challenge") + .expect("unable to create `./reduced_challenge` in this directory"); // Recomputation stips the public key and uses hashing to link with the previous contibution after decompression - writer.set_len(Bn256ReducedCeremonyParameters::ACCUMULATOR_BYTE_SIZE as u64).expect("must make output file large enough"); + writer + .set_len(Bn256ReducedCeremonyParameters::ACCUMULATOR_BYTE_SIZE as u64) + .expect("must make output file large enough"); - let mut writable_map = unsafe { MmapOptions::new().map_mut(&writer).expect("unable to create a memory map for output") }; + let mut writable_map = unsafe { + MmapOptions::new() + .map_mut(&writer) + .expect("unable to create a memory map for output") + }; - let hash = reduced_hash(Bn256CeremonyParameters::REQUIRED_POWER as u8, Bn256ReducedCeremonyParameters::REQUIRED_POWER as u8); - (&mut writable_map[0..]).write(hash.as_slice()).expect("unable to write a default hash to mmap"); - writable_map.flush().expect("unable to write reduced hash to `./reduced_challenge`"); + let hash = reduced_hash( + Bn256CeremonyParameters::REQUIRED_POWER as u8, + Bn256ReducedCeremonyParameters::REQUIRED_POWER as u8, + ); + (&mut writable_map[0..]) + .write_all(hash.as_slice()) + .expect("unable to write 
a default hash to mmap"); + writable_map + .flush() + .expect("unable to write reduced hash to `./reduced_challenge`"); println!("Reduced hash for a reduced challenge:"); for line in hash.as_slice().chunks(16) { @@ -87,14 +106,21 @@ fn main() { } print!(" "); } - println!(""); + println!(); } - reduced_accumulator.serialize(&mut writable_map, UseCompression::No).unwrap(); + reduced_accumulator + .serialize(&mut writable_map, UseCompression::No) + .unwrap(); // Get the hash of the contribution, so the user can compare later - let output_readonly = writable_map.make_read_only().expect("must make a map readonly"); - let contribution_hash = BatchedAccumulator::::calculate_hash(&output_readonly); + let output_readonly = writable_map + .make_read_only() + .expect("must make a map readonly"); + let contribution_hash = + BatchedAccumulator::::calculate_hash( + &output_readonly, + ); println!("Reduced contribution is formed with a hash:"); @@ -106,7 +132,7 @@ fn main() { } print!(" "); } - println!(""); + println!(); } println!("Wrote a reduced accumulator to `./challenge`"); diff --git a/powersoftau/src/bin/verify.rs b/powersoftau/src/bin/verify.rs index e647739..45f5126 100644 --- a/powersoftau/src/bin/verify.rs +++ b/powersoftau/src/bin/verify.rs @@ -1,31 +1,27 @@ -extern crate powersoftau; -extern crate rand; -extern crate blake2; -extern crate byteorder; -extern crate bellman_ce; - -use bellman_ce::pairing::{CurveAffine, CurveProjective}; use bellman_ce::pairing::bn256::Bn256; use bellman_ce::pairing::bn256::{G1, G2}; -use powersoftau::bn256::{Bn256CeremonyParameters}; -use powersoftau::batched_accumulator::*; +use bellman_ce::pairing::{CurveAffine, CurveProjective}; use powersoftau::accumulator::HashWriter; +use powersoftau::batched_accumulator::*; +use powersoftau::bn256::Bn256CeremonyParameters; use powersoftau::*; -use crate::utils::*; -use crate::parameters::*; use crate::keypair::*; +use crate::parameters::*; +use crate::utils::*; -use bellman_ce::multicore::Worker; use bellman_ce::domain::{EvaluationDomain, Point}; +use bellman_ce::multicore::Worker; +use std::fs::{remove_file, OpenOptions}; +use std::io::{self, BufWriter, Read, Write}; use std::path::Path; -use std::fs::{OpenOptions, remove_file}; -use std::io::{self, Read, BufWriter, Write}; use memmap::*; -const fn num_bits() -> usize { std::mem::size_of::() * 8 } +const fn num_bits() -> usize { + std::mem::size_of::() * 8 +} fn log_2(x: u64) -> u32 { assert!(x > 0); @@ -36,11 +32,10 @@ fn log_2(x: u64) -> u32 { // given the current state of the accumulator and the last // response file hash. 
fn get_challenge_file_hash( - acc: &mut BatchedAccumulator::, + acc: &mut BatchedAccumulator, last_response_file_hash: &[u8; 64], is_initial: bool, -) -> [u8; 64] -{ +) -> [u8; 64] { let sink = io::sink(); let mut sink = HashWriter::new(sink); @@ -57,19 +52,31 @@ fn get_challenge_file_hash( .open(file_name) .expect("unable to create temporary tmp_challenge_file_hash"); - writer.set_len(Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE as u64).expect("must make output file large enough"); - let mut writable_map = unsafe { MmapOptions::new().map_mut(&writer).expect("unable to create a memory map for output") }; + writer + .set_len(Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE as u64) + .expect("must make output file large enough"); + let mut writable_map = unsafe { + MmapOptions::new() + .map_mut(&writer) + .expect("unable to create a memory map for output") + }; - (&mut writable_map[0..]).write(&last_response_file_hash[..]).expect("unable to write a default hash to mmap"); - writable_map.flush().expect("unable to write blank hash to challenge file"); + (&mut writable_map[0..]) + .write_all(&last_response_file_hash[..]) + .expect("unable to write a default hash to mmap"); + writable_map + .flush() + .expect("unable to write blank hash to challenge file"); if is_initial { - BatchedAccumulator::::generate_initial(&mut writable_map, UseCompression::No).expect("generation of initial accumulator is successful"); - } else { - acc.serialize( + BatchedAccumulator::::generate_initial( &mut writable_map, - UseCompression::No - ).unwrap(); + UseCompression::No, + ) + .expect("generation of initial accumulator is successful"); + } else { + acc.serialize(&mut writable_map, UseCompression::No) + .unwrap(); } writable_map.flush().expect("must flush the memory map"); @@ -77,13 +84,13 @@ fn get_challenge_file_hash( let mut challenge_reader = OpenOptions::new() .read(true) - .open(file_name).expect("unable to open temporary tmp_challenge_file_hash"); + .open(file_name) + .expect("unable to open temporary tmp_challenge_file_hash"); let mut contents = vec![]; challenge_reader.read_to_end(&mut contents).unwrap(); - sink.write_all(&contents) - .unwrap(); + sink.write_all(&contents).unwrap(); let mut tmp = [0; 64]; tmp.copy_from_slice(sink.into_hash().as_slice()); @@ -95,11 +102,10 @@ fn get_challenge_file_hash( // accumulator, the player's public key, and the challenge // file's hash. 
fn get_response_file_hash( - acc: &mut BatchedAccumulator::, - pubkey: &PublicKey::, - last_challenge_file_hash: &[u8; 64] -) -> [u8; 64] -{ + acc: &mut BatchedAccumulator, + pubkey: &PublicKey, + last_challenge_file_hash: &[u8; 64], +) -> [u8; 64] { let sink = io::sink(); let mut sink = HashWriter::new(sink); @@ -115,31 +121,40 @@ fn get_response_file_hash( .open(file_name) .expect("unable to create temporary tmp_response_file_hash"); - writer.set_len(Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE as u64).expect("must make output file large enough"); - let mut writable_map = unsafe { MmapOptions::new().map_mut(&writer).expect("unable to create a memory map for output") }; + writer + .set_len(Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE as u64) + .expect("must make output file large enough"); + let mut writable_map = unsafe { + MmapOptions::new() + .map_mut(&writer) + .expect("unable to create a memory map for output") + }; - (&mut writable_map[0..]).write(&last_challenge_file_hash[..]).expect("unable to write a default hash to mmap"); - writable_map.flush().expect("unable to write blank hash to challenge file"); + (&mut writable_map[0..]) + .write_all(&last_challenge_file_hash[..]) + .expect("unable to write a default hash to mmap"); + writable_map + .flush() + .expect("unable to write blank hash to challenge file"); - acc.serialize( - &mut writable_map, - UseCompression::Yes - ).unwrap(); + acc.serialize(&mut writable_map, UseCompression::Yes) + .unwrap(); - pubkey.write::(&mut writable_map, UseCompression::Yes).expect("unable to write public key"); + pubkey + .write::(&mut writable_map, UseCompression::Yes) + .expect("unable to write public key"); writable_map.flush().expect("must flush the memory map"); } let mut challenge_reader = OpenOptions::new() .read(true) - .open(file_name).expect("unable to open temporary tmp_response_file_hash"); + .open(file_name) + .expect("unable to open temporary tmp_response_file_hash"); let mut contents = vec![]; challenge_reader.read_to_end(&mut contents).unwrap(); - sink.write_all(&contents) - .unwrap(); - + sink.write_all(&contents).unwrap(); let mut tmp = [0; 64]; tmp.copy_from_slice(sink.into_hash().as_slice()); @@ -162,11 +177,22 @@ fn new_accumulator_for_verify() -> BatchedAccumulator::generate_initial(&mut writable_map, UseCompression::No).expect("generation of initial accumulator is successful"); - writable_map.flush().expect("unable to flush memmap to disk"); + let mut writable_map = unsafe { + MmapOptions::new() + .map_mut(&file) + .expect("unable to create a memory map") + }; + BatchedAccumulator::::generate_initial( + &mut writable_map, + UseCompression::No, + ) + .expect("generation of initial accumulator is successful"); + writable_map + .flush() + .expect("unable to flush memmap to disk"); } let reader = OpenOptions::new() @@ -174,14 +200,14 @@ fn new_accumulator_for_verify() -> BatchedAccumulator::read::(&response_readable_map, UseCompression::Yes).unwrap(); + let response_file_pubkey = PublicKey::::read::( + &response_readable_map, + UseCompression::Yes, + ) + .unwrap(); // Compute the hash of the response file. (we had it in uncompressed // form in the transcript, but the response file is compressed to save // participants bandwidth.) 
last_response_file_hash = get_response_file_hash( &mut response_file_accumulator, &response_file_pubkey, - &last_challenge_file_hash + &last_challenge_file_hash, ); // Verify the transformation from the previous accumulator to the new @@ -268,9 +315,8 @@ fn main() { ¤t_accumulator, &response_file_accumulator, &response_file_pubkey, - &last_challenge_file_hash - ) - { + &last_challenge_file_hash, + ) { println!(" ... FAILED"); panic!("INVALID RESPONSE FILE!"); } else { @@ -286,35 +332,43 @@ fn main() { // Create the parameters for various 2^m circuit depths. let max_degree = log_2(current_accumulator.tau_powers_g2.len() as u64); - for m in 0..max_degree+1 { + for m in 0..=max_degree { let paramname = format!("phase1radix2m{}", m); println!("Creating {}", paramname); let degree = 1 << m; let mut g1_coeffs = EvaluationDomain::from_coeffs( - current_accumulator.tau_powers_g1[0..degree].iter() + current_accumulator.tau_powers_g1[0..degree] + .iter() .map(|e| Point(e.into_projective())) - .collect() - ).unwrap(); + .collect(), + ) + .unwrap(); let mut g2_coeffs = EvaluationDomain::from_coeffs( - current_accumulator.tau_powers_g2[0..degree].iter() + current_accumulator.tau_powers_g2[0..degree] + .iter() .map(|e| Point(e.into_projective())) - .collect() - ).unwrap(); + .collect(), + ) + .unwrap(); let mut g1_alpha_coeffs = EvaluationDomain::from_coeffs( - current_accumulator.alpha_tau_powers_g1[0..degree].iter() + current_accumulator.alpha_tau_powers_g1[0..degree] + .iter() .map(|e| Point(e.into_projective())) - .collect() - ).unwrap(); + .collect(), + ) + .unwrap(); let mut g1_beta_coeffs = EvaluationDomain::from_coeffs( - current_accumulator.beta_tau_powers_g1[0..degree].iter() + current_accumulator.beta_tau_powers_g1[0..degree] + .iter() .map(|e| Point(e.into_projective())) - .collect() - ).unwrap(); + .collect(), + ) + .unwrap(); // This converts all of the elements into Lagrange coefficients // for later construction of interpolation polynomials @@ -335,21 +389,13 @@ fn main() { // Remove the Point() wrappers - let mut g1_coeffs = g1_coeffs.into_iter() - .map(|e| e.0) - .collect::>(); + let mut g1_coeffs = g1_coeffs.into_iter().map(|e| e.0).collect::>(); - let mut g2_coeffs = g2_coeffs.into_iter() - .map(|e| e.0) - .collect::>(); + let mut g2_coeffs = g2_coeffs.into_iter().map(|e| e.0).collect::>(); - let mut g1_alpha_coeffs = g1_alpha_coeffs.into_iter() - .map(|e| e.0) - .collect::>(); + let mut g1_alpha_coeffs = g1_alpha_coeffs.into_iter().map(|e| e.0).collect::>(); - let mut g1_beta_coeffs = g1_beta_coeffs.into_iter() - .map(|e| e.0) - .collect::>(); + let mut g1_beta_coeffs = g1_beta_coeffs.into_iter().map(|e| e.0).collect::>(); // Batch normalize G1::batch_normalization(&mut g1_coeffs); @@ -362,7 +408,7 @@ fn main() { // x^(i + m) - x^i for i in 0..=(m-2) // for radix2 evaluation domains let mut h = Vec::with_capacity(degree - 1); - for i in 0..(degree-1) { + for i in 0..(degree - 1) { let mut tmp = current_accumulator.tau_powers_g1[i + degree].into_projective(); let mut tmp2 = current_accumulator.tau_powers_g1[i].into_projective(); tmp2.negate(); @@ -387,28 +433,30 @@ fn main() { // Write alpha (in g1) // Needed by verifier for e(alpha, beta) // Needed by prover for A and C elements of proof - writer.write_all( - current_accumulator.alpha_tau_powers_g1[0] - .into_uncompressed() - .as_ref() - ).unwrap(); + writer + .write_all( + current_accumulator.alpha_tau_powers_g1[0] + .into_uncompressed() + .as_ref(), + ) + .unwrap(); // Write beta (in g1) // Needed by prover for C element of proof 
- writer.write_all( - current_accumulator.beta_tau_powers_g1[0] - .into_uncompressed() - .as_ref() - ).unwrap(); + writer + .write_all( + current_accumulator.beta_tau_powers_g1[0] + .into_uncompressed() + .as_ref(), + ) + .unwrap(); // Write beta (in g2) // Needed by verifier for e(alpha, beta) // Needed by prover for B element of proof - writer.write_all( - current_accumulator.beta_g2 - .into_uncompressed() - .as_ref() - ).unwrap(); + writer + .write_all(current_accumulator.beta_g2.into_uncompressed().as_ref()) + .unwrap(); // Lagrange coefficients in G1 (for constructing // LC/IC queries and precomputing polynomials for A) @@ -416,10 +464,9 @@ fn main() { // Was normalized earlier in parallel let coeff = coeff.into_affine(); - writer.write_all( - coeff.into_uncompressed() - .as_ref() - ).unwrap(); + writer + .write_all(coeff.into_uncompressed().as_ref()) + .unwrap(); } // Lagrange coefficients in G2 (for precomputing @@ -428,10 +475,9 @@ fn main() { // Was normalized earlier in parallel let coeff = coeff.into_affine(); - writer.write_all( - coeff.into_uncompressed() - .as_ref() - ).unwrap(); + writer + .write_all(coeff.into_uncompressed().as_ref()) + .unwrap(); } // Lagrange coefficients in G1 with alpha (for @@ -440,10 +486,9 @@ fn main() { // Was normalized earlier in parallel let coeff = coeff.into_affine(); - writer.write_all( - coeff.into_uncompressed() - .as_ref() - ).unwrap(); + writer + .write_all(coeff.into_uncompressed().as_ref()) + .unwrap(); } // Lagrange coefficients in G1 with beta (for @@ -452,10 +497,9 @@ fn main() { // Was normalized earlier in parallel let coeff = coeff.into_affine(); - writer.write_all( - coeff.into_uncompressed() - .as_ref() - ).unwrap(); + writer + .write_all(coeff.into_uncompressed().as_ref()) + .unwrap(); } // Bases for H polynomial computation @@ -463,10 +507,9 @@ fn main() { // Was normalized earlier in parallel let coeff = coeff.into_affine(); - writer.write_all( - coeff.into_uncompressed() - .as_ref() - ).unwrap(); + writer + .write_all(coeff.into_uncompressed().as_ref()) + .unwrap(); } } } diff --git a/powersoftau/src/bin/verify_transform_constrained.rs b/powersoftau/src/bin/verify_transform_constrained.rs index 88def3d..c05c334 100644 --- a/powersoftau/src/bin/verify_transform_constrained.rs +++ b/powersoftau/src/bin/verify_transform_constrained.rs @@ -1,18 +1,11 @@ -extern crate powersoftau; -extern crate bellman_ce; -extern crate memmap; -extern crate rand; -extern crate blake2; -extern crate byteorder; +use powersoftau::batched_accumulator::BatchedAccumulator; +use powersoftau::bn256::Bn256CeremonyParameters; +use powersoftau::keypair::PublicKey; +use powersoftau::parameters::{CheckForCorrectness, UseCompression}; -use powersoftau::bn256::{Bn256CeremonyParameters}; -use powersoftau::batched_accumulator::{BatchedAccumulator}; -use powersoftau::keypair::{PublicKey}; -use powersoftau::parameters::{UseCompression, CheckForCorrectness}; - -use std::fs::OpenOptions; use bellman_ce::pairing::bn256::Bn256; use memmap::*; +use std::fs::OpenOptions; use std::io::{Read, Write}; @@ -32,8 +25,11 @@ fn main() { let response_filename = &args[2]; let new_challenge_filename = &args[3]; - println!("Will verify and decompress a contribution to accumulator for 2^{} powers of tau", Bn256CeremonyParameters::REQUIRED_POWER); - + println!( + "Will verify and decompress a contribution to accumulator for 2^{} powers of tau", + Bn256CeremonyParameters::REQUIRED_POWER + ); + // Try to load challenge file from disk. 
let challenge_reader = OpenOptions::new() .read(true) @@ -41,21 +37,30 @@ fn main() { .expect("unable open challenge file in this directory"); { - let metadata = challenge_reader.metadata().expect("unable to get filesystem metadata for challenge file"); + let metadata = challenge_reader + .metadata() + .expect("unable to get filesystem metadata for challenge file"); let expected_challenge_length = match PREVIOUS_CHALLENGE_IS_COMPRESSED { UseCompression::Yes => { - Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE - Bn256CeremonyParameters::PUBLIC_KEY_SIZE - }, - UseCompression::No => { - Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE + Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE + - Bn256CeremonyParameters::PUBLIC_KEY_SIZE } + UseCompression::No => Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE, }; if metadata.len() != (expected_challenge_length as u64) { - panic!("The size of challenge file should be {}, but it's {}, so something isn't right.", expected_challenge_length, metadata.len()); + panic!( + "The size of challenge file should be {}, but it's {}, so something isn't right.", + expected_challenge_length, + metadata.len() + ); } } - let challenge_readable_map = unsafe { MmapOptions::new().map(&challenge_reader).expect("unable to create a memory map for input") }; + let challenge_readable_map = unsafe { + MmapOptions::new() + .map(&challenge_reader) + .expect("unable to create a memory map for input") + }; // Try to load response file from disk. let response_reader = OpenOptions::new() @@ -64,27 +69,39 @@ fn main() { .expect("unable open response file in this directory"); { - let metadata = response_reader.metadata().expect("unable to get filesystem metadata for response file"); + let metadata = response_reader + .metadata() + .expect("unable to get filesystem metadata for response file"); let expected_response_length = match CONTRIBUTION_IS_COMPRESSED { - UseCompression::Yes => { - Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE - }, + UseCompression::Yes => Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE, UseCompression::No => { - Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE + Bn256CeremonyParameters::PUBLIC_KEY_SIZE + Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE + + Bn256CeremonyParameters::PUBLIC_KEY_SIZE } }; if metadata.len() != (expected_response_length as u64) { - panic!("The size of response file should be {}, but it's {}, so something isn't right.", expected_response_length, metadata.len()); + panic!( + "The size of response file should be {}, but it's {}, so something isn't right.", + expected_response_length, + metadata.len() + ); } } - let response_readable_map = unsafe { MmapOptions::new().map(&response_reader).expect("unable to create a memory map for input") }; + let response_readable_map = unsafe { + MmapOptions::new() + .map(&response_reader) + .expect("unable to create a memory map for input") + }; println!("Calculating previous challenge hash..."); // Check that contribution is correct - let current_accumulator_hash = BatchedAccumulator::::calculate_hash(&challenge_readable_map); + let current_accumulator_hash = + BatchedAccumulator::::calculate_hash( + &challenge_readable_map, + ); println!("Hash of the `challenge` file for verification:"); for line in current_accumulator_hash.as_slice().chunks(16) { @@ -101,8 +118,12 @@ fn main() { // Check the hash chain - a new response must be based on the previous challenge! 
{ let mut response_challenge_hash = [0; 64]; - let memory_slice = response_readable_map.get(0..64).expect("must read point data from file"); - memory_slice.clone().read_exact(&mut response_challenge_hash).expect("couldn't read hash of challenge file from response file"); + let mut memory_slice = response_readable_map + .get(0..64) + .expect("must read point data from file"); + memory_slice + .read_exact(&mut response_challenge_hash) + .expect("couldn't read hash of challenge file from response file"); println!("`response` was based on the hash:"); for line in response_challenge_hash.chunks(16) { @@ -121,7 +142,9 @@ fn main() { } } - let response_hash = BatchedAccumulator::::calculate_hash(&response_readable_map); + let response_hash = BatchedAccumulator::::calculate_hash( + &response_readable_map, + ); println!("Hash of the response file for verification:"); for line in response_hash.as_slice().chunks(16) { @@ -136,18 +159,22 @@ fn main() { } // get the contributor's public key - let public_key = PublicKey::::read::(&response_readable_map, CONTRIBUTION_IS_COMPRESSED) - .expect("wasn't able to deserialize the response file's public key"); - + let public_key = PublicKey::::read::( + &response_readable_map, + CONTRIBUTION_IS_COMPRESSED, + ) + .expect("wasn't able to deserialize the response file's public key"); // check that it follows the protocol - println!("Verifying a contribution to contain proper powers and correspond to the public key..."); + println!( + "Verifying a contribution to contain proper powers and correspond to the public key..." + ); let valid = BatchedAccumulator::::verify_transformation( &challenge_readable_map, &response_readable_map, - &public_key, + &public_key, current_accumulator_hash.as_slice(), PREVIOUS_CHALLENGE_IS_COMPRESSED, CONTRIBUTION_IS_COMPRESSED, @@ -163,7 +190,9 @@ fn main() { } if COMPRESS_NEW_CHALLENGE == UseCompression::Yes { - println!("Don't need to recompress the contribution, please copy response file as new challenge"); + println!( + "Don't need to recompress the contribution, please copy response file as new challenge" + ); } else { println!("Verification succeeded! 
Writing to new challenge file..."); @@ -175,29 +204,44 @@ fn main() { .open(new_challenge_filename) .expect("unable to create new challenge file in this directory"); - - // Recomputation strips the public key and uses hashing to link with the previous contribution after decompression - writer.set_len(Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE as u64).expect("must make output file large enough"); + writer + .set_len(Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE as u64) + .expect("must make output file large enough"); - let mut writable_map = unsafe { MmapOptions::new().map_mut(&writer).expect("unable to create a memory map for output") }; + let mut writable_map = unsafe { + MmapOptions::new() + .map_mut(&writer) + .expect("unable to create a memory map for output") + }; { - (&mut writable_map[0..]).write(response_hash.as_slice()).expect("unable to write a default hash to mmap"); + (&mut writable_map[0..]) + .write_all(response_hash.as_slice()) + .expect("unable to write a default hash to mmap"); - writable_map.flush().expect("unable to write hash to new challenge file"); + writable_map + .flush() + .expect("unable to write hash to new challenge file"); } BatchedAccumulator::::decompress( &response_readable_map, &mut writable_map, - CheckForCorrectness::No).expect("must decompress a response for a new challenge"); - + CheckForCorrectness::No, + ) + .expect("must decompress a response for a new challenge"); + writable_map.flush().expect("must flush the memory map"); - let new_challenge_readable_map = writable_map.make_read_only().expect("must make a map readonly"); + let new_challenge_readable_map = writable_map + .make_read_only() + .expect("must make a map readonly"); - let recompressed_hash = BatchedAccumulator::::calculate_hash(&new_challenge_readable_map); + let recompressed_hash = + BatchedAccumulator::::calculate_hash( + &new_challenge_readable_map, + ); println!("Here's the BLAKE2b hash of the decompressed participant's response as new_challenge file:"); diff --git a/powersoftau/src/bn256/mod.rs b/powersoftau/src/bn256/mod.rs index 1c1c023..33693c0 100644 --- a/powersoftau/src/bn256/mod.rs +++ b/powersoftau/src/bn256/mod.rs @@ -1,33 +1,7 @@ -extern crate rand; -extern crate crossbeam; -extern crate num_cpus; -extern crate blake2; -extern crate generic_array; -extern crate typenum; -extern crate byteorder; -extern crate bellman_ce; - -use self::bellman_ce::pairing::ff::{Field, PrimeField}; -use self::byteorder::{ReadBytesExt, BigEndian}; -use self::rand::{SeedableRng, Rng, Rand}; -use self::rand::chacha::ChaChaRng; -use self::bellman_ce::pairing::bn256::{Bn256}; -use self::bellman_ce::pairing::*; -use std::io::{self, Read, Write}; -use std::sync::{Arc, Mutex}; -use self::generic_array::GenericArray; -use self::typenum::consts::U64; -use self::blake2::{Blake2b, Digest}; -use std::fmt; - -use crate::parameters::*; -use crate::keypair::*; -use crate::utils::*; +use crate::parameters::PowersOfTauParameters; #[derive(Clone)] -pub struct Bn256CeremonyParameters { - -} +pub struct Bn256CeremonyParameters {} impl PowersOfTauParameters for Bn256CeremonyParameters { #[cfg(not(feature = "smalltest"))] @@ -45,81 +19,91 @@ impl PowersOfTauParameters for Bn256CeremonyParameters { const G2_COMPRESSED_BYTE_SIZE: usize = 64; } -#[test] -fn test_pubkey_serialization() { - use self::rand::thread_rng; - - let rng = &mut thread_rng(); - let digest = (0..64).map(|_| rng.gen()).collect::>(); - let (pk, _) = keypair::<_, Bn256>(rng, &digest); - let mut v = vec![]; - pk.serialize(&mut v).unwrap(); - 
assert_eq!(v.len(), Bn256CeremonyParameters::PUBLIC_KEY_SIZE); - let deserialized = PublicKey::::deserialize(&mut &v[..]).unwrap(); - assert!(pk == deserialized); -} +#[cfg(test)] +mod tests { + use super::*; + use crate::accumulator::*; + use crate::{ + keypair::{keypair, PublicKey}, + parameters::{CheckForCorrectness, UseCompression}, + utils::{power_pairs, same_ratio}, + }; + use bellman_ce::pairing::{ + bn256::{Bn256, Fr, G1Affine, G2Affine}, + ff::Field, + CurveAffine, CurveProjective, + }; + use rand::{thread_rng, Rand, Rng}; -#[test] -fn test_power_pairs() { - use self::rand::thread_rng; - use self::bellman_ce::pairing::bn256::{Fr, G1Affine, G2Affine}; - let rng = &mut thread_rng(); - - let mut v = vec![]; - let x = Fr::rand(rng); - let mut acc = Fr::one(); - for _ in 0..100 { - v.push(G1Affine::one().mul(acc).into_affine()); - acc.mul_assign(&x); + #[test] + fn test_pubkey_serialization() { + let rng = &mut thread_rng(); + let digest = (0..64).map(|_| rng.gen()).collect::>(); + let (pk, _) = keypair::<_, Bn256>(rng, &digest); + let mut v = vec![]; + pk.serialize(&mut v).unwrap(); + assert_eq!(v.len(), Bn256CeremonyParameters::PUBLIC_KEY_SIZE); + let deserialized = PublicKey::::deserialize(&mut &v[..]).unwrap(); + assert!(pk == deserialized); } - let gx = G2Affine::one().mul(x).into_affine(); + #[test] + fn test_power_pairs() { + let rng = &mut thread_rng(); - assert!(same_ratio(power_pairs(&v), (G2Affine::one(), gx))); + let mut v = vec![]; + let x = Fr::rand(rng); + let mut acc = Fr::one(); + for _ in 0..100 { + v.push(G1Affine::one().mul(acc).into_affine()); + acc.mul_assign(&x); + } - v[1] = v[1].mul(Fr::rand(rng)).into_affine(); + let gx = G2Affine::one().mul(x).into_affine(); - assert!(!same_ratio(power_pairs(&v), (G2Affine::one(), gx))); -} - -#[test] -fn test_same_ratio() { - use self::rand::thread_rng; - use self::bellman_ce::pairing::bn256::{Fr, G1Affine, G2Affine}; - - let rng = &mut thread_rng(); - - let s = Fr::rand(rng); - let g1 = G1Affine::one(); - let g2 = G2Affine::one(); - let g1_s = g1.mul(s).into_affine(); - let g2_s = g2.mul(s).into_affine(); - - assert!(same_ratio((g1, g1_s), (g2, g2_s))); - assert!(!same_ratio((g1_s, g1), (g2, g2_s))); -} - -#[test] -fn test_accumulator_serialization() { - use crate::accumulator::*; - - use self::rand::thread_rng; - use self::bellman_ce::pairing::bn256::{Bn256, Fr, G1Affine, G2Affine}; - use self::PowersOfTauParameters; - - let rng = &mut thread_rng(); - let mut digest = (0..64).map(|_| rng.gen()).collect::>(); - let params = Bn256CeremonyParameters{}; - let mut acc = Accumulator::::new(params.clone()); - let before = acc.clone(); - let (pk, sk) = keypair::<_, Bn256>(rng, &digest); - acc.transform(&sk); - assert!(verify_transform(&before, &acc, &pk, &digest)); - digest[0] = !digest[0]; - assert!(!verify_transform(&before, &acc, &pk, &digest)); - let mut v = Vec::with_capacity(Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE - 64); - acc.serialize(&mut v, UseCompression::No).unwrap(); - assert_eq!(v.len(), Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE - 64); - let deserialized = Accumulator::deserialize(&mut &v[..], UseCompression::No, CheckForCorrectness::No, params).unwrap(); - assert!(acc == deserialized); + assert!(same_ratio(power_pairs(&v), (G2Affine::one(), gx))); + + v[1] = v[1].mul(Fr::rand(rng)).into_affine(); + + assert!(!same_ratio(power_pairs(&v), (G2Affine::one(), gx))); + } + + #[test] + fn test_same_ratio() { + let rng = &mut thread_rng(); + + let s = Fr::rand(rng); + let g1 = G1Affine::one(); + let g2 = 
G2Affine::one(); + let g1_s = g1.mul(s).into_affine(); + let g2_s = g2.mul(s).into_affine(); + + assert!(same_ratio((g1, g1_s), (g2, g2_s))); + assert!(!same_ratio((g1_s, g1), (g2, g2_s))); + } + + #[test] + fn test_accumulator_serialization() { + let rng = &mut thread_rng(); + let mut digest = (0..64).map(|_| rng.gen()).collect::>(); + let params = Bn256CeremonyParameters {}; + let mut acc = Accumulator::::new(params.clone()); + let before = acc.clone(); + let (pk, sk) = keypair::<_, Bn256>(rng, &digest); + acc.transform(&sk); + assert!(verify_transform(&before, &acc, &pk, &digest)); + digest[0] = !digest[0]; + assert!(!verify_transform(&before, &acc, &pk, &digest)); + let mut v = Vec::with_capacity(Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE - 64); + acc.serialize(&mut v, UseCompression::No).unwrap(); + assert_eq!(v.len(), Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE - 64); + let deserialized = Accumulator::deserialize( + &mut &v[..], + UseCompression::No, + CheckForCorrectness::No, + params, + ) + .unwrap(); + assert!(acc == deserialized); + } } diff --git a/powersoftau/src/keypair.rs b/powersoftau/src/keypair.rs index c9dcbf9..44cffd6 100644 --- a/powersoftau/src/keypair.rs +++ b/powersoftau/src/keypair.rs @@ -1,31 +1,16 @@ -extern crate rand; -extern crate crossbeam; -extern crate num_cpus; -extern crate blake2; -extern crate generic_array; -extern crate typenum; -extern crate byteorder; -extern crate bellman_ce; -extern crate memmap; -extern crate itertools; +use bellman_ce::pairing::{CurveAffine, CurveProjective, EncodedPoint, Engine}; +use blake2::{Blake2b, Digest}; -use itertools::Itertools; use memmap::{Mmap, MmapMut}; -use self::bellman_ce::pairing::ff::{Field, PrimeField}; -use self::byteorder::{ReadBytesExt, BigEndian}; -use self::rand::{SeedableRng, Rng, Rand}; -use self::rand::chacha::ChaChaRng; -use self::bellman_ce::pairing::bn256::{Bn256}; -use self::bellman_ce::pairing::*; -use std::io::{self, Read, Write}; -use std::sync::{Arc, Mutex}; -use self::generic_array::GenericArray; -use self::typenum::consts::U64; -use self::blake2::{Blake2b, Digest}; -use std::fmt; -use super::utils::*; -use super::parameters::*; +use rand::{Rand, Rng}; + +use std::io::{self, Read, Write}; + +use typenum::consts::U64; + +use super::parameters::{DeserializationError, PowersOfTauParameters, UseCompression}; +use super::utils::{hash_to_g2, write_point}; /// Contains terms of the form (s1, s1x, H(s1x)2, H(s1x)2x) /// for all x in τ, α and β, and some s chosen randomly by its creator. The function H "hashes into" the group G2. No points in the public key may be the identity. 
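A note on the structure documented above, placed between the keypair.rs hunks: each (s·G1, s·x·G1) pair is tied to its G2 counterpart through the transcript hash, and that link can be checked with the `compute_g2_s` and `same_ratio` helpers that appear later in this patch (utils.rs). Below is a minimal sketch of that check for the τ terms only; the function name `check_tau_pok` and the personalization byte 0 for τ are assumptions for illustration, and the real check is performed inside `BatchedAccumulator::verify_transformation`, as invoked from verify.rs above.

use bellman_ce::pairing::Engine;
use powersoftau::keypair::PublicKey;
use powersoftau::utils::{compute_g2_s, same_ratio};

// Sketch only. pk.tau_g1 = (s·G1, s·τ·G1) and pk.tau_g2 = τ·h2, where h2 is the
// G2 point that compute_g2_s derives from the transcript digest and the two G1
// points. same_ratio then checks e(s·τ·G1, h2) == e(s·G1, τ·h2), i.e. both
// pairs hide the same exponent τ. The personalization byte 0 is assumed here.
fn check_tau_pok<E: Engine>(pk: &PublicKey<E>, digest: &[u8]) -> bool {
    let h2 = compute_g2_s::<E>(digest, &pk.tau_g1.0, &pk.tau_g1.1, 0);
    same_ratio(pk.tau_g1, (h2, pk.tau_g2))
}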
@@ -41,20 +26,20 @@ pub struct PublicKey { pub beta_g1: (E::G1Affine, E::G1Affine), pub tau_g2: E::G2Affine, pub alpha_g2: E::G2Affine, - pub beta_g2: E::G2Affine + pub beta_g2: E::G2Affine, } impl PartialEq for PublicKey { fn eq(&self, other: &PublicKey) -> bool { - self.tau_g1.0 == other.tau_g1.0 && - self.tau_g1.1 == other.tau_g1.1 && - self.alpha_g1.0 == other.alpha_g1.0 && - self.alpha_g1.1 == other.alpha_g1.1 && - self.beta_g1.0 == other.beta_g1.0 && - self.beta_g1.1 == other.beta_g1.1 && - self.tau_g2 == other.tau_g2 && - self.alpha_g2 == other.alpha_g2 && - self.beta_g2 == other.beta_g2 + self.tau_g1.0 == other.tau_g1.0 + && self.tau_g1.1 == other.tau_g1.1 + && self.alpha_g1.0 == other.alpha_g1.0 + && self.alpha_g1.1 == other.alpha_g1.1 + && self.beta_g1.0 == other.beta_g1.0 + && self.beta_g1.1 == other.beta_g1.1 + && self.tau_g2 == other.tau_g2 + && self.alpha_g2 == other.alpha_g2 + && self.beta_g2 == other.beta_g2 } } @@ -62,12 +47,11 @@ impl PartialEq for PublicKey { pub struct PrivateKey { pub tau: E::Fr, pub alpha: E::Fr, - pub beta: E::Fr + pub beta: E::Fr, } /// Constructs a keypair given an RNG and a 64-byte transcript `digest`. -pub fn keypair(rng: &mut R, digest: &[u8]) -> (PublicKey, PrivateKey) -{ +pub fn keypair(rng: &mut R, digest: &[u8]) -> (PublicKey, PrivateKey) { assert_eq!(digest.len(), 64); // tau is a contribution to the "powers of tau", in a set of points of the form "tau^i * G" @@ -114,18 +98,13 @@ pub fn keypair(rng: &mut R, digest: &[u8]) -> (PublicKey, alpha_g2: pk_alpha.1, beta_g2: pk_beta.1, }, - PrivateKey { - tau: tau, - alpha: alpha, - beta: beta - } + PrivateKey { tau, alpha, beta }, ) } impl PublicKey { /// Serialize the public key. Points are always in uncompressed form. - pub fn serialize(&self, writer: &mut W) -> io::Result<()> - { + pub fn serialize(&self, writer: &mut W) -> io::Result<()> { write_point(writer, &self.tau_g1.0, UseCompression::No)?; write_point(writer, &self.tau_g1.1, UseCompression::No)?; @@ -145,9 +124,10 @@ impl PublicKey { /// Deserialize the public key. Points are always in uncompressed form, and /// always checked, since there aren't very many of them. Does not allow any /// points at infinity. - pub fn deserialize(reader: &mut R) -> Result, DeserializationError> - { - fn read_uncompressed, R: Read>(reader: &mut R) -> Result { + pub fn deserialize(reader: &mut R) -> Result, DeserializationError> { + fn read_uncompressed, R: Read>( + reader: &mut R, + ) -> Result { let mut repr = C::Uncompressed::empty(); reader.read_exact(repr.as_mut())?; let v = repr.into_affine()?; @@ -176,60 +156,55 @@ impl PublicKey { tau_g1: (tau_g1_s, tau_g1_s_tau), alpha_g1: (alpha_g1_s, alpha_g1_s_alpha), beta_g1: (beta_g1_s, beta_g1_s_beta), - tau_g2: tau_g2, - alpha_g2: alpha_g2, - beta_g2: beta_g2 + tau_g2, + alpha_g2, + beta_g2, }) } } impl PublicKey { - /// This function is intended to write the key to the memory map and calculates /// a position for writing into the file itself based on information whether /// contribution was output in compressed on uncompressed form pub fn write
<P>
( &self, output_map: &mut MmapMut, - accumulator_was_compressed: UseCompression - ) - -> io::Result<()> - where P: PowersOfTauParameters + accumulator_was_compressed: UseCompression, + ) -> io::Result<()> + where + P: PowersOfTauParameters, { let mut position = match accumulator_was_compressed { - UseCompression::Yes => { - P::CONTRIBUTION_BYTE_SIZE - P::PUBLIC_KEY_SIZE - }, - UseCompression::No => { - P::ACCUMULATOR_BYTE_SIZE - } + UseCompression::Yes => P::CONTRIBUTION_BYTE_SIZE - P::PUBLIC_KEY_SIZE, + UseCompression::No => P::ACCUMULATOR_BYTE_SIZE, }; - (&mut output_map[position..]).write(&self.tau_g1.0.into_uncompressed().as_ref())?; + (&mut output_map[position..]).write_all(&self.tau_g1.0.into_uncompressed().as_ref())?; position += P::G1_UNCOMPRESSED_BYTE_SIZE; - (&mut output_map[position..]).write(&self.tau_g1.1.into_uncompressed().as_ref())?; + (&mut output_map[position..]).write_all(&self.tau_g1.1.into_uncompressed().as_ref())?; position += P::G1_UNCOMPRESSED_BYTE_SIZE; - (&mut output_map[position..]).write(&self.alpha_g1.0.into_uncompressed().as_ref())?; + (&mut output_map[position..]).write_all(&self.alpha_g1.0.into_uncompressed().as_ref())?; position += P::G1_UNCOMPRESSED_BYTE_SIZE; - (&mut output_map[position..]).write(&self.alpha_g1.1.into_uncompressed().as_ref())?; + (&mut output_map[position..]).write_all(&self.alpha_g1.1.into_uncompressed().as_ref())?; position += P::G1_UNCOMPRESSED_BYTE_SIZE; - (&mut output_map[position..]).write(&self.beta_g1.0.into_uncompressed().as_ref())?; + (&mut output_map[position..]).write_all(&self.beta_g1.0.into_uncompressed().as_ref())?; position += P::G1_UNCOMPRESSED_BYTE_SIZE; - (&mut output_map[position..]).write(&self.beta_g1.1.into_uncompressed().as_ref())?; + (&mut output_map[position..]).write_all(&self.beta_g1.1.into_uncompressed().as_ref())?; position += P::G1_UNCOMPRESSED_BYTE_SIZE; - (&mut output_map[position..]).write(&self.tau_g2.into_uncompressed().as_ref())?; + (&mut output_map[position..]).write_all(&self.tau_g2.into_uncompressed().as_ref())?; position += P::G2_UNCOMPRESSED_BYTE_SIZE; - (&mut output_map[position..]).write(&self.alpha_g2.into_uncompressed().as_ref())?; + (&mut output_map[position..]).write_all(&self.alpha_g2.into_uncompressed().as_ref())?; position += P::G2_UNCOMPRESSED_BYTE_SIZE; - (&mut output_map[position..]).write(&self.beta_g2.into_uncompressed().as_ref())?; + (&mut output_map[position..]).write_all(&self.beta_g2.into_uncompressed().as_ref())?; output_map.flush()?; @@ -241,15 +216,21 @@ impl PublicKey { /// points at infinity. pub fn read
<P>
( input_map: &Mmap, - accumulator_was_compressed: UseCompression + accumulator_was_compressed: UseCompression, ) -> Result - where P: PowersOfTauParameters + where + P: PowersOfTauParameters, { - fn read_uncompressed>(input_map: &Mmap, position: usize) -> Result { + fn read_uncompressed>( + input_map: &Mmap, + position: usize, + ) -> Result { let mut repr = C::Uncompressed::empty(); let element_size = C::Uncompressed::size(); - let memory_slice = input_map.get(position..position+element_size).expect("must read point data from file"); - memory_slice.clone().read_exact(repr.as_mut())?; + let mut memory_slice = input_map + .get(position..position + element_size) + .expect("must read point data from file"); + memory_slice.read_exact(repr.as_mut())?; let v = repr.into_affine()?; if v.is_zero() { @@ -260,12 +241,8 @@ impl PublicKey { } let mut position = match accumulator_was_compressed { - UseCompression::Yes => { - P::CONTRIBUTION_BYTE_SIZE - P::PUBLIC_KEY_SIZE - }, - UseCompression::No => { - P::ACCUMULATOR_BYTE_SIZE - } + UseCompression::Yes => P::CONTRIBUTION_BYTE_SIZE - P::PUBLIC_KEY_SIZE, + UseCompression::No => P::ACCUMULATOR_BYTE_SIZE, }; let tau_g1_s = read_uncompressed::(input_map, position)?; @@ -298,9 +275,9 @@ impl PublicKey { tau_g1: (tau_g1_s, tau_g1_s_tau), alpha_g1: (alpha_g1_s, alpha_g1_s_alpha), beta_g1: (beta_g1_s, beta_g1_s_beta), - tau_g2: tau_g2, - alpha_g2: alpha_g2, - beta_g2: beta_g2 + tau_g2, + alpha_g2, + beta_g2, }) } } diff --git a/powersoftau/src/lib.rs b/powersoftau/src/lib.rs index ed75167..f4ac2b3 100644 --- a/powersoftau/src/lib.rs +++ b/powersoftau/src/lib.rs @@ -1,8 +1,6 @@ -#![allow(unused_imports)] - -pub mod bn256; pub mod accumulator; pub mod batched_accumulator; +pub mod bn256; pub mod keypair; pub mod parameters; -pub mod utils; \ No newline at end of file +pub mod utils; diff --git a/powersoftau/src/parameters.rs b/powersoftau/src/parameters.rs index 32895e6..cee1b49 100644 --- a/powersoftau/src/parameters.rs +++ b/powersoftau/src/parameters.rs @@ -1,30 +1,10 @@ -extern crate rand; -extern crate crossbeam; -extern crate num_cpus; -extern crate blake2; -extern crate generic_array; -extern crate typenum; -extern crate byteorder; -extern crate bellman_ce; - -use bellman_ce::pairing::ff::{Field, PrimeField}; -use byteorder::{ReadBytesExt, BigEndian}; -use rand::{SeedableRng, Rng, Rand}; -use rand::chacha::ChaChaRng; -use bellman_ce::pairing::bn256::{Bn256}; -use bellman_ce::pairing::*; -use std::io::{self, Read, Write}; -use std::sync::{Arc, Mutex}; -use generic_array::GenericArray; -use typenum::consts::U64; -use blake2::{Blake2b, Digest}; +use bellman_ce::pairing::GroupDecodingError; use std::fmt; - -use super::keypair::*; +use std::io; pub trait PowersOfTauParameters: Clone { - const REQUIRED_POWER: usize; - + const REQUIRED_POWER: usize; + const G1_UNCOMPRESSED_BYTE_SIZE: usize; const G2_UNCOMPRESSED_BYTE_SIZE: usize; const G1_COMPRESSED_BYTE_SIZE: usize; @@ -58,13 +38,11 @@ pub trait PowersOfTauParameters: Clone { const EMPIRICAL_BATCH_SIZE: usize = 1 << 21; } - - /// Determines if point compression should be used. #[derive(Copy, Clone, PartialEq)] pub enum UseCompression { Yes, - No + No, } /// Determines if points should be checked for correctness during deserialization. @@ -73,16 +51,15 @@ pub enum UseCompression { #[derive(Copy, Clone, PartialEq)] pub enum CheckForCorrectness { Yes, - No + No, } - /// Errors that might occur during deserialization. 
#[derive(Debug)] pub enum DeserializationError { IoError(io::Error), DecodingError(GroupDecodingError), - PointAtInfinity + PointAtInfinity, } impl fmt::Display for DeserializationError { @@ -90,7 +67,7 @@ impl fmt::Display for DeserializationError { match *self { DeserializationError::IoError(ref e) => write!(f, "Disk IO error: {}", e), DeserializationError::DecodingError(ref e) => write!(f, "Decoding error: {}", e), - DeserializationError::PointAtInfinity => write!(f, "Point at infinity found") + DeserializationError::PointAtInfinity => write!(f, "Point at infinity found"), } } } @@ -113,5 +90,5 @@ pub enum ElementType { TauG2, AlphaG1, BetaG1, - BetaG2 + BetaG2, } diff --git a/powersoftau/src/utils.rs b/powersoftau/src/utils.rs index fa08314..064db39 100644 --- a/powersoftau/src/utils.rs +++ b/powersoftau/src/utils.rs @@ -1,126 +1,76 @@ -extern crate rand; -extern crate crossbeam; -extern crate num_cpus; -extern crate blake2; -extern crate generic_array; -extern crate typenum; -extern crate byteorder; -extern crate bellman_ce; - use bellman_ce::pairing::ff::{Field, PrimeField, PrimeFieldRepr}; -use byteorder::{ReadBytesExt, BigEndian}; -use rand::{SeedableRng, Rng, Rand}; -use rand::chacha::ChaChaRng; -use bellman_ce::pairing::bn256::{Bn256}; use bellman_ce::pairing::*; -use std::io::{self, Read, Write}; -use std::sync::{Arc, Mutex}; -use generic_array::GenericArray; -use typenum::consts::U64; use blake2::{Blake2b, Digest}; -use std::fmt; +use byteorder::{BigEndian, ReadBytesExt}; +use generic_array::GenericArray; +use rand::chacha::ChaChaRng; +use rand::{Rand, Rng, SeedableRng}; -use super::parameters::*; +use std::io::{self, Write}; +use std::sync::Arc; +use typenum::consts::U64; + +use super::parameters::UseCompression; /// Hashes to G2 using the first 32 bytes of `digest`. Panics if `digest` is less /// than 32 bytes. 
-pub fn hash_to_g2(mut digest: &[u8]) -> E::G2 -{ +pub fn hash_to_g2(mut digest: &[u8]) -> E::G2 { assert!(digest.len() >= 32); let mut seed = Vec::with_capacity(8); for _ in 0..8 { - seed.push(digest.read_u32::().expect("assertion above guarantees this to work")); + seed.push( + digest + .read_u32::() + .expect("assertion above guarantees this to work"), + ); } ChaChaRng::from_seed(&seed).gen() } -#[test] -fn test_hash_to_g2() { - assert!( - hash_to_g2::(&[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33]) - == - hash_to_g2::(&[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,34]) - ); +#[cfg(test)] +mod tests { + use super::*; + use bellman_ce::pairing::bn256::Bn256; - assert!( - hash_to_g2::(&[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32]) - != - hash_to_g2::(&[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,33]) - ); + #[test] + fn test_hash_to_g2() { + assert!( + hash_to_g2::(&[ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, 32, 33 + ]) == hash_to_g2::(&[ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, 32, 34 + ]) + ); + + assert!( + hash_to_g2::(&[ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, 32 + ]) != hash_to_g2::(&[ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, 33 + ]) + ); + } } -/// Computes a random linear combination over v1/v2. -/// -/// Checking that many pairs of elements are exponentiated by -/// the same `x` can be achieved (with high probability) with -/// the following technique: -/// -/// Given v1 = [a, b, c] and v2 = [as, bs, cs], compute -/// (a*r1 + b*r2 + c*r3, (as)*r1 + (bs)*r2 + (cs)*r3) for some -/// random r1, r2, r3. Given (g, g^s)... -/// -/// e(g, (as)*r1 + (bs)*r2 + (cs)*r3) = e(g^s, a*r1 + b*r2 + c*r3) -/// -/// ... with high probability. -// fn merge_pairs>(v1: &[G], v2: &[G]) -> (G, G) -// { -// use std::sync::{Arc, Mutex}; -// use self::rand::{thread_rng}; +fn merge_pairs>( + v1: &[G], + v2: &[G], +) -> (G, G) { + use rand::thread_rng; -// assert_eq!(v1.len(), v2.len()); - -// let chunk = (v1.len() / num_cpus::get()) + 1; - -// let s = Arc::new(Mutex::new(G::Projective::zero())); -// let sx = Arc::new(Mutex::new(G::Projective::zero())); - -// crossbeam::scope(|scope| { -// for (v1, v2) in v1.chunks(chunk).zip(v2.chunks(chunk)) { -// let s = s.clone(); -// let sx = sx.clone(); - -// scope.spawn(move || { -// // We do not need to be overly cautious of the RNG -// // used for this check. 
-// let rng = &mut thread_rng(); - -// let mut wnaf = Wnaf::new(); -// let mut local_s = G::Projective::zero(); -// let mut local_sx = G::Projective::zero(); - -// for (v1, v2) in v1.iter().zip(v2.iter()) { -// let rho = G::Scalar::rand(rng); -// let mut wnaf = wnaf.scalar(rho.into_repr()); -// let v1 = wnaf.base(v1.into_projective()); -// let v2 = wnaf.base(v2.into_projective()); - -// local_s.add_assign(&v1); -// local_sx.add_assign(&v2); -// } - -// s.lock().unwrap().add_assign(&local_s); -// sx.lock().unwrap().add_assign(&local_sx); -// }); -// } -// }); - -// let s = s.lock().unwrap().into_affine(); -// let sx = sx.lock().unwrap().into_affine(); - -// (s, sx) -// } - -fn merge_pairs>(v1: &[G], v2: &[G]) -> (G, G) -{ - use self::rand::{thread_rng}; - assert_eq!(v1.len(), v2.len()); let rng = &mut thread_rng(); - let randomness: Vec<::Repr> = (0..v1.len()).map(|_| G::Scalar::rand(rng).into_repr()).collect(); + let randomness: Vec<::Repr> = (0..v1.len()) + .map(|_| G::Scalar::rand(rng).into_repr()) + .collect(); let s = dense_multiexp(&v1, &randomness[..]).into_affine(); let sx = dense_multiexp(&v2, &randomness[..]).into_affine(); @@ -130,9 +80,8 @@ fn merge_pairs>(v1: &[G], /// Construct a single pair (s, s^x) for a vector of /// the form [1, x, x^2, x^3, ...]. -pub fn power_pairs>(v: &[G]) -> (G, G) -{ - merge_pairs::(&v[0..(v.len()-1)], &v[1..]) +pub fn power_pairs>(v: &[G]) -> (G, G) { + merge_pairs::(&v[0..(v.len() - 1)], &v[1..]) } /// Compute BLAKE2b("") @@ -146,26 +95,20 @@ pub fn reduced_hash(old_power: u8, new_power: u8) -> GenericArray { hasher.result() } - - /// Checks if pairs have the same ratio. /// Under the hood uses pairing to check /// x1/x2 = y1/y2 => x1*y2 = x2*y1 pub fn same_ratio>( g1: (G1, G1), - g2: (G1::Pair, G1::Pair) -) -> bool -{ + g2: (G1::Pair, G1::Pair), +) -> bool { g1.0.pairing_with(&g2.1) == g1.1.pairing_with(&g2.0) } -pub fn write_point( - writer: &mut W, - p: &G, - compression: UseCompression -) -> io::Result<()> - where W: Write, - G: CurveAffine +pub fn write_point(writer: &mut W, p: &G, compression: UseCompression) -> io::Result<()> +where + W: Write, + G: CurveAffine, { match compression { UseCompression::Yes => writer.write_all(p.into_compressed().as_ref()), @@ -173,13 +116,12 @@ pub fn write_point( } } -pub fn compute_g2_s ( +pub fn compute_g2_s( digest: &[u8], g1_s: &E::G1Affine, - g1_s_x: &E::G1Affine, - personalization: u8 -) -> E::G2Affine -{ + g1_s_x: &E::G1Affine, + personalization: u8, +) -> E::G2Affine { let mut h = Blake2b::default(); h.input(&[personalization]); h.input(digest); @@ -193,10 +135,9 @@ pub fn compute_g2_s ( /// the number of bases is the same as the number of exponents. #[allow(dead_code)] pub fn dense_multiexp( - bases: & [G], - exponents: & [::Repr] -) -> ::Projective -{ + bases: &[G], + exponents: &[::Repr], +) -> ::Projective { if exponents.len() != bases.len() { panic!("invalid length") } @@ -210,14 +151,13 @@ pub fn dense_multiexp( } fn dense_multiexp_inner( - bases: & [G], - exponents: & [::Repr], + bases: &[G], + exponents: &[::Repr], mut skip: u32, c: u32, - handle_trivial: bool -) -> ::Projective -{ - use std::sync::{Mutex}; + handle_trivial: bool, +) -> ::Projective { + use std::sync::Mutex; // Perform this region of the multiexp. We use a different strategy - go over region in parallel, // then over another region, etc. 
No Arc required let chunk = (bases.len() / num_cpus::get()) + 1; @@ -228,7 +168,7 @@ fn dense_multiexp_inner( crossbeam::scope(|scope| { for (base, exp) in bases.chunks(chunk).zip(exponents.chunks(chunk)) { let this_region_rwlock = arc.clone(); - // let handle = + // let handle = scope.spawn(move || { let mut buckets = vec![::Projective::zero(); (1 << c) - 1]; // Accumulate the result @@ -268,42 +208,32 @@ fn dense_multiexp_inner( acc.add_assign(&running_sum); } - let mut guard = match this_region_rwlock.lock() { - Ok(guard) => guard, - Err(_) => { - panic!("poisoned!"); - // poisoned.into_inner() - } - }; + let mut guard = this_region_rwlock.lock().expect("poisoned"); (*guard).add_assign(&acc); }); - } }); let this_region = Arc::try_unwrap(arc).unwrap(); - let this_region = this_region.into_inner().unwrap(); - this_region + this_region.into_inner().unwrap() }; skip += c; if skip >= ::NUM_BITS { // There isn't another region, and this will be the highest region - return this; + this } else { // next region is actually higher than this one, so double it enough times - let mut next_region = dense_multiexp_inner( - bases, exponents, skip, c, false); + let mut next_region = dense_multiexp_inner(bases, exponents, skip, c, false); for _ in 0..c { next_region.double(); } next_region.add_assign(&this); - return next_region; + next_region } } -
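To make the batching idea behind `merge_pairs` concrete: checking that every pair in v1/v2 differs by the same exponent reduces to folding both vectors with one set of random scalars and comparing the two folded points with `same_ratio`. Below is a naive, unoptimized sketch of that fold, written against the same `bellman_ce` pairing traits used above; the name `naive_merge_pairs` and the per-element `mul` loop are illustrative only, since the patched code performs the fold with `dense_multiexp` for speed.

use bellman_ce::pairing::{CurveAffine, CurveProjective, Engine};
use rand::{thread_rng, Rand};

// Fold v1 = [a, b, c, ...] and v2 = [a·s, b·s, c·s, ...] with the same random
// scalars r_i. The sums (Σ r_i·v1_i, Σ r_i·v2_i) still differ by the factor s
// (with overwhelming probability only if every pair did), which same_ratio can
// then confirm with a single pairing equation.
fn naive_merge_pairs<E: Engine, G: CurveAffine<Engine = E, Scalar = E::Fr>>(
    v1: &[G],
    v2: &[G],
) -> (G, G) {
    assert_eq!(v1.len(), v2.len());
    let rng = &mut thread_rng();

    let mut s = G::Projective::zero();
    let mut sx = G::Projective::zero();
    for (a, b) in v1.iter().zip(v2.iter()) {
        let r = E::Fr::rand(rng);
        s.add_assign(&a.mul(r));
        sx.add_assign(&b.mul(r));
    }
    (s.into_affine(), sx.into_affine())
}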