diff --git a/powersoftau/Cargo.lock b/powersoftau/Cargo.lock index 3ce7bfb..8bb4092 100644 --- a/powersoftau/Cargo.lock +++ b/powersoftau/Cargo.lock @@ -249,6 +249,14 @@ name = "libc" version = "0.2.49" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "log" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "memmap" version = "0.7.0" @@ -319,6 +327,7 @@ dependencies = [ "generic-array 0.8.3 (registry+https://github.com/rust-lang/crates.io-index)", "hex-literal 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -510,6 +519,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5b8467d9c1cebe26feb08c640139247fac215782d35371ade9a2136ed6085358" "checksum lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bc5729f27f159ddd61f4df6228e827e86643d4d3e7c32183cb30a1c08f604a14" "checksum libc 0.2.49 (registry+https://github.com/rust-lang/crates.io-index)" = "413f3dfc802c5dc91dc570b05125b6cda9855edfaa9825c9849807876376e70e" +"checksum log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" "checksum memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b" "checksum memoffset 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0f9dc261e2b62d7a622bf416ea3c5245cdd5d9a7fcc428c0d06804dfce1775b3" "checksum nodrop 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "2f9667ddcc6cc8a43afc9b7917599d7216aa09c463919ea32c59ed6cac8bc945" diff --git a/powersoftau/Cargo.toml b/powersoftau/Cargo.toml index b43968f..624d3d4 100644 --- a/powersoftau/Cargo.toml +++ b/powersoftau/Cargo.toml @@ -26,7 +26,4 @@ memmap = "0.7.0" itertools = "0.8.0" bellman_ce = { path = "../bellman" } - -[features] -smalltest = [] - +log = "0.4.8" diff --git a/powersoftau/src/accumulator.rs b/powersoftau/src/accumulator.rs deleted file mode 100644 index 1280240..0000000 --- a/powersoftau/src/accumulator.rs +++ /dev/null @@ -1,500 +0,0 @@ -//! This ceremony constructs the "powers of tau" for Jens Groth's 2016 zk-SNARK proving -//! system using the BLS12-381 pairing-friendly elliptic curve construction. -//! -//! # Overview -//! -//! Participants of the ceremony receive a "challenge" file containing: -//! -//! * the BLAKE2b hash of the last file entered into the transcript -//! * an `Accumulator` (with curve points encoded in uncompressed form for fast deserialization) -//! -//! The participant runs a tool which generates a random keypair (`PublicKey`, `PrivateKey`) -//! used for modifying the `Accumulator` from the "challenge" file. The keypair is then used to -//! transform the `Accumulator`, and a "response" file is generated containing: -//! -//! * the BLAKE2b hash of the "challenge" file (thus forming a hash chain over the entire transcript) -//! 
* an `Accumulator` (with curve points encoded in compressed form for fast uploading) -//! * the `PublicKey` -//! -//! This "challenge" file is entered into the protocol transcript. A given transcript is valid -//! if the transformations between consecutive `Accumulator`s verify with their respective -//! `PublicKey`s. Participants (and the public) can ensure that their contribution to the -//! `Accumulator` was accepted by ensuring the transcript contains their "response" file, ideally -//! by comparison of the BLAKE2b hash of the "response" file. -//! -//! After some time has elapsed for participants to contribute to the ceremony, a participant is -//! simulated with a randomness beacon. The resulting `Accumulator` contains partial zk-SNARK -//! public parameters for all circuits within a bounded size. -use bellman_ce::pairing::{ - ff::{Field, PrimeField}, - CurveAffine, CurveProjective, EncodedPoint, Engine, Wnaf, -}; -use blake2::{Blake2b, Digest}; - -use generic_array::GenericArray; - -use std::io::{self, Read, Write}; -use std::sync::{Arc, Mutex}; -use typenum::consts::U64; - -use super::keypair::{PrivateKey, PublicKey}; -use super::parameters::{ - CheckForCorrectness, DeserializationError, PowersOfTauParameters, UseCompression, -}; -use super::utils::{hash_to_g2, power_pairs, same_ratio, write_point}; - -/// The `Accumulator` is an object that participants of the ceremony contribute -/// randomness to. This object contains powers of trapdoor `tau` in G1 and in G2 over -/// fixed generators, and additionally in G1 over two other generators of exponents -/// `alpha` and `beta` over those fixed generators. In other words: -/// -/// * (τ, τ2, ..., τ222 - 2, α, ατ, ατ2, ..., ατ221 - 1, β, βτ, βτ2, ..., βτ221 - 1)1 -/// * (β, τ, τ2, ..., τ221 - 1)2 -#[derive(Eq, Clone)] -pub struct Accumulator { - /// tau^0, tau^1, tau^2, ..., tau^{TAU_POWERS_G1_LENGTH - 1} - pub tau_powers_g1: Vec, - /// tau^0, tau^1, tau^2, ..., tau^{TAU_POWERS_LENGTH - 1} - pub tau_powers_g2: Vec, - /// alpha * tau^0, alpha * tau^1, alpha * tau^2, ..., alpha * tau^{TAU_POWERS_LENGTH - 1} - pub alpha_tau_powers_g1: Vec, - /// beta * tau^0, beta * tau^1, beta * tau^2, ..., beta * tau^{TAU_POWERS_LENGTH - 1} - pub beta_tau_powers_g1: Vec, - /// beta - pub beta_g2: E::G2Affine, - /// Keep parameters here - pub parameters: P, -} - -impl PartialEq for Accumulator { - fn eq(&self, other: &Accumulator) -> bool { - self.tau_powers_g1.eq(&other.tau_powers_g1) - && self.tau_powers_g2.eq(&other.tau_powers_g2) - && self.alpha_tau_powers_g1.eq(&other.alpha_tau_powers_g1) - && self.beta_tau_powers_g1.eq(&other.beta_tau_powers_g1) - && self.beta_g2 == other.beta_g2 - } -} - -impl Accumulator { - /// Constructs an "initial" accumulator with τ = 1, α = 1, β = 1. - pub fn new(parameters: P) -> Self { - Accumulator { - tau_powers_g1: vec![E::G1Affine::one(); P::TAU_POWERS_G1_LENGTH], - tau_powers_g2: vec![E::G2Affine::one(); P::TAU_POWERS_LENGTH], - alpha_tau_powers_g1: vec![E::G1Affine::one(); P::TAU_POWERS_LENGTH], - beta_tau_powers_g1: vec![E::G1Affine::one(); P::TAU_POWERS_LENGTH], - beta_g2: E::G2Affine::one(), - parameters, - } - } - - /// Write the accumulator with some compression behavior. 
- pub fn serialize( - &self, - writer: &mut W, - compression: UseCompression, - ) -> io::Result<()> { - fn write_all( - writer: &mut W, - c: &[C], - compression: UseCompression, - ) -> io::Result<()> { - for c in c { - write_point(writer, c, compression)?; - } - - Ok(()) - } - - write_all(writer, &self.tau_powers_g1, compression)?; - write_all(writer, &self.tau_powers_g2, compression)?; - write_all(writer, &self.alpha_tau_powers_g1, compression)?; - write_all(writer, &self.beta_tau_powers_g1, compression)?; - write_all(writer, &[self.beta_g2], compression)?; - - Ok(()) - } - - /// Read the accumulator from disk with some compression behavior. `checked` - /// indicates whether we should check it's a valid element of the group and - /// not the point at infinity. - pub fn deserialize( - reader: &mut R, - compression: UseCompression, - checked: CheckForCorrectness, - parameters: P, - ) -> Result { - fn read_all>( - reader: &mut R, - size: usize, - compression: UseCompression, - checked: CheckForCorrectness, - ) -> Result, DeserializationError> { - fn decompress_all( - reader: &mut R, - size: usize, - checked: CheckForCorrectness, - ) -> Result, DeserializationError> { - // Read the encoded elements - let mut res = vec![ENC::empty(); size]; - - for encoded in &mut res { - reader.read_exact(encoded.as_mut())?; - } - - // Allocate space for the deserialized elements - let mut res_affine = vec![ENC::Affine::zero(); size]; - - let mut chunk_size = res.len() / num_cpus::get(); - if chunk_size == 0 { - chunk_size = 1; - } - - // If any of our threads encounter a deserialization/IO error, catch - // it with this. - let decoding_error = Arc::new(Mutex::new(None)); - - crossbeam::scope(|scope| { - for (source, target) in res - .chunks(chunk_size) - .zip(res_affine.chunks_mut(chunk_size)) - { - let decoding_error = decoding_error.clone(); - - scope.spawn(move || { - for (source, target) in source.iter().zip(target.iter_mut()) { - match { - // If we're a participant, we don't need to check all of the - // elements in the accumulator, which saves a lot of time. - // The hash chain prevents this from being a problem: the - // transcript guarantees that the accumulator was properly - // formed. 
- match checked { - CheckForCorrectness::Yes => { - // Points at infinity are never expected in the accumulator - source.into_affine().map_err(|e| e.into()).and_then( - |source| { - if source.is_zero() { - Err(DeserializationError::PointAtInfinity) - } else { - Ok(source) - } - }, - ) - } - CheckForCorrectness::No => { - source.into_affine_unchecked().map_err(|e| e.into()) - } - } - } { - Ok(source) => { - *target = source; - } - Err(e) => { - *decoding_error.lock().unwrap() = Some(e); - } - } - } - }); - } - }); - - match Arc::try_unwrap(decoding_error) - .unwrap() - .into_inner() - .unwrap() - { - Some(e) => Err(e), - None => Ok(res_affine), - } - } - - match compression { - UseCompression::Yes => decompress_all::<_, C::Compressed>(reader, size, checked), - UseCompression::No => decompress_all::<_, C::Uncompressed>(reader, size, checked), - } - } - - let tau_powers_g1 = - read_all::(reader, P::TAU_POWERS_G1_LENGTH, compression, checked)?; - let tau_powers_g2 = - read_all::(reader, P::TAU_POWERS_LENGTH, compression, checked)?; - let alpha_tau_powers_g1 = - read_all::(reader, P::TAU_POWERS_LENGTH, compression, checked)?; - let beta_tau_powers_g1 = - read_all::(reader, P::TAU_POWERS_LENGTH, compression, checked)?; - let beta_g2 = read_all::(reader, 1, compression, checked)?[0]; - - Ok(Accumulator { - tau_powers_g1, - tau_powers_g2, - alpha_tau_powers_g1, - beta_tau_powers_g1, - beta_g2, - parameters, - }) - } - - /// Transforms the accumulator with a private key. - pub fn transform(&mut self, key: &PrivateKey) { - // Construct the powers of tau - let mut taupowers = vec![E::Fr::zero(); P::TAU_POWERS_G1_LENGTH]; - let chunk_size = P::TAU_POWERS_G1_LENGTH / num_cpus::get(); - - // Construct exponents in parallel - crossbeam::scope(|scope| { - for (i, taupowers) in taupowers.chunks_mut(chunk_size).enumerate() { - scope.spawn(move || { - let mut acc = key.tau.pow(&[(i * chunk_size) as u64]); - - for t in taupowers { - *t = acc; - acc.mul_assign(&key.tau); - } - }); - } - }); - - /// Exponentiate a large number of points, with an optional coefficient to be applied to the - /// exponent. - fn batch_exp>( - bases: &mut [C], - exp: &[C::Scalar], - coeff: Option<&C::Scalar>, - ) { - assert_eq!(bases.len(), exp.len()); - let mut projective = vec![C::Projective::zero(); bases.len()]; - let chunk_size = bases.len() / num_cpus::get(); - - // Perform wNAF over multiple cores, placing results into `projective`. 
- crossbeam::scope(|scope| { - for ((bases, exp), projective) in bases - .chunks_mut(chunk_size) - .zip(exp.chunks(chunk_size)) - .zip(projective.chunks_mut(chunk_size)) - { - scope.spawn(move || { - let mut wnaf = Wnaf::new(); - - for ((base, exp), projective) in - bases.iter_mut().zip(exp.iter()).zip(projective.iter_mut()) - { - let mut exp = *exp; - if let Some(coeff) = coeff { - exp.mul_assign(coeff); - } - - *projective = - wnaf.base(base.into_projective(), 1).scalar(exp.into_repr()); - } - }); - } - }); - - // Perform batch normalization - crossbeam::scope(|scope| { - for projective in projective.chunks_mut(chunk_size) { - scope.spawn(move || { - C::Projective::batch_normalization(projective); - }); - } - }); - - // Turn it all back into affine points - for (projective, affine) in projective.iter().zip(bases.iter_mut()) { - *affine = projective.into_affine(); - } - } - - batch_exp::(&mut self.tau_powers_g1, &taupowers[0..], None); - batch_exp::( - &mut self.tau_powers_g2, - &taupowers[0..P::TAU_POWERS_LENGTH], - None, - ); - batch_exp::( - &mut self.alpha_tau_powers_g1, - &taupowers[0..P::TAU_POWERS_LENGTH], - Some(&key.alpha), - ); - batch_exp::( - &mut self.beta_tau_powers_g1, - &taupowers[0..P::TAU_POWERS_LENGTH], - Some(&key.beta), - ); - self.beta_g2 = self.beta_g2.mul(key.beta).into_affine(); - } -} - -/// Verifies a transformation of the `Accumulator` with the `PublicKey`, given a 64-byte transcript `digest`. -pub fn verify_transform( - before: &Accumulator, - after: &Accumulator, - key: &PublicKey, - digest: &[u8], -) -> bool { - assert_eq!(digest.len(), 64); - - let compute_g2_s = |g1_s: E::G1Affine, g1_s_x: E::G1Affine, personalization: u8| { - let mut h = Blake2b::default(); - h.input(&[personalization]); - h.input(digest); - h.input(g1_s.into_uncompressed().as_ref()); - h.input(g1_s_x.into_uncompressed().as_ref()); - hash_to_g2::(h.result().as_ref()).into_affine() - }; - - let tau_g2_s = compute_g2_s(key.tau_g1.0, key.tau_g1.1, 0); - let alpha_g2_s = compute_g2_s(key.alpha_g1.0, key.alpha_g1.1, 1); - let beta_g2_s = compute_g2_s(key.beta_g1.0, key.beta_g1.1, 2); - - // Check the proofs-of-knowledge for tau/alpha/beta - - // g1^s / g1^(s*x) = g2^s / g2^(s*x) - if !same_ratio(key.tau_g1, (tau_g2_s, key.tau_g2)) { - return false; - } - if !same_ratio(key.alpha_g1, (alpha_g2_s, key.alpha_g2)) { - return false; - } - if !same_ratio(key.beta_g1, (beta_g2_s, key.beta_g2)) { - return false; - } - - // Check the correctness of the generators for tau powers - if after.tau_powers_g1[0] != E::G1Affine::one() { - return false; - } - if after.tau_powers_g2[0] != E::G2Affine::one() { - return false; - } - - // Did the participant multiply the previous tau by the new one? - if !same_ratio( - (before.tau_powers_g1[1], after.tau_powers_g1[1]), - (tau_g2_s, key.tau_g2), - ) { - return false; - } - - // Did the participant multiply the previous alpha by the new one? - if !same_ratio( - (before.alpha_tau_powers_g1[0], after.alpha_tau_powers_g1[0]), - (alpha_g2_s, key.alpha_g2), - ) { - return false; - } - - // Did the participant multiply the previous beta by the new one? - if !same_ratio( - (before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), - (beta_g2_s, key.beta_g2), - ) { - return false; - } - if !same_ratio( - (before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), - (before.beta_g2, after.beta_g2), - ) { - return false; - } - - // Are the powers of tau correct? 
- if !same_ratio( - power_pairs(&after.tau_powers_g1), - (after.tau_powers_g2[0], after.tau_powers_g2[1]), - ) { - return false; - } - if !same_ratio( - power_pairs(&after.tau_powers_g2), - (after.tau_powers_g1[0], after.tau_powers_g1[1]), - ) { - return false; - } - if !same_ratio( - power_pairs(&after.alpha_tau_powers_g1), - (after.tau_powers_g2[0], after.tau_powers_g2[1]), - ) { - return false; - } - if !same_ratio( - power_pairs(&after.beta_tau_powers_g1), - (after.tau_powers_g2[0], after.tau_powers_g2[1]), - ) { - return false; - } - - true -} - -/// Abstraction over a reader which hashes the data being read. -pub struct HashReader { - reader: R, - hasher: Blake2b, -} - -impl HashReader { - /// Construct a new `HashReader` given an existing `reader` by value. - pub fn new(reader: R) -> Self { - HashReader { - reader, - hasher: Blake2b::default(), - } - } - - /// Destroy this reader and return the hash of what was read. - pub fn into_hash(self) -> GenericArray { - self.hasher.result() - } -} - -impl Read for HashReader { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - let bytes = self.reader.read(buf)?; - - if bytes > 0 { - self.hasher.input(&buf[0..bytes]); - } - - Ok(bytes) - } -} - -/// Abstraction over a writer which hashes the data being written. -pub struct HashWriter { - writer: W, - hasher: Blake2b, -} - -impl HashWriter { - /// Construct a new `HashWriter` given an existing `writer` by value. - pub fn new(writer: W) -> Self { - HashWriter { - writer, - hasher: Blake2b::default(), - } - } - - /// Destroy this writer and return the hash of what was written. - pub fn into_hash(self) -> GenericArray { - self.hasher.result() - } -} - -impl Write for HashWriter { - fn write(&mut self, buf: &[u8]) -> io::Result { - let bytes = self.writer.write(buf)?; - - if bytes > 0 { - self.hasher.input(&buf[0..bytes]); - } - - Ok(bytes) - } - - fn flush(&mut self) -> io::Result<()> { - self.writer.flush() - } -} diff --git a/powersoftau/src/batched_accumulator.rs b/powersoftau/src/batched_accumulator.rs index 4ff215d..e7b026b 100644 --- a/powersoftau/src/batched_accumulator.rs +++ b/powersoftau/src/batched_accumulator.rs @@ -3,6 +3,7 @@ use bellman_ce::pairing::ff::{Field, PrimeField}; use bellman_ce::pairing::*; use blake2::{Blake2b, Digest}; +use log::{error, info}; use generic_array::GenericArray; use itertools::Itertools; @@ -14,7 +15,7 @@ use typenum::consts::U64; use super::keypair::{PrivateKey, PublicKey}; use super::parameters::{ - CheckForCorrectness, DeserializationError, ElementType, PowersOfTauParameters, UseCompression, + CeremonyParams, CheckForCorrectness, DeserializationError, ElementType, UseCompression, }; use super::utils::{blank_hash, compute_g2_s, power_pairs, same_ratio}; @@ -31,7 +32,7 @@ pub enum AccumulatorState { /// /// * (τ, τ2, ..., τ222 - 2, α, ατ, ατ2, ..., ατ221 - 1, β, βτ, βτ2, ..., βτ221 - 1)1 /// * (β, τ, τ2, ..., τ221 - 1)2 -pub struct BatchedAccumulator { +pub struct BatchedAccumulator<'a, E: Engine> { /// tau^0, tau^1, tau^2, ..., tau^{TAU_POWERS_G1_LENGTH - 1} pub tau_powers_g1: Vec, /// tau^0, tau^1, tau^2, ..., tau^{TAU_POWERS_LENGTH - 1} @@ -44,11 +45,11 @@ pub struct BatchedAccumulator { pub beta_g2: E::G2Affine, /// Hash chain hash pub hash: GenericArray, - /// Keep parameters here as a marker - marker: std::marker::PhantomData
<P>
, + /// The parameters used for the setup of this accumulator + pub parameters: &'a CeremonyParams, } -impl BatchedAccumulator { +impl<'a, E: Engine> BatchedAccumulator<'a, E> { /// Calculate the contribution hash from the resulting file. Original powers of tau implementation /// used a specially formed writer to write to the file and calculate a hash on the fly, but memory-constrained /// implementation now writes without a particular order, so plain recalculation at the end @@ -61,10 +62,8 @@ impl BatchedAccumulator { } hasher.result() } -} -impl BatchedAccumulator { - pub fn empty() -> Self { + pub fn empty(parameters: &'a CeremonyParams) -> Self { Self { tau_powers_g1: vec![], tau_powers_g2: vec![], @@ -72,32 +71,30 @@ impl BatchedAccumulator { beta_tau_powers_g1: vec![], beta_g2: E::G2Affine::zero(), hash: blank_hash(), - marker: std::marker::PhantomData::
<P>
{}, + parameters, } } -} -impl BatchedAccumulator { - fn g1_size(compression: UseCompression) -> usize { + fn g1_size(&self, compression: UseCompression) -> usize { match compression { - UseCompression::Yes => P::G1_COMPRESSED_BYTE_SIZE, - UseCompression::No => P::G1_UNCOMPRESSED_BYTE_SIZE, + UseCompression::Yes => self.parameters.curve.g1_compressed, + UseCompression::No => self.parameters.curve.g1, } } - fn g2_size(compression: UseCompression) -> usize { + fn g2_size(&self, compression: UseCompression) -> usize { match compression { - UseCompression::Yes => P::G2_COMPRESSED_BYTE_SIZE, - UseCompression::No => P::G2_UNCOMPRESSED_BYTE_SIZE, + UseCompression::Yes => self.parameters.curve.g2_compressed, + UseCompression::No => self.parameters.curve.g2, } } - fn get_size(element_type: ElementType, compression: UseCompression) -> usize { + fn get_size(&self, element_type: ElementType, compression: UseCompression) -> usize { match element_type { ElementType::AlphaG1 | ElementType::BetaG1 | ElementType::TauG1 => { - Self::g1_size(compression) + self.g1_size(compression) } - ElementType::BetaG2 | ElementType::TauG2 => Self::g2_size(compression), + ElementType::BetaG2 | ElementType::TauG2 => self.g2_size(compression), } } @@ -111,24 +108,25 @@ impl BatchedAccumulator { /// Public key appended to the end of file, but it's irrelevant for an accumulator itself fn calculate_mmap_position( + &self, index: usize, element_type: ElementType, compression: UseCompression, ) -> usize { - let g1_size = Self::g1_size(compression); - let g2_size = Self::g2_size(compression); - let required_tau_g1_power = P::TAU_POWERS_G1_LENGTH; - let required_power = P::TAU_POWERS_LENGTH; + let g1_size = self.g1_size(compression); + let g2_size = self.g2_size(compression); + let required_tau_g1_power = self.parameters.powers_g1_length; + let required_power = self.parameters.powers_length; + let parameters = &self.parameters; let position = match element_type { ElementType::TauG1 => { let mut position = 0; position += g1_size * index; assert!( - index < P::TAU_POWERS_G1_LENGTH, + index < parameters.powers_g1_length, format!( "Index of TauG1 element written must not exceed {}, while it's {}", - P::TAU_POWERS_G1_LENGTH, - index + parameters.powers_g1_length, index ) ); @@ -138,11 +136,10 @@ impl BatchedAccumulator { let mut position = 0; position += g1_size * required_tau_g1_power; assert!( - index < P::TAU_POWERS_LENGTH, + index < required_power, format!( "Index of TauG2 element written must not exceed {}, while it's {}", - P::TAU_POWERS_LENGTH, - index + required_power, index ) ); position += g2_size * index; @@ -154,11 +151,10 @@ impl BatchedAccumulator { position += g1_size * required_tau_g1_power; position += g2_size * required_power; assert!( - index < P::TAU_POWERS_LENGTH, + index < required_power, format!( "Index of AlphaG1 element written must not exceed {}, while it's {}", - P::TAU_POWERS_LENGTH, - index + required_power, index ) ); position += g1_size * index; @@ -171,11 +167,10 @@ impl BatchedAccumulator { position += g2_size * required_power; position += g1_size * required_power; assert!( - index < P::TAU_POWERS_LENGTH, + index < required_power, format!( "Index of BetaG1 element written must not exceed {}, while it's {}", - P::TAU_POWERS_LENGTH, - index + required_power, index ) ); position += g1_size * index; @@ -193,14 +188,14 @@ impl BatchedAccumulator { } }; - position + P::HASH_SIZE + position + self.parameters.hash_size } } -/// Verifies a transformation of the `Accumulator` with the `PublicKey`, given a 64-byte 
transcript `digest`. -pub fn verify_transform( - before: &BatchedAccumulator, - after: &BatchedAccumulator, +/// Verifies a transformation of the `BatchedAccumulator` with the `PublicKey`, given a 64-byte transcript `digest`. +pub fn verify_transform( + before: &BatchedAccumulator, + after: &BatchedAccumulator, key: &PublicKey, digest: &[u8], ) -> bool { @@ -290,9 +285,9 @@ pub fn verify_transform( true } -impl BatchedAccumulator { +impl<'a, E: Engine> BatchedAccumulator<'a, E> { /// Verifies a transformation of the `Accumulator` with the `PublicKey`, given a 64-byte transcript `digest`. - #[allow(clippy::too_many_arguments)] + #[allow(clippy::too_many_arguments, clippy::cognitive_complexity)] pub fn verify_transformation( input_map: &Mmap, output_map: &Mmap, @@ -302,6 +297,7 @@ impl BatchedAccumulator { output_is_compressed: UseCompression, check_input_for_correctness: CheckForCorrectness, check_output_for_correctness: CheckForCorrectness, + parameters: &'a CeremonyParams, ) -> bool { use itertools::MinMaxResult::MinMax; assert_eq!(digest.len(), 64); @@ -314,22 +310,22 @@ impl BatchedAccumulator { // g1^s / g1^(s*x) = g2^s / g2^(s*x) if !same_ratio(key.tau_g1, (tau_g2_s, key.tau_g2)) { - println!("Invalid ratio key.tau_g1, (tau_g2_s, key.tau_g2)"); + error!("Invalid ratio key.tau_g1, (tau_g2_s, key.tau_g2)"); return false; } if !same_ratio(key.alpha_g1, (alpha_g2_s, key.alpha_g2)) { - println!("Invalid ratio key.alpha_g1, (alpha_g2_s, key.alpha_g2)"); + error!("Invalid ratio key.alpha_g1, (alpha_g2_s, key.alpha_g2)"); return false; } if !same_ratio(key.beta_g1, (beta_g2_s, key.beta_g2)) { - println!("Invalid ratio key.beta_g1, (beta_g2_s, key.beta_g2)"); + error!("Invalid ratio key.beta_g1, (beta_g2_s, key.beta_g2)"); return false; } // Load accumulators AND perform computations - let mut before = Self::empty(); - let mut after = Self::empty(); + let mut before = Self::empty(parameters); + let mut after = Self::empty(parameters); // these checks only touch a part of the accumulator, so read two elements @@ -356,11 +352,11 @@ impl BatchedAccumulator { // Check the correctness of the generators for tau powers if after.tau_powers_g1[0] != E::G1Affine::one() { - println!("tau_powers_g1[0] != 1"); + error!("tau_powers_g1[0] != 1"); return false; } if after.tau_powers_g2[0] != E::G2Affine::one() { - println!("tau_powers_g2[0] != 1"); + error!("tau_powers_g2[0] != 1"); return false; } @@ -369,7 +365,7 @@ impl BatchedAccumulator { (before.tau_powers_g1[1], after.tau_powers_g1[1]), (tau_g2_s, key.tau_g2), ) { - println!("Invalid ratio (before.tau_powers_g1[1], after.tau_powers_g1[1]), (tau_g2_s, key.tau_g2)"); + error!("Invalid ratio (before.tau_powers_g1[1], after.tau_powers_g1[1]), (tau_g2_s, key.tau_g2)"); return false; } @@ -378,7 +374,7 @@ impl BatchedAccumulator { (before.alpha_tau_powers_g1[0], after.alpha_tau_powers_g1[0]), (alpha_g2_s, key.alpha_g2), ) { - println!("Invalid ratio (before.alpha_tau_powers_g1[0], after.alpha_tau_powers_g1[0]), (alpha_g2_s, key.alpha_g2)"); + error!("Invalid ratio (before.alpha_tau_powers_g1[0], after.alpha_tau_powers_g1[0]), (alpha_g2_s, key.alpha_g2)"); return false; } @@ -387,14 +383,14 @@ impl BatchedAccumulator { (before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (beta_g2_s, key.beta_g2), ) { - println!("Invalid ratio (before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (beta_g2_s, key.beta_g2)"); + error!("Invalid ratio (before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (beta_g2_s, key.beta_g2)"); return false; } if 
!same_ratio( (before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (before.beta_g2, after.beta_g2), ) { - println!("Invalid ratio (before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (before.beta_g2, after.beta_g2)"); + error!("Invalid ratio (before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (before.beta_g2, after.beta_g2)"); return false; } } @@ -408,16 +404,11 @@ impl BatchedAccumulator { // one does not need to care about some overlapping let mut tau_powers_last_first_chunks = vec![E::G1Affine::zero(); 2]; - for chunk in &(0..P::TAU_POWERS_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE) { + let tau_powers_length = parameters.powers_length; + for chunk in &(0..tau_powers_length).chunks(parameters.batch_size) { if let MinMax(start, end) = chunk.minmax() { // extra 1 to ensure intersection between chunks and ensure we don't overflow - let size = end - start - + 1 - + if end == P::TAU_POWERS_LENGTH - 1 { - 0 - } else { - 1 - }; + let size = end - start + 1 + if end == tau_powers_length - 1 { 0 } else { 1 }; before .read_chunk( start, @@ -452,47 +443,46 @@ impl BatchedAccumulator { power_pairs(&after.tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1), ) { - println!("Invalid ratio power_pairs(&after.tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)"); + error!("Invalid ratio power_pairs(&after.tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)"); return false; } if !same_ratio( power_pairs(&after.tau_powers_g2), (tau_powers_g1_0, tau_powers_g1_1), ) { - println!("Invalid ratio power_pairs(&after.tau_powers_g2), (tau_powers_g1_0, tau_powers_g1_1)"); + error!("Invalid ratio power_pairs(&after.tau_powers_g2), (tau_powers_g1_0, tau_powers_g1_1)"); return false; } if !same_ratio( power_pairs(&after.alpha_tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1), ) { - println!("Invalid ratio power_pairs(&after.alpha_tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)"); + error!("Invalid ratio power_pairs(&after.alpha_tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)"); return false; } if !same_ratio( power_pairs(&after.beta_tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1), ) { - println!("Invalid ratio power_pairs(&after.beta_tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)"); + error!("Invalid ratio power_pairs(&after.beta_tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)"); return false; } - if end == P::TAU_POWERS_LENGTH - 1 { + if end == tau_powers_length - 1 { tau_powers_last_first_chunks[0] = after.tau_powers_g1[size - 1]; } - println!("Done processing {} powers of tau", end); + info!("Done processing {} powers of tau", end); } else { panic!("Chunk does not have a min and max"); } } - for chunk in - &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE) + for chunk in &(tau_powers_length..parameters.powers_g1_length).chunks(parameters.batch_size) { if let MinMax(start, end) = chunk.minmax() { // extra 1 to ensure intersection between chunks and ensure we don't overflow let size = end - start + 1 - + if end == P::TAU_POWERS_G1_LENGTH - 1 { + + if end == parameters.powers_g1_length - 1 { 0 } else { 1 @@ -542,13 +532,13 @@ impl BatchedAccumulator { power_pairs(&after.tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1), ) { - println!("Invalid ratio power_pairs(&after.tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1) in extra TauG1 contribution"); + error!("Invalid ratio power_pairs(&after.tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1) in extra TauG1 contribution"); return false; } - if start == P::TAU_POWERS_LENGTH { + if start == 
parameters.powers_length { tau_powers_last_first_chunks[1] = after.tau_powers_g1[0]; } - println!("Done processing {} powers of tau", end); + info!("Done processing {} powers of tau", end); } else { panic!("Chunk does not have a min and max"); } @@ -558,7 +548,7 @@ impl BatchedAccumulator { power_pairs(&tau_powers_last_first_chunks), (tau_powers_g2_0, tau_powers_g2_1), ) { - println!("Invalid ratio power_pairs(&after.tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1) in TauG1 contribution intersection"); + error!("Invalid ratio power_pairs(&after.tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1) in TauG1 contribution intersection"); } true } @@ -567,12 +557,13 @@ impl BatchedAccumulator { input_map: &Mmap, output_map: &mut MmapMut, check_input_for_correctness: CheckForCorrectness, + parameters: &'a CeremonyParams, ) -> io::Result<()> { use itertools::MinMaxResult::MinMax; - let mut accumulator = Self::empty(); + let mut accumulator = Self::empty(parameters); - for chunk in &(0..P::TAU_POWERS_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE) { + for chunk in &(0..parameters.powers_length).chunks(parameters.batch_size) { if let MinMax(start, end) = chunk.minmax() { let size = end - start + 1; accumulator @@ -596,7 +587,7 @@ impl BatchedAccumulator { } for chunk in - &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE) + &(parameters.powers_length..parameters.powers_g1_length).chunks(parameters.batch_size) { if let MinMax(start, end) = chunk.minmax() { let size = end - start + 1; @@ -643,10 +634,11 @@ impl BatchedAccumulator { input_map: &Mmap, check_input_for_correctness: CheckForCorrectness, compression: UseCompression, - ) -> io::Result> { + parameters: &'a CeremonyParams, + ) -> io::Result> { use itertools::MinMaxResult::MinMax; - let mut accumulator = Self::empty(); + let mut accumulator = Self::empty(parameters); let mut tau_powers_g1 = vec![]; let mut tau_powers_g2 = vec![]; @@ -654,7 +646,7 @@ impl BatchedAccumulator { let mut beta_tau_powers_g1 = vec![]; let mut beta_g2 = vec![]; - for chunk in &(0..P::TAU_POWERS_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE) { + for chunk in &(0..parameters.powers_length).chunks(parameters.batch_size) { if let MinMax(start, end) = chunk.minmax() { let size = end - start + 1; accumulator @@ -684,7 +676,7 @@ impl BatchedAccumulator { } for chunk in - &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE) + &(parameters.powers_length..parameters.powers_g1_length).chunks(parameters.batch_size) { if let MinMax(start, end) = chunk.minmax() { let size = end - start + 1; @@ -734,7 +726,7 @@ impl BatchedAccumulator { beta_tau_powers_g1, beta_g2: beta_g2[0], hash: blank_hash(), - marker: std::marker::PhantomData::
<P>
{}, + parameters, }) } @@ -742,19 +734,20 @@ impl BatchedAccumulator { &mut self, output_map: &mut MmapMut, compression: UseCompression, + parameters: &CeremonyParams, ) -> io::Result<()> { use itertools::MinMaxResult::MinMax; - for chunk in &(0..P::TAU_POWERS_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE) { + for chunk in &(0..parameters.powers_length).chunks(parameters.batch_size) { if let MinMax(start, end) = chunk.minmax() { - let mut tmp_acc = BatchedAccumulator:: { + let mut tmp_acc = BatchedAccumulator:: { tau_powers_g1: (&self.tau_powers_g1[start..=end]).to_vec(), tau_powers_g2: (&self.tau_powers_g2[start..=end]).to_vec(), alpha_tau_powers_g1: (&self.alpha_tau_powers_g1[start..=end]).to_vec(), beta_tau_powers_g1: (&self.beta_tau_powers_g1[start..=end]).to_vec(), beta_g2: self.beta_g2, hash: self.hash, - marker: std::marker::PhantomData::
<P>
{}, + parameters, }; tmp_acc.write_chunk(start, compression, output_map)?; } else { @@ -763,17 +756,17 @@ impl BatchedAccumulator { } for chunk in - &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE) + &(parameters.powers_length..parameters.powers_g1_length).chunks(parameters.batch_size) { if let MinMax(start, end) = chunk.minmax() { - let mut tmp_acc = BatchedAccumulator:: { + let mut tmp_acc = BatchedAccumulator:: { tau_powers_g1: (&self.tau_powers_g1[start..=end]).to_vec(), tau_powers_g2: vec![], alpha_tau_powers_g1: vec![], beta_tau_powers_g1: vec![], beta_g2: self.beta_g2, hash: self.hash, - marker: std::marker::PhantomData::
<P>
{}, + parameters, }; tmp_acc.write_chunk(start, compression, output_map)?; } else { @@ -783,9 +776,7 @@ impl BatchedAccumulator { Ok(()) } -} -impl BatchedAccumulator { pub fn read_chunk( &mut self, from: usize, @@ -924,7 +915,7 @@ impl BatchedAccumulator { let index = from + i; match element_type { ElementType::TauG1 => { - if index >= P::TAU_POWERS_G1_LENGTH { + if index >= self.parameters.powers_g1_length { return Ok(vec![]); } } @@ -932,13 +923,13 @@ impl BatchedAccumulator { | ElementType::BetaG1 | ElementType::BetaG2 | ElementType::TauG2 => { - if index >= P::TAU_POWERS_LENGTH { + if index >= self.parameters.powers_length { return Ok(vec![]); } } }; - let position = Self::calculate_mmap_position(index, element_type, compression); - let element_size = Self::get_size(element_type, compression); + let position = self.calculate_mmap_position(index, element_type, compression); + let element_size = self.get_size(element_type, compression); let mut memory_slice = input_map .get(position..position + element_size) .expect("must read point data from file"); @@ -1021,9 +1012,7 @@ impl BatchedAccumulator { None => Ok(res_affine), } } -} -impl BatchedAccumulator { fn write_all( &mut self, chunk_start: usize, @@ -1086,7 +1075,7 @@ impl BatchedAccumulator { { match element_type { ElementType::TauG1 => { - if index >= P::TAU_POWERS_G1_LENGTH { + if index >= self.parameters.powers_g1_length { return Ok(()); } } @@ -1094,7 +1083,7 @@ impl BatchedAccumulator { | ElementType::BetaG1 | ElementType::BetaG2 | ElementType::TauG2 => { - if index >= P::TAU_POWERS_LENGTH { + if index >= self.parameters.powers_length { return Ok(()); } } @@ -1102,12 +1091,12 @@ impl BatchedAccumulator { match compression { UseCompression::Yes => { - let position = Self::calculate_mmap_position(index, element_type, compression); + let position = self.calculate_mmap_position(index, element_type, compression); // let size = self.get_size(element_type, compression); (&mut output_map[position..]).write_all(p.into_compressed().as_ref())?; } UseCompression::No => { - let position = Self::calculate_mmap_position(index, element_type, compression); + let position = self.calculate_mmap_position(index, element_type, compression); // let size = self.get_size(element_type, compression); (&mut output_map[position..]).write_all(p.into_uncompressed().as_ref())?; } @@ -1124,7 +1113,7 @@ impl BatchedAccumulator { output_map: &mut MmapMut, ) -> io::Result<()> { self.write_all(chunk_start, compression, ElementType::TauG1, output_map)?; - if chunk_start < P::TAU_POWERS_LENGTH { + if chunk_start < self.parameters.powers_length { self.write_all(chunk_start, compression, ElementType::TauG2, output_map)?; self.write_all(chunk_start, compression, ElementType::AlphaG1, output_map)?; self.write_all(chunk_start, compression, ElementType::BetaG1, output_map)?; @@ -1133,9 +1122,7 @@ impl BatchedAccumulator { Ok(()) } -} -impl BatchedAccumulator { /// Transforms the accumulator with a private key. /// Due to large amount of data in a previous accumulator even in the compressed form /// this function can now work on compressed input. Output can be made in any form @@ -1149,6 +1136,7 @@ impl BatchedAccumulator { compress_the_output: UseCompression, check_input_for_correctness: CheckForCorrectness, key: &PrivateKey, + parameters: &'a CeremonyParams, ) -> io::Result<()> { /// Exponentiate a large number of points, with an optional coefficient to be applied to the /// exponent. 
@@ -1205,11 +1193,11 @@ impl BatchedAccumulator { } } - let mut accumulator = Self::empty(); + let mut accumulator = Self::empty(parameters); use itertools::MinMaxResult::MinMax; - for chunk in &(0..P::TAU_POWERS_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE) { + for chunk in &(0..parameters.powers_length).chunks(parameters.batch_size) { if let MinMax(start, end) = chunk.minmax() { let size = end - start + 1; accumulator @@ -1258,14 +1246,14 @@ impl BatchedAccumulator { "your contribution happened to produce a point at infinity, please re-run" ); accumulator.write_chunk(start, compress_the_output, output_map)?; - println!("Done processing {} powers of tau", end); + info!("Done processing {} powers of tau", end); } else { panic!("Chunk does not have a min and max"); } } for chunk in - &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE) + &(parameters.powers_length..parameters.powers_g1_length).chunks(parameters.batch_size) { if let MinMax(start, end) = chunk.minmax() { let size = end - start + 1; @@ -1307,7 +1295,7 @@ impl BatchedAccumulator { //assert!(!accumulator.beta_g2.is_zero(), "your contribution happened to produce a point at infinity, please re-run"); accumulator.write_chunk(start, compress_the_output, output_map)?; - println!("Done processing {} powers of tau", end); + info!("Done processing {} powers of tau", end); } else { panic!("Chunk does not have a min and max"); } @@ -1315,17 +1303,17 @@ impl BatchedAccumulator { Ok(()) } -} -impl BatchedAccumulator { /// Transforms the accumulator with a private key. pub fn generate_initial( output_map: &mut MmapMut, compress_the_output: UseCompression, + parameters: &'a CeremonyParams, ) -> io::Result<()> { use itertools::MinMaxResult::MinMax; - for chunk in &(0..P::TAU_POWERS_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE) { + // Write the first Tau powers in chunks where every initial element is a G1 or G2 `one` + for chunk in &(0..parameters.powers_length).chunks(parameters.batch_size) { if let MinMax(start, end) = chunk.minmax() { let size = end - start + 1; let mut accumulator = Self { @@ -1335,18 +1323,19 @@ impl BatchedAccumulator { beta_tau_powers_g1: vec![E::G1Affine::one(); size], beta_g2: E::G2Affine::one(), hash: blank_hash(), - marker: std::marker::PhantomData::
<P>
{}, + parameters, }; accumulator.write_chunk(start, compress_the_output, output_map)?; - println!("Done processing {} powers of tau", end); + info!("Done processing {} powers of tau", end); } else { panic!("Chunk does not have a min and max"); } } + // Write the next `G1 length` elements for chunk in - &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE) + &(parameters.powers_length..parameters.powers_g1_length).chunks(parameters.batch_size) { if let MinMax(start, end) = chunk.minmax() { let size = end - start + 1; @@ -1357,11 +1346,11 @@ impl BatchedAccumulator { beta_tau_powers_g1: vec![], beta_g2: E::G2Affine::one(), hash: blank_hash(), - marker: std::marker::PhantomData::
<P>
{}, + parameters, }; accumulator.write_chunk(start, compress_the_output, output_map)?; - println!("Done processing {} powers of tau", end); + info!("Done processing {} powers of tau", end); } else { panic!("Chunk does not have a min and max"); } diff --git a/powersoftau/src/bin/beacon_constrained.rs b/powersoftau/src/bin/beacon_constrained.rs index b858c95..3fede95 100644 --- a/powersoftau/src/bin/beacon_constrained.rs +++ b/powersoftau/src/bin/beacon_constrained.rs @@ -1,17 +1,15 @@ -use powersoftau::bn256::Bn256CeremonyParameters; - -use powersoftau::batched_accumulator::BatchedAccumulator; -use powersoftau::keypair::keypair; -use powersoftau::parameters::{CheckForCorrectness, UseCompression}; +use powersoftau::{ + batched_accumulator::BatchedAccumulator, + keypair::keypair, + parameters::{CeremonyParams, CheckForCorrectness, CurveKind, UseCompression}, +}; use bellman_ce::pairing::bn256::Bn256; -use memmap::*; +use memmap::MmapOptions; use std::fs::OpenOptions; use std::io::Write; -use powersoftau::parameters::PowersOfTauParameters; - #[macro_use] extern crate hex_literal; @@ -22,20 +20,24 @@ const CHECK_INPUT_CORRECTNESS: CheckForCorrectness = CheckForCorrectness::No; #[allow(clippy::modulo_one)] fn main() { let args: Vec = std::env::args().collect(); - if args.len() != 3 { - println!("Usage: \n "); + if args.len() != 5 { + println!("Usage: \n "); std::process::exit(exitcode::USAGE); } let challenge_filename = &args[1]; let response_filename = &args[2]; + let circuit_power = args[3].parse().expect("could not parse circuit power"); + let batch_size = args[4].parse().expect("could not parse batch size"); + + let parameters = CeremonyParams::new(CurveKind::Bn256, circuit_power, batch_size); println!( "Will contribute a random beacon to accumulator for 2^{} powers of tau", - Bn256CeremonyParameters::REQUIRED_POWER + parameters.size, ); println!( "In total will generate up to {} powers", - Bn256CeremonyParameters::TAU_POWERS_G1_LENGTH + parameters.powers_g1_length, ); // Create an RNG based on the outcome of the random beacon @@ -102,8 +104,8 @@ fn main() { .metadata() .expect("unable to get filesystem metadata for challenge file"); let expected_challenge_length = match INPUT_IS_COMPRESSED { - UseCompression::Yes => Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE, - UseCompression::No => Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE, + UseCompression::Yes => parameters.contribution_size, + UseCompression::No => parameters.accumulator_size, }; if metadata.len() != (expected_challenge_length as u64) { @@ -130,11 +132,8 @@ fn main() { .expect("unable to create response file in this directory"); let required_output_length = match COMPRESS_THE_OUTPUT { - UseCompression::Yes => Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE, - UseCompression::No => { - Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE - + Bn256CeremonyParameters::PUBLIC_KEY_SIZE - } + UseCompression::Yes => parameters.contribution_size, + UseCompression::No => parameters.accumulator_size + parameters.public_key_size, }; writer @@ -149,8 +148,7 @@ fn main() { println!("Calculating previous contribution hash..."); - let current_accumulator_hash = - BatchedAccumulator::::calculate_hash(&readable_map); + let current_accumulator_hash = BatchedAccumulator::::calculate_hash(&readable_map); { println!("Contributing on top of the hash:"); @@ -181,28 +179,28 @@ fn main() { println!("Computing and writing your contribution, this could take a while..."); // this computes a transformation and writes it - BatchedAccumulator::::transform( + 
BatchedAccumulator::::transform( &readable_map, &mut writable_map, INPUT_IS_COMPRESSED, COMPRESS_THE_OUTPUT, CHECK_INPUT_CORRECTNESS, &privkey, + ¶meters, ) .expect("must transform with the key"); println!("Finishing writing your contribution to response file..."); // Write the public key pubkey - .write::(&mut writable_map, COMPRESS_THE_OUTPUT) + .write(&mut writable_map, COMPRESS_THE_OUTPUT, ¶meters) .expect("unable to write public key"); // Get the hash of the contribution, so the user can compare later let output_readonly = writable_map .make_read_only() .expect("must make a map readonly"); - let contribution_hash = - BatchedAccumulator::::calculate_hash(&output_readonly); + let contribution_hash = BatchedAccumulator::::calculate_hash(&output_readonly); print!( "Done!\n\n\ diff --git a/powersoftau/src/bin/compute_constrained.rs b/powersoftau/src/bin/compute_constrained.rs index 255f9f7..7016703 100644 --- a/powersoftau/src/bin/compute_constrained.rs +++ b/powersoftau/src/bin/compute_constrained.rs @@ -1,5 +1,4 @@ use powersoftau::batched_accumulator::BatchedAccumulator; -use powersoftau::bn256::Bn256CeremonyParameters; use powersoftau::keypair::keypair; use powersoftau::parameters::{CheckForCorrectness, UseCompression}; @@ -9,7 +8,7 @@ use std::fs::OpenOptions; use std::io::{Read, Write}; -use powersoftau::parameters::PowersOfTauParameters; +use powersoftau::parameters::{CeremonyParams, CurveKind}; const INPUT_IS_COMPRESSED: UseCompression = UseCompression::No; const COMPRESS_THE_OUTPUT: UseCompression = UseCompression::Yes; @@ -17,20 +16,24 @@ const CHECK_INPUT_CORRECTNESS: CheckForCorrectness = CheckForCorrectness::No; fn main() { let args: Vec = std::env::args().collect(); - if args.len() != 3 { - println!("Usage: \n "); + if args.len() != 5 { + println!("Usage: \n "); std::process::exit(exitcode::USAGE); } let challenge_filename = &args[1]; let response_filename = &args[2]; + let circuit_power = args[3].parse().expect("could not parse circuit power"); + let batch_size = args[4].parse().expect("could not parse batch size"); + + let parameters = CeremonyParams::new(CurveKind::Bn256, circuit_power, batch_size); println!( "Will contribute to accumulator for 2^{} powers of tau", - Bn256CeremonyParameters::REQUIRED_POWER + parameters.size ); println!( "In total will generate up to {} powers", - Bn256CeremonyParameters::TAU_POWERS_G1_LENGTH + parameters.powers_g1_length ); // Create an RNG based on a mixture of system randomness and user provided randomness @@ -85,8 +88,8 @@ fn main() { .metadata() .expect("unable to get filesystem metadata for challenge file"); let expected_challenge_length = match INPUT_IS_COMPRESSED { - UseCompression::Yes => Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE, - UseCompression::No => Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE, + UseCompression::Yes => parameters.contribution_size, + UseCompression::No => parameters.accumulator_size, }; if metadata.len() != (expected_challenge_length as u64) { @@ -113,11 +116,8 @@ fn main() { .expect("unable to create response file"); let required_output_length = match COMPRESS_THE_OUTPUT { - UseCompression::Yes => Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE, - UseCompression::No => { - Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE - + Bn256CeremonyParameters::PUBLIC_KEY_SIZE - } + UseCompression::Yes => parameters.contribution_size, + UseCompression::No => parameters.accumulator_size + parameters.public_key_size, }; writer @@ -136,8 +136,7 @@ fn main() { UseCompression::No == INPUT_IS_COMPRESSED, "Hashing the 
compressed file in not yet defined" ); - let current_accumulator_hash = - BatchedAccumulator::::calculate_hash(&readable_map); + let current_accumulator_hash = BatchedAccumulator::::calculate_hash(&readable_map); { println!("`challenge` file contains decompressed points and has a hash:"); @@ -190,13 +189,14 @@ fn main() { println!("Computing and writing your contribution, this could take a while..."); // this computes a transformation and writes it - BatchedAccumulator::::transform( + BatchedAccumulator::::transform( &readable_map, &mut writable_map, INPUT_IS_COMPRESSED, COMPRESS_THE_OUTPUT, CHECK_INPUT_CORRECTNESS, &privkey, + ¶meters, ) .expect("must transform with the key"); @@ -204,7 +204,7 @@ fn main() { // Write the public key pubkey - .write::(&mut writable_map, COMPRESS_THE_OUTPUT) + .write(&mut writable_map, COMPRESS_THE_OUTPUT, ¶meters) .expect("unable to write public key"); writable_map.flush().expect("must flush a memory map"); @@ -213,8 +213,7 @@ fn main() { let output_readonly = writable_map .make_read_only() .expect("must make a map readonly"); - let contribution_hash = - BatchedAccumulator::::calculate_hash(&output_readonly); + let contribution_hash = BatchedAccumulator::::calculate_hash(&output_readonly); print!( "Done!\n\n\ diff --git a/powersoftau/src/bin/new.rs b/powersoftau/src/bin/new.rs deleted file mode 100644 index e1f6d91..0000000 --- a/powersoftau/src/bin/new.rs +++ /dev/null @@ -1,41 +0,0 @@ -use powersoftau::accumulator::Accumulator; -use powersoftau::bn256::Bn256CeremonyParameters; -use powersoftau::parameters::UseCompression; -use powersoftau::utils::blank_hash; - -use bellman_ce::pairing::bn256::Bn256; -use std::fs::OpenOptions; -use std::io::{BufWriter, Write}; - -fn main() { - let args: Vec = std::env::args().collect(); - if args.len() != 2 { - println!("Usage: \n"); - std::process::exit(exitcode::USAGE); - } - let challenge_filename = &args[1]; - - let file = OpenOptions::new() - .read(false) - .write(true) - .create_new(true) - .open(challenge_filename) - .expect("unable to create challenge file"); - - let mut writer = BufWriter::new(file); - - // Write a blank BLAKE2b hash: - writer - .write_all(&blank_hash().as_slice()) - .expect("unable to write blank hash to challenge file"); - - let parameters = Bn256CeremonyParameters {}; - - let acc: Accumulator = Accumulator::new(parameters); - println!("Writing an empty accumulator to disk"); - acc.serialize(&mut writer, UseCompression::No) - .expect("unable to write fresh accumulator to challenge file"); - writer.flush().expect("unable to flush accumulator to disk"); - - println!("Wrote a fresh accumulator to challenge file"); -} diff --git a/powersoftau/src/bin/new_constrained.rs b/powersoftau/src/bin/new_constrained.rs index 181ce7b..c8ac223 100644 --- a/powersoftau/src/bin/new_constrained.rs +++ b/powersoftau/src/bin/new_constrained.rs @@ -1,5 +1,3 @@ -use powersoftau::bn256::Bn256CeremonyParameters; - use powersoftau::batched_accumulator::BatchedAccumulator; use powersoftau::parameters::UseCompression; use powersoftau::utils::blank_hash; @@ -9,25 +7,29 @@ use memmap::*; use std::fs::OpenOptions; use std::io::Write; -use powersoftau::parameters::PowersOfTauParameters; +use powersoftau::parameters::{CeremonyParams, CurveKind}; const COMPRESS_NEW_CHALLENGE: UseCompression = UseCompression::No; fn main() { let args: Vec = std::env::args().collect(); - if args.len() != 2 { - println!("Usage: \n"); + if args.len() != 4 { + println!("Usage: \n "); std::process::exit(exitcode::USAGE); } let challenge_filename = 
&args[1]; + let circuit_power = args[2].parse().expect("could not parse circuit power"); + let batch_size = args[3].parse().expect("could not parse batch size"); + + let parameters = CeremonyParams::new(CurveKind::Bn256, circuit_power, batch_size); println!( "Will generate an empty accumulator for 2^{} powers of tau", - Bn256CeremonyParameters::REQUIRED_POWER + parameters.size ); println!( "In total will generate up to {} powers", - Bn256CeremonyParameters::TAU_POWERS_G1_LENGTH + parameters.powers_g1_length ); let file = OpenOptions::new() @@ -38,11 +40,8 @@ fn main() { .expect("unable to create challenge file"); let expected_challenge_length = match COMPRESS_NEW_CHALLENGE { - UseCompression::Yes => { - Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE - - Bn256CeremonyParameters::PUBLIC_KEY_SIZE - } - UseCompression::No => Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE, + UseCompression::Yes => parameters.contribution_size - parameters.public_key_size, + UseCompression::No => parameters.accumulator_size, }; file.set_len(expected_challenge_length as u64) @@ -75,9 +74,10 @@ fn main() { println!(); } - BatchedAccumulator::::generate_initial( + BatchedAccumulator::::generate_initial( &mut writable_map, COMPRESS_NEW_CHALLENGE, + ¶meters, ) .expect("generation of initial accumulator is successful"); writable_map @@ -88,8 +88,7 @@ fn main() { let output_readonly = writable_map .make_read_only() .expect("must make a map readonly"); - let contribution_hash = - BatchedAccumulator::::calculate_hash(&output_readonly); + let contribution_hash = BatchedAccumulator::::calculate_hash(&output_readonly); println!("Empty contribution is formed with a hash:"); diff --git a/powersoftau/src/bin/prepare_phase2.rs b/powersoftau/src/bin/prepare_phase2.rs index 2acd491..fdc1f83 100644 --- a/powersoftau/src/bin/prepare_phase2.rs +++ b/powersoftau/src/bin/prepare_phase2.rs @@ -2,7 +2,7 @@ use bellman_ce::pairing::bn256::Bn256; use bellman_ce::pairing::bn256::{G1, G2}; use bellman_ce::pairing::{CurveAffine, CurveProjective}; use powersoftau::batched_accumulator::*; -use powersoftau::bn256::Bn256CeremonyParameters; +use powersoftau::parameters::{CeremonyParams, CurveKind}; use powersoftau::*; use crate::parameters::*; @@ -25,6 +25,12 @@ fn log_2(x: u64) -> u32 { } fn main() { + let parameters = CeremonyParams::new( + CurveKind::Bn256, + 28, // turn this to 10 for the small test + 21, // turn this to 8 for the small test + ); + let args: Vec = std::env::args().collect(); if args.len() != 2 { println!("Usage: \n"); @@ -43,10 +49,11 @@ fn main() { .expect("unable to create a memory map for input") }; - let current_accumulator = BatchedAccumulator::::deserialize( + let current_accumulator = BatchedAccumulator::::deserialize( &response_readable_map, CheckForCorrectness::Yes, UseCompression::Yes, + ¶meters, ) .expect("unable to read uncompressed accumulator"); @@ -182,7 +189,7 @@ fn main() { // Lagrange coefficients in G1 (for constructing // LC/IC queries and precomputing polynomials for A) - for coeff in g1_coeffs { + for coeff in g1_coeffs.clone() { // Was normalized earlier in parallel let coeff = coeff.into_affine(); diff --git a/powersoftau/src/bin/reduce_powers.rs b/powersoftau/src/bin/reduce_powers.rs index 86b3551..602d33b 100644 --- a/powersoftau/src/bin/reduce_powers.rs +++ b/powersoftau/src/bin/reduce_powers.rs @@ -1,8 +1,7 @@ use bellman_ce::pairing::bn256::Bn256; use powersoftau::{ batched_accumulator::BatchedAccumulator, - bn256::Bn256CeremonyParameters, - parameters::{CheckForCorrectness, 
PowersOfTauParameters, UseCompression}, + parameters::{CeremonyParams, CheckForCorrectness, CurveKind, UseCompression}, utils::reduced_hash, }; @@ -11,19 +10,6 @@ use std::io::Write; use memmap::MmapOptions; -#[derive(Clone)] -pub struct Bn256ReducedCeremonyParameters {} - -impl PowersOfTauParameters for Bn256ReducedCeremonyParameters { - const REQUIRED_POWER: usize = 10; - - // This ceremony is based on the BN256 elliptic curve construction. - const G1_UNCOMPRESSED_BYTE_SIZE: usize = 64; - const G2_UNCOMPRESSED_BYTE_SIZE: usize = 128; - const G1_COMPRESSED_BYTE_SIZE: usize = 32; - const G2_COMPRESSED_BYTE_SIZE: usize = 64; -} - const fn num_bits() -> usize { std::mem::size_of::() * 8 } @@ -34,6 +20,12 @@ pub fn log_2(x: u64) -> u32 { } fn main() { + let parameters = CeremonyParams::new( + CurveKind::Bn256, + 10, // here we use 10 since it's the reduced ceremony + 21, + ); + // Try to load `./challenge` from disk. let reader = OpenOptions::new() .read(true) @@ -45,27 +37,23 @@ fn main() { .expect("unable to create a memory map for input") }; - let current_accumulator = BatchedAccumulator::::deserialize( + let current_accumulator = BatchedAccumulator::::deserialize( &challenge_readable_map, CheckForCorrectness::Yes, UseCompression::No, + ¶meters, ) .expect("unable to read compressed accumulator"); - let mut reduced_accumulator = - BatchedAccumulator::::empty(); - reduced_accumulator.tau_powers_g1 = current_accumulator.tau_powers_g1 - [..Bn256ReducedCeremonyParameters::TAU_POWERS_G1_LENGTH] - .to_vec(); - reduced_accumulator.tau_powers_g2 = current_accumulator.tau_powers_g2 - [..Bn256ReducedCeremonyParameters::TAU_POWERS_LENGTH] - .to_vec(); - reduced_accumulator.alpha_tau_powers_g1 = current_accumulator.alpha_tau_powers_g1 - [..Bn256ReducedCeremonyParameters::TAU_POWERS_LENGTH] - .to_vec(); - reduced_accumulator.beta_tau_powers_g1 = current_accumulator.beta_tau_powers_g1 - [..Bn256ReducedCeremonyParameters::TAU_POWERS_LENGTH] - .to_vec(); + let mut reduced_accumulator = BatchedAccumulator::::empty(¶meters); + reduced_accumulator.tau_powers_g1 = + current_accumulator.tau_powers_g1[..parameters.powers_g1_length].to_vec(); + reduced_accumulator.tau_powers_g2 = + current_accumulator.tau_powers_g2[..parameters.powers_length].to_vec(); + reduced_accumulator.alpha_tau_powers_g1 = + current_accumulator.alpha_tau_powers_g1[..parameters.powers_length].to_vec(); + reduced_accumulator.beta_tau_powers_g1 = + current_accumulator.beta_tau_powers_g1[..parameters.powers_length].to_vec(); reduced_accumulator.beta_g2 = current_accumulator.beta_g2; let writer = OpenOptions::new() @@ -77,7 +65,7 @@ fn main() { // Recomputation stips the public key and uses hashing to link with the previous contibution after decompression writer - .set_len(Bn256ReducedCeremonyParameters::ACCUMULATOR_BYTE_SIZE as u64) + .set_len(parameters.accumulator_size as u64) .expect("must make output file large enough"); let mut writable_map = unsafe { @@ -87,8 +75,8 @@ fn main() { }; let hash = reduced_hash( - Bn256CeremonyParameters::REQUIRED_POWER as u8, - Bn256ReducedCeremonyParameters::REQUIRED_POWER as u8, + 28, // this is the full size of the hash + parameters.size as u8, ); (&mut writable_map[0..]) .write_all(hash.as_slice()) @@ -110,17 +98,14 @@ fn main() { } reduced_accumulator - .serialize(&mut writable_map, UseCompression::No) + .serialize(&mut writable_map, UseCompression::No, ¶meters) .unwrap(); // Get the hash of the contribution, so the user can compare later let output_readonly = writable_map .make_read_only() .expect("must 
let writer = OpenOptions::new() @@ -77,7 +65,7 @@ fn main() { // Recomputation strips the public key and uses hashing to link with the previous contribution after decompression writer - .set_len(Bn256ReducedCeremonyParameters::ACCUMULATOR_BYTE_SIZE as u64) + .set_len(parameters.accumulator_size as u64) .expect("must make output file large enough"); let mut writable_map = unsafe { @@ -87,8 +75,8 @@ fn main() { }; let hash = reduced_hash( - Bn256CeremonyParameters::REQUIRED_POWER as u8, - Bn256ReducedCeremonyParameters::REQUIRED_POWER as u8, + 28, // the REQUIRED_POWER of the original full ceremony (2^28 powers) + parameters.size as u8, ); (&mut writable_map[0..]) .write_all(hash.as_slice()) @@ -110,17 +98,14 @@ fn main() { } reduced_accumulator - .serialize(&mut writable_map, UseCompression::No) + .serialize(&mut writable_map, UseCompression::No, &parameters) .unwrap(); // Get the hash of the contribution, so the user can compare later let output_readonly = writable_map .make_read_only() .expect("must make a map readonly"); - let contribution_hash = - BatchedAccumulator::<Bn256, Bn256ReducedCeremonyParameters>::calculate_hash( - &output_readonly, - ); + let contribution_hash = BatchedAccumulator::<Bn256>::calculate_hash(&output_readonly); println!("Reduced contribution is formed with a hash:"); diff --git a/powersoftau/src/bin/verify.rs b/powersoftau/src/bin/verify.rs index 45f5126..a890a70 100644 --- a/powersoftau/src/bin/verify.rs +++ b/powersoftau/src/bin/verify.rs @@ -1,10 +1,9 @@ use bellman_ce::pairing::bn256::Bn256; use bellman_ce::pairing::bn256::{G1, G2}; use bellman_ce::pairing::{CurveAffine, CurveProjective}; -use powersoftau::accumulator::HashWriter; use powersoftau::batched_accumulator::*; -use powersoftau::bn256::Bn256CeremonyParameters; use powersoftau::*; +use powersoftau::parameters::{CeremonyParams, CurveKind}; use crate::keypair::*; use crate::parameters::*; @@ -17,6 +16,10 @@ use std::fs::{remove_file, OpenOptions}; use std::io::{self, BufWriter, Read, Write}; use std::path::Path; +use blake2::{Blake2b, Digest}; +use generic_array::GenericArray; +use typenum::U64; + use memmap::*; const fn num_bits<T>() -> usize { @@ -28,13 +31,51 @@ fn log_2(x: u64) -> u32 { num_bits::<u64>() as u32 - x.leading_zeros() - 1 } +/// Abstraction over a writer which hashes the data being written. +pub struct HashWriter<W: Write> { + writer: W, + hasher: Blake2b, +} + +impl<W: Write> HashWriter<W> { + /// Construct a new `HashWriter` given an existing `writer` by value. + pub fn new(writer: W) -> Self { + HashWriter { + writer, + hasher: Blake2b::default(), + } + } + + /// Destroy this writer and return the hash of what was written. + pub fn into_hash(self) -> GenericArray<u8, U64> { + self.hasher.result() + } +} + +impl<W: Write> Write for HashWriter<W> { + fn write(&mut self, buf: &[u8]) -> io::Result<usize> { + let bytes = self.writer.write(buf)?; + + if bytes > 0 { + self.hasher.input(&buf[0..bytes]); + } + + Ok(bytes) + } + + fn flush(&mut self) -> io::Result<()> { + self.writer.flush() + } +} + // Computes the hash of the challenge file for the player, // given the current state of the accumulator and the last // response file hash. fn get_challenge_file_hash( - acc: &mut BatchedAccumulator<Bn256, Bn256CeremonyParameters>, + acc: &mut BatchedAccumulator<Bn256>, last_response_file_hash: &[u8; 64], is_initial: bool, + parameters: &CeremonyParams, ) -> [u8; 64] { let sink = io::sink(); let mut sink = HashWriter::new(sink); @@ -53,7 +94,7 @@ fn get_challenge_file_hash( .expect("unable to create temporary tmp_challenge_file_hash"); writer - .set_len(Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE as u64) + .set_len(parameters.accumulator_size as u64) .expect("must make output file large enough"); let mut writable_map = unsafe { MmapOptions::new() @@ -69,13 +110,14 @@ fn get_challenge_file_hash( .expect("unable to write blank hash to challenge file"); if is_initial { - BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::generate_initial( + BatchedAccumulator::<Bn256>::generate_initial( &mut writable_map, UseCompression::No, + parameters, ) .expect("generation of initial accumulator is successful"); } else { - acc.serialize(&mut writable_map, UseCompression::No) + acc.serialize(&mut writable_map, UseCompression::No, parameters) .unwrap(); }
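Since `HashWriter` moved from the deleted `accumulator.rs` into this binary, a condensed usage sketch may help. This is the same struct as in the diff above, with the return type simplified to `Vec<u8>` to keep the example dependency-light (the blake2 0.8-style `input`/`result` API used by this crate is assumed):

```rust
use blake2::{Blake2b, Digest};
use std::io::{self, Write};

/// Condensed copy of the HashWriter from the diff above.
pub struct HashWriter<W: Write> { writer: W, hasher: Blake2b }

impl<W: Write> HashWriter<W> {
    pub fn new(writer: W) -> Self {
        HashWriter { writer, hasher: Blake2b::default() }
    }
    /// Consume the writer and return the 64-byte BLAKE2b digest of all bytes written.
    pub fn into_hash(self) -> Vec<u8> {
        self.hasher.result().to_vec()
    }
}

impl<W: Write> Write for HashWriter<W> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let bytes = self.writer.write(buf)?;
        if bytes > 0 {
            self.hasher.input(&buf[0..bytes]);
        }
        Ok(bytes)
    }
    fn flush(&mut self) -> io::Result<()> {
        self.writer.flush()
    }
}

fn main() -> io::Result<()> {
    // Every byte written to the sink is also folded into the BLAKE2b state.
    let mut hashed = HashWriter::new(io::sink());
    hashed.write_all(b"some transcript bytes")?;
    let digest = hashed.into_hash();
    println!("{:02x?}", &digest[..8]);
    Ok(())
}
```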
@@ -102,9 +144,10 @@ // accumulator, the player's public key, and the challenge // file's hash. fn get_response_file_hash( - acc: &mut BatchedAccumulator<Bn256, Bn256CeremonyParameters>, + acc: &mut BatchedAccumulator<Bn256>, pubkey: &PublicKey<Bn256>, last_challenge_file_hash: &[u8; 64], + parameters: &CeremonyParams, ) -> [u8; 64] { let sink = io::sink(); let mut sink = HashWriter::new(sink); @@ -122,7 +165,7 @@ fn get_response_file_hash( .expect("unable to create temporary tmp_response_file_hash"); writer - .set_len(Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE as u64) + .set_len(parameters.contribution_size as u64) .expect("must make output file large enough"); let mut writable_map = unsafe { MmapOptions::new() @@ -137,11 +180,11 @@ fn get_response_file_hash( .flush() .expect("unable to write blank hash to challenge file"); - acc.serialize(&mut writable_map, UseCompression::Yes) + acc.serialize(&mut writable_map, UseCompression::Yes, parameters) .unwrap(); pubkey - .write::<Bn256CeremonyParameters>(&mut writable_map, UseCompression::Yes) + .write(&mut writable_map, UseCompression::Yes, parameters) .expect("unable to write public key"); writable_map.flush().expect("must flush the memory map"); } @@ -162,7 +205,7 @@ fn get_response_file_hash( tmp } -fn new_accumulator_for_verify() -> BatchedAccumulator<Bn256, Bn256CeremonyParameters> { +fn new_accumulator_for_verify(parameters: &CeremonyParams) -> BatchedAccumulator<Bn256> { let file_name = "tmp_initial_challenge"; { if Path::new(file_name).exists() { @@ -176,7 +219,7 @@ fn new_accumulator_for_verify() -> BatchedAccumulator<Bn256> - BatchedAccumulator::generate_initial( + BatchedAccumulator::<Bn256>::generate_initial( &mut writable_map, UseCompression::No, + &parameters, ) .expect("generation of initial accumulator is successful"); writable_map @@ -206,17 +250,26 @@ fn new_accumulator_for_verify() -> BatchedAccumulator<Bn256> fn main() { let args: Vec<String> = std::env::args().collect(); - if args.len() != 2 { - println!("Usage: \n<transcript file>"); + if args.len() != 4 { + println!("Usage: \n<transcript file> <circuit power> <batch size>"); std::process::exit(exitcode::USAGE); } let transcript_filename = &args[1]; + let circuit_power = args[2].parse().expect("could not parse circuit power"); + let batch_size = args[3].parse().expect("could not parse batch size"); + + let parameters = CeremonyParams::new(CurveKind::Bn256, circuit_power, batch_size); // Try to load transcript file from disk. let reader = OpenOptions::new() @@ -231,7 +284,7 @@ fn main() { }; // Initialize the accumulator - let mut current_accumulator = new_accumulator_for_verify(); + let mut current_accumulator = new_accumulator_for_verify(&parameters); // The "last response file hash" is just a blank BLAKE2b hash // at the beginning of the hash chain.
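For intuition about the hash chain mentioned above: each challenge commits to the BLAKE2b hash of the previous response, starting from a blank 64-byte hash. A sketch of the chaining idea only (not the exact on-disk layout, and `chain_hash` is our name):

```rust
use blake2::{Blake2b, Digest};

/// Hash the previous response's hash together with the next contribution.
fn chain_hash(last_response_hash: &[u8; 64], contribution: &[u8]) -> [u8; 64] {
    let mut hasher = Blake2b::default();
    hasher.input(&last_response_hash[..]);
    hasher.input(contribution);
    let mut out = [0u8; 64];
    out.copy_from_slice(hasher.result().as_slice());
    out
}

fn main() {
    let genesis = [0u8; 64]; // blank hash at the start of the chain
    let h1 = chain_hash(&genesis, b"response 1");
    let h2 = chain_hash(&h1, b"response 2");
    assert_ne!(&h1[..], &h2[..]); // each link depends on the one before it
}
```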
@@ -249,10 +302,7 @@ fn main() { } let memory_slice = transcript_readable_map - .get( - i * Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE - ..(i + 1) * Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE, - ) + .get(i * parameters.contribution_size..(i + 1) * parameters.contribution_size) .expect("must read point data from file"); let writer = OpenOptions::new() .read(true) @@ -262,7 +312,7 @@ fn main() { .expect("unable to create temporary tmp_response"); writer - .set_len(Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE as u64) + .set_len(parameters.contribution_size as u64) .expect("must make output file large enough"); let mut writable_map = unsafe { MmapOptions::new() @@ -279,8 +329,12 @@ fn main() { .make_read_only() .expect("must make a map readonly"); - let last_challenge_file_hash = - get_challenge_file_hash(&mut current_accumulator, &last_response_file_hash, i == 0); + let last_challenge_file_hash = get_challenge_file_hash( + &mut current_accumulator, + &last_response_file_hash, + i == 0, + &parameters, + ); // Deserialize the accumulator provided by the player in // their response file. It's stored in the transcript in @@ -291,14 +345,13 @@ &response_readable_map, CheckForCorrectness::Yes, UseCompression::Yes, + &parameters, ) .expect("unable to read uncompressed accumulator"); - let response_file_pubkey = PublicKey::<Bn256>::read::<Bn256CeremonyParameters>( - &response_readable_map, - UseCompression::Yes, - ) - .unwrap(); + let response_file_pubkey = + PublicKey::<Bn256>::read(&response_readable_map, UseCompression::Yes, &parameters) + .unwrap(); // Compute the hash of the response file. (we had it in uncompressed // form in the transcript, but the response file is compressed to save // participants' bandwidth.) @@ -306,6 +359,7 @@ &mut response_file_accumulator, &response_file_pubkey, &last_challenge_file_hash, + &parameters, ); // Verify the transformation from the previous accumulator to the new
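The `verify_transformation` step referenced above ultimately rests on same-ratio pairing checks. A self-contained sketch of that check, mirroring the `same_ratio` helper and its tests elsewhere in this diff (the function name `same_ratio_check` is ours):

```rust
use bellman_ce::pairing::{
    bn256::{Fr, G1Affine, G2Affine},
    CurveAffine, CurveProjective,
};
use rand::{thread_rng, Rand};

/// e(a0, b1) == e(a1, b0) holds iff a1/a0 and b1/b0 hide the same scalar.
fn same_ratio_check(a: (G1Affine, G1Affine), b: (G2Affine, G2Affine)) -> bool {
    a.0.pairing_with(&b.1) == a.1.pairing_with(&b.0)
}

fn main() {
    let rng = &mut thread_rng();
    let s = Fr::rand(rng);
    let g1 = G1Affine::one();
    let g2 = G2Affine::one();
    let g1_s = g1.mul(s).into_affine();
    let g2_s = g2.mul(s).into_affine();
    // Both pairs are scaled by the same secret s, so the check passes.
    assert!(same_ratio_check((g1, g1_s), (g2, g2_s)));
    // Swapping one pair breaks the ratio.
    assert!(!same_ratio_check((g1_s, g1), (g2, g2_s)));
}
```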
diff --git a/powersoftau/src/bin/verify_transform_constrained.rs b/powersoftau/src/bin/verify_transform_constrained.rs index c05c334..a1b5879 100644 --- a/powersoftau/src/bin/verify_transform_constrained.rs +++ b/powersoftau/src/bin/verify_transform_constrained.rs @@ -1,5 +1,4 @@ use powersoftau::batched_accumulator::BatchedAccumulator; -use powersoftau::bn256::Bn256CeremonyParameters; use powersoftau::keypair::PublicKey; use powersoftau::parameters::{CheckForCorrectness, UseCompression}; @@ -9,7 +8,7 @@ use std::fs::OpenOptions; use std::io::{Read, Write}; -use powersoftau::parameters::PowersOfTauParameters; +use powersoftau::parameters::{CeremonyParams, CurveKind}; const PREVIOUS_CHALLENGE_IS_COMPRESSED: UseCompression = UseCompression::No; const CONTRIBUTION_IS_COMPRESSED: UseCompression = UseCompression::Yes; @@ -17,17 +16,21 @@ const COMPRESS_NEW_CHALLENGE: UseCompression = UseCompression::No; fn main() { let args: Vec<String> = std::env::args().collect(); - if args.len() != 4 { - println!("Usage: \n<challenge file> <response file> <new challenge file>"); + if args.len() != 6 { + println!("Usage: \n<challenge file> <response file> <new challenge file> <circuit power> <batch size>"); std::process::exit(exitcode::USAGE); } let challenge_filename = &args[1]; let response_filename = &args[2]; let new_challenge_filename = &args[3]; + let circuit_power = args[4].parse().expect("could not parse circuit power"); + let batch_size = args[5].parse().expect("could not parse batch size"); + + let parameters = CeremonyParams::new(CurveKind::Bn256, circuit_power, batch_size); println!( "Will verify and decompress a contribution to accumulator for 2^{} powers of tau", - Bn256CeremonyParameters::REQUIRED_POWER + parameters.size ); // Try to load challenge file from disk. @@ -41,11 +44,8 @@ fn main() { .metadata() .expect("unable to get filesystem metadata for challenge file"); let expected_challenge_length = match PREVIOUS_CHALLENGE_IS_COMPRESSED { - UseCompression::Yes => { - Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE - - Bn256CeremonyParameters::PUBLIC_KEY_SIZE - } - UseCompression::No => Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE, + UseCompression::Yes => parameters.contribution_size - parameters.public_key_size, + UseCompression::No => parameters.accumulator_size, }; if metadata.len() != (expected_challenge_length as u64) { panic!( @@ -73,11 +73,8 @@ fn main() { .metadata() .expect("unable to get filesystem metadata for response file"); let expected_response_length = match CONTRIBUTION_IS_COMPRESSED { - UseCompression::Yes => Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE, - UseCompression::No => { - Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE - + Bn256CeremonyParameters::PUBLIC_KEY_SIZE - } + UseCompression::Yes => parameters.contribution_size, + UseCompression::No => parameters.accumulator_size + parameters.public_key_size, }; if metadata.len() != (expected_response_length as u64) { panic!( @@ -99,9 +96,7 @@ // Check that contribution is correct let current_accumulator_hash = - BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash( - &challenge_readable_map, - ); + BatchedAccumulator::<Bn256>::calculate_hash(&challenge_readable_map); println!("Hash of the `challenge` file for verification:"); for line in current_accumulator_hash.as_slice().chunks(16) { @@ -142,9 +137,7 @@ } } - let response_hash = BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash( - &response_readable_map, - ); + let response_hash = BatchedAccumulator::<Bn256>::calculate_hash(&response_readable_map); println!("Hash of the response file for verification:"); for line in response_hash.as_slice().chunks(16) { @@ -159,9 +152,10 @@ } // get the contributor's public key - let public_key = PublicKey::<Bn256>::read::<Bn256CeremonyParameters>( + let public_key = PublicKey::<Bn256>::read( &response_readable_map, CONTRIBUTION_IS_COMPRESSED, + &parameters, ) .expect("wasn't able to deserialize the response file's public key");
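The length checks above select the expected file size from the ceremony parameters according to compression. A minimal sketch of that selection logic, with a stand-in `Sizes` struct and illustrative numbers rather than the real `CeremonyParams`:

```rust
// Field names follow CeremonyParams; the struct itself is a stand-in.
struct Sizes {
    accumulator_size: usize,
    contribution_size: usize,
    public_key_size: usize,
}

#[derive(Clone, Copy)]
enum UseCompression { Yes, No }

fn expected_challenge_len(p: &Sizes, compressed: UseCompression) -> usize {
    match compressed {
        // A compressed challenge is a contribution with the public key stripped.
        UseCompression::Yes => p.contribution_size - p.public_key_size,
        UseCompression::No => p.accumulator_size,
    }
}

fn expected_response_len(p: &Sizes, compressed: UseCompression) -> usize {
    match compressed {
        UseCompression::Yes => p.contribution_size,
        // An uncompressed response carries the accumulator plus the public key.
        UseCompression::No => p.accumulator_size + p.public_key_size,
    }
}

fn main() {
    // Illustrative numbers only, not the real BN256 sizes.
    let p = Sizes { accumulator_size: 1000, contribution_size: 600, public_key_size: 100 };
    assert_eq!(expected_challenge_len(&p, UseCompression::Yes), 500);
    assert_eq!(expected_response_len(&p, UseCompression::No), 1100);
}
```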
@@ -171,7 +165,7 @@ "Verifying a contribution to contain proper powers and correspond to the public key..." ); - let valid = BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::verify_transformation( + let valid = BatchedAccumulator::<Bn256>::verify_transformation( &challenge_readable_map, &response_readable_map, &public_key, @@ -180,6 +174,7 @@ CONTRIBUTION_IS_COMPRESSED, CheckForCorrectness::No, CheckForCorrectness::Yes, + &parameters, ); if !valid { @@ -206,7 +201,7 @@ // Recomputation strips the public key and uses hashing to link with the previous contribution after decompression writer - .set_len(Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE as u64) + .set_len(parameters.accumulator_size as u64) .expect("must make output file large enough"); let mut writable_map = unsafe { @@ -225,10 +220,11 @@ .expect("unable to write hash to new challenge file"); } - BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::decompress( + BatchedAccumulator::<Bn256>::decompress( &response_readable_map, &mut writable_map, CheckForCorrectness::No, + &parameters, ) .expect("must decompress a response for a new challenge"); @@ -239,9 +235,7 @@ .expect("must make a map readonly"); let recompressed_hash = - BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash( - &new_challenge_readable_map, - ); + BatchedAccumulator::<Bn256>::calculate_hash(&new_challenge_readable_map); println!("Here's the BLAKE2b hash of the decompressed participant's response as new_challenge file:"); diff --git a/powersoftau/src/bn256/mod.rs b/powersoftau/src/bn256/mod.rs deleted file mode 100644 index 33693c0..0000000 --- a/powersoftau/src/bn256/mod.rs +++ /dev/null @@ -1,109 +0,0 @@ -use crate::parameters::PowersOfTauParameters; - -#[derive(Clone)] -pub struct Bn256CeremonyParameters {} - -impl PowersOfTauParameters for Bn256CeremonyParameters { - #[cfg(not(feature = "smalltest"))] - const REQUIRED_POWER: usize = 28; - - #[cfg(feature = "smalltest")] - const REQUIRED_POWER: usize = 10; - #[cfg(feature = "smalltest")] - const EMPIRICAL_BATCH_SIZE: usize = 1 << 8; - - // This ceremony is based on the BN256 elliptic curve construction.
- const G1_UNCOMPRESSED_BYTE_SIZE: usize = 64; - const G2_UNCOMPRESSED_BYTE_SIZE: usize = 128; - const G1_COMPRESSED_BYTE_SIZE: usize = 32; - const G2_COMPRESSED_BYTE_SIZE: usize = 64; -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::accumulator::*; - use crate::{ - keypair::{keypair, PublicKey}, - parameters::{CheckForCorrectness, UseCompression}, - utils::{power_pairs, same_ratio}, - }; - use bellman_ce::pairing::{ - bn256::{Bn256, Fr, G1Affine, G2Affine}, - ff::Field, - CurveAffine, CurveProjective, - }; - use rand::{thread_rng, Rand, Rng}; - - #[test] - fn test_pubkey_serialization() { - let rng = &mut thread_rng(); - let digest = (0..64).map(|_| rng.gen()).collect::<Vec<u8>>(); - let (pk, _) = keypair::<_, Bn256>(rng, &digest); - let mut v = vec![]; - pk.serialize(&mut v).unwrap(); - assert_eq!(v.len(), Bn256CeremonyParameters::PUBLIC_KEY_SIZE); - let deserialized = PublicKey::<Bn256>::deserialize(&mut &v[..]).unwrap(); - assert!(pk == deserialized); - } - - #[test] - fn test_power_pairs() { - let rng = &mut thread_rng(); - - let mut v = vec![]; - let x = Fr::rand(rng); - let mut acc = Fr::one(); - for _ in 0..100 { - v.push(G1Affine::one().mul(acc).into_affine()); - acc.mul_assign(&x); - } - - let gx = G2Affine::one().mul(x).into_affine(); - - assert!(same_ratio(power_pairs(&v), (G2Affine::one(), gx))); - - v[1] = v[1].mul(Fr::rand(rng)).into_affine(); - - assert!(!same_ratio(power_pairs(&v), (G2Affine::one(), gx))); - } - - #[test] - fn test_same_ratio() { - let rng = &mut thread_rng(); - - let s = Fr::rand(rng); - let g1 = G1Affine::one(); - let g2 = G2Affine::one(); - let g1_s = g1.mul(s).into_affine(); - let g2_s = g2.mul(s).into_affine(); - - assert!(same_ratio((g1, g1_s), (g2, g2_s))); - assert!(!same_ratio((g1_s, g1), (g2, g2_s))); - } - - #[test] - fn test_accumulator_serialization() { - let rng = &mut thread_rng(); - let mut digest = (0..64).map(|_| rng.gen()).collect::<Vec<u8>>(); - let params = Bn256CeremonyParameters {}; - let mut acc = Accumulator::<Bn256, _>::new(params.clone()); - let before = acc.clone(); - let (pk, sk) = keypair::<_, Bn256>(rng, &digest); - acc.transform(&sk); - assert!(verify_transform(&before, &acc, &pk, &digest)); - digest[0] = !digest[0]; - assert!(!verify_transform(&before, &acc, &pk, &digest)); - let mut v = Vec::with_capacity(Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE - 64); - acc.serialize(&mut v, UseCompression::No).unwrap(); - assert_eq!(v.len(), Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE - 64); - let deserialized = Accumulator::deserialize( - &mut &v[..], - UseCompression::No, - CheckForCorrectness::No, - params, - ) - .unwrap(); - assert!(acc == deserialized); - } -} diff --git a/powersoftau/src/keypair.rs b/powersoftau/src/keypair.rs index 44cffd6..33ffe87 100644 --- a/powersoftau/src/keypair.rs +++ b/powersoftau/src/keypair.rs @@ -9,7 +9,7 @@ use std::io::{self, Read, Write}; use typenum::consts::U64; -use super::parameters::{DeserializationError, PowersOfTauParameters, UseCompression}; +use super::parameters::{CeremonyParams, DeserializationError, UseCompression}; use super::utils::{hash_to_g2, write_point}; /// Contains terms of the form (s_1, s_1^x, H(s_1^x)_2, H(s_1^x)_2^x) @@ -167,42 +167,43 @@ impl<E: Engine> PublicKey<E> { /// This function is intended to write the key to the memory map and calculates /// a position for writing into the file itself based on information whether /// contribution was output in compressed or uncompressed form - pub fn write<P>(
+ pub fn write( &self, output_map: &mut MmapMut, accumulator_was_compressed: UseCompression, - ) -> io::Result<()> - where - P: PowersOfTauParameters, - { + parameters: &CeremonyParams, + ) -> io::Result<()> { let mut position = match accumulator_was_compressed { - UseCompression::Yes => P::CONTRIBUTION_BYTE_SIZE - P::PUBLIC_KEY_SIZE, - UseCompression::No => P::ACCUMULATOR_BYTE_SIZE, + UseCompression::Yes => parameters.contribution_size - parameters.public_key_size, + UseCompression::No => parameters.accumulator_size, }; + let g1_size = parameters.curve.g1; + let g2_size = parameters.curve.g2; + (&mut output_map[position..]).write_all(&self.tau_g1.0.into_uncompressed().as_ref())?; - position += P::G1_UNCOMPRESSED_BYTE_SIZE; + position += g1_size; (&mut output_map[position..]).write_all(&self.tau_g1.1.into_uncompressed().as_ref())?; - position += P::G1_UNCOMPRESSED_BYTE_SIZE; + position += g1_size; (&mut output_map[position..]).write_all(&self.alpha_g1.0.into_uncompressed().as_ref())?; - position += P::G1_UNCOMPRESSED_BYTE_SIZE; + position += g1_size; (&mut output_map[position..]).write_all(&self.alpha_g1.1.into_uncompressed().as_ref())?; - position += P::G1_UNCOMPRESSED_BYTE_SIZE; + position += g1_size; (&mut output_map[position..]).write_all(&self.beta_g1.0.into_uncompressed().as_ref())?; - position += P::G1_UNCOMPRESSED_BYTE_SIZE; + position += g1_size; (&mut output_map[position..]).write_all(&self.beta_g1.1.into_uncompressed().as_ref())?; - position += P::G1_UNCOMPRESSED_BYTE_SIZE; + position += g1_size; (&mut output_map[position..]).write_all(&self.tau_g2.into_uncompressed().as_ref())?; - position += P::G2_UNCOMPRESSED_BYTE_SIZE; + position += g2_size; (&mut output_map[position..]).write_all(&self.alpha_g2.into_uncompressed().as_ref())?; - position += P::G2_UNCOMPRESSED_BYTE_SIZE; + position += g2_size; (&mut output_map[position..]).write_all(&self.beta_g2.into_uncompressed().as_ref())?; @@ -214,13 +215,11 @@ impl<E: Engine> PublicKey<E> { /// Deserialize the public key. Points are always in uncompressed form, and /// always checked, since there aren't very many of them. Does not allow any /// points at infinity. - pub fn read<P>( + pub fn read( input_map: &Mmap, accumulator_was_compressed: UseCompression, - ) -> Result<PublicKey<E>, DeserializationError> - where - P: PowersOfTauParameters, - { + parameters: &CeremonyParams, + ) -> Result<PublicKey<E>, DeserializationError> { fn read_uncompressed<EE: Engine, C: CurveAffine<Engine = EE, Scalar = EE::Fr>>( input_map: &Mmap, position: usize, @@ -241,33 +240,36 @@ impl<E: Engine> PublicKey<E> { } let mut position = match accumulator_was_compressed { - UseCompression::Yes => P::CONTRIBUTION_BYTE_SIZE - P::PUBLIC_KEY_SIZE, - UseCompression::No => P::ACCUMULATOR_BYTE_SIZE, + UseCompression::Yes => parameters.contribution_size - parameters.public_key_size, + UseCompression::No => parameters.accumulator_size, }; + let g1_size = parameters.curve.g1; + let g2_size = parameters.curve.g2; + let tau_g1_s = read_uncompressed::<E, _>(input_map, position)?; - position += P::G1_UNCOMPRESSED_BYTE_SIZE; + position += g1_size; let tau_g1_s_tau = read_uncompressed::<E, _>(input_map, position)?; - position += P::G1_UNCOMPRESSED_BYTE_SIZE; + position += g1_size; let alpha_g1_s = read_uncompressed::<E, _>(input_map, position)?; - position += P::G1_UNCOMPRESSED_BYTE_SIZE; + position += g1_size; let alpha_g1_s_alpha = read_uncompressed::<E, _>(input_map, position)?; - position += P::G1_UNCOMPRESSED_BYTE_SIZE; + position += g1_size; let beta_g1_s = read_uncompressed::<E, _>(input_map, position)?; - position += P::G1_UNCOMPRESSED_BYTE_SIZE; + position += g1_size; let beta_g1_s_beta = read_uncompressed::<E, _>(input_map, position)?; - position += P::G1_UNCOMPRESSED_BYTE_SIZE; + position += g1_size; let tau_g2 = read_uncompressed::<E, _>(input_map, position)?; - position += P::G2_UNCOMPRESSED_BYTE_SIZE; + position += g2_size; let alpha_g2 = read_uncompressed::<E, _>(input_map, position)?; - position += P::G2_UNCOMPRESSED_BYTE_SIZE; + position += g2_size; let beta_g2 = read_uncompressed::<E, _>(input_map, position)?; @@ -281,3 +283,35 @@ }) } } + +#[cfg(test)] +mod tests { + use super::*; + use rand::{thread_rng, Rng}; + + mod bn256 { + use super::*; + use crate::parameters::{CurveKind, CurveParams}; + use bellman_ce::pairing::bn256::Bn256; + + #[test] + fn test_pubkey_serialization() { + let curve = CurveParams::new(CurveKind::Bn256); + let public_key_size = 6 * curve.g1 + 3 * curve.g2; + + // Generate a random public key + let rng = &mut thread_rng(); + let digest = (0..64).map(|_| rng.gen()).collect::<Vec<u8>>(); + let (pk, _) = keypair::<_, Bn256>(rng, &digest); + + // Serialize it + let mut v = vec![]; + pk.serialize(&mut v).unwrap(); + assert_eq!(v.len(), public_key_size); + + // Deserialize it and check that it matches + let deserialized = PublicKey::<Bn256>::deserialize(&mut &v[..]).unwrap(); + assert!(pk == deserialized); + } + } +} diff --git a/powersoftau/src/lib.rs b/powersoftau/src/lib.rs index f4ac2b3..fa0298f 100644 --- a/powersoftau/src/lib.rs +++ b/powersoftau/src/lib.rs @@ -1,6 +1,4 @@ -pub mod accumulator; pub mod batched_accumulator; -pub mod bn256; pub mod keypair; pub mod parameters; pub mod utils;
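The `write`/`read` pair above walks a fixed layout: six uncompressed G1 points (the `(s, s*secret)` pairs for tau, alpha, and beta) followed by three uncompressed G2 points. A worked check of that layout for BN256's point sizes:

```rust
fn main() {
    let g1 = 64; // uncompressed G1 size in bytes for BN256
    let g2 = 128; // uncompressed G2 size in bytes for BN256

    // 6 G1 points + 3 G2 points, matching public_key_size in parameters.rs.
    let public_key_size = 6 * g1 + 3 * g2;
    assert_eq!(public_key_size, 768);

    // write()/read() advance a running `position` cursor over this layout:
    let mut position = 0;
    for _ in 0..6 { position += g1; } // (s1, s1*tau), (s2, s2*alpha), (s3, s3*beta)
    for _ in 0..3 { position += g2; } // tau, alpha, beta in G2
    assert_eq!(position, public_key_size);
}
```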
diff --git a/powersoftau/src/parameters.rs b/powersoftau/src/parameters.rs index cee1b49..f1e01f6 100644 --- a/powersoftau/src/parameters.rs +++ b/powersoftau/src/parameters.rs @@ -2,42 +2,125 @@ use bellman_ce::pairing::GroupDecodingError; use std::fmt; use std::io; -pub trait PowersOfTauParameters: Clone { - const REQUIRED_POWER: usize; - - const G1_UNCOMPRESSED_BYTE_SIZE: usize; - const G2_UNCOMPRESSED_BYTE_SIZE: usize; - const G1_COMPRESSED_BYTE_SIZE: usize; - const G2_COMPRESSED_BYTE_SIZE: usize; - - const TAU_POWERS_LENGTH: usize = (1 << Self::REQUIRED_POWER); - - const TAU_POWERS_G1_LENGTH: usize = (Self::TAU_POWERS_LENGTH << 1) - 1; - - const ACCUMULATOR_BYTE_SIZE: usize = (Self::TAU_POWERS_G1_LENGTH * Self::G1_UNCOMPRESSED_BYTE_SIZE) + // g1 tau powers - (Self::TAU_POWERS_LENGTH * Self::G2_UNCOMPRESSED_BYTE_SIZE) + // g2 tau powers - (Self::TAU_POWERS_LENGTH * Self::G1_UNCOMPRESSED_BYTE_SIZE) + // alpha tau powers - (Self::TAU_POWERS_LENGTH * Self::G1_UNCOMPRESSED_BYTE_SIZE) // beta tau powers - + Self::G2_UNCOMPRESSED_BYTE_SIZE // beta in g2 - + Self::HASH_SIZE; // blake2b hash of previous contribution - - const PUBLIC_KEY_SIZE: usize = 3 * Self::G2_UNCOMPRESSED_BYTE_SIZE + // tau, alpha, and beta in g2 - 6 * Self::G1_UNCOMPRESSED_BYTE_SIZE; // (s1, s1*tau), (s2, s2*alpha), (s3, s3*beta) in g1 - - const CONTRIBUTION_BYTE_SIZE: usize = (Self::TAU_POWERS_G1_LENGTH * Self::G1_COMPRESSED_BYTE_SIZE) + // g1 tau powers - (Self::TAU_POWERS_LENGTH * Self::G2_COMPRESSED_BYTE_SIZE) + // g2 tau powers - (Self::TAU_POWERS_LENGTH * Self::G1_COMPRESSED_BYTE_SIZE) + // alpha tau powers - (Self::TAU_POWERS_LENGTH * Self::G1_COMPRESSED_BYTE_SIZE) // beta tau powers - + Self::G2_COMPRESSED_BYTE_SIZE // beta in g2 - + Self::HASH_SIZE // blake2b hash of input accumulator - + Self::PUBLIC_KEY_SIZE; // public key - - // Blake2b hash size - const HASH_SIZE: usize = 64; - - const EMPIRICAL_BATCH_SIZE: usize = 1 << 21; +/// The sizes of the group elements of a curve +#[derive(Clone, PartialEq, Eq)] +pub struct CurveParams { + pub g1: usize, + pub g2: usize, + pub g1_compressed: usize, + pub g2_compressed: usize, } +/// The types of curves we support +#[derive(Clone, PartialEq, Eq)] +pub enum CurveKind { + Bn256, +} + +impl CurveParams { + /// Creates a new curve based on the provided CurveKind + pub fn new(kind: CurveKind) -> Self { + let (g1, g2) = match kind { + CurveKind::Bn256 => (64, 128), + }; + + CurveParams { + g1, + g2, + g1_compressed: g1 / 2, + g2_compressed: g2 / 2, + } + } +} + +#[derive(Clone, PartialEq, Eq)] +/// The parameters used for the trusted setup ceremony +pub struct CeremonyParams { + /// The type of the curve being used (currently only supports BN256) + pub curve: CurveParams, + /// The number of Powers of Tau G1 elements which will be accumulated + pub powers_g1_length: usize, + /// The number of Powers of Tau Alpha/Beta/G2 elements which will be accumulated + pub powers_length: usize, + /// The circuit size exponent (i.e. length will be 2^size), depends on the computation you want to support + pub size: usize, + /// The empirical batch size for the batched accumulator. + /// This is a hyperparameter and may be different for each + /// curve.
+ pub batch_size: usize, + /// Size of the used public key + pub public_key_size: usize, + /// Total size of the accumulator used for the ceremony + pub accumulator_size: usize, + /// Total size of the contribution + pub contribution_size: usize, + /// Size of the hash of the previous contribution + pub hash_size: usize, +} + +impl CeremonyParams { + /// Constructs a new ceremony parameters object from the type of provided curve + pub fn new(kind: CurveKind, size: usize, batch_size: usize) -> Self { + // create the curve + let curve = CurveParams::new(kind); + Self::new_with_curve(curve, size, batch_size) + } + + /// Constructs a new ceremony parameters object from the directly provided curve with parameters + /// Consider using the `new` method if you want to use one of the pre-implemented curves + pub fn new_with_curve(curve: CurveParams, size: usize, batch_size: usize) -> Self { + // assume we're using a 64-byte hash function such as Blake2b + let hash_size = 64; + + // 2^{size} + let powers_length = 1 << size; + // 2^{size+1} - 1 + let powers_g1_length = (powers_length << 1) - 1; + + let accumulator_size = + // G1 Tau powers + powers_g1_length * curve.g1 + + // G2 Tau Powers + Alpha Tau powers + Beta Tau powers + powers_length * (curve.g2 + (curve.g1 * 2)) + + // Beta in G2 + curve.g2 + + // Hash of the previous contribution + hash_size; + + let public_key_size = + // tau, alpha, beta in g2 + 3 * curve.g2 + + // (s1, s1*tau), (s2, s2*alpha), (s3, s3*beta) in g1 + 6 * curve.g1; + + let contribution_size = + // G1 Tau powers (compressed) + powers_g1_length * curve.g1_compressed + + // G2 Tau Powers + Alpha Tau powers + Beta Tau powers (compressed) + powers_length * (curve.g2_compressed + (curve.g1_compressed * 2)) + + // Beta in G2 + curve.g2_compressed + + // Hash of the previous contribution + hash_size + + // The public key of the previous contributor + public_key_size; + + Self { + curve, + size, + batch_size, + accumulator_size, + public_key_size, + contribution_size, + hash_size, + powers_length, + powers_g1_length, + } + } +} + +// TODO: Add tests! +
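A worked example of the size formulas in `new_with_curve` for the reduced ceremony (BN256, size = 10). These numbers are computed by hand from the formulas above and should match what `CeremonyParams::new` produces:

```rust
fn main() {
    let (g1, g2) = (64usize, 128usize); // uncompressed BN256 point sizes
    let (g1c, g2c) = (g1 / 2, g2 / 2); // compressed point sizes
    let hash_size = 64; // BLAKE2b digest

    let powers_length = 1usize << 10; // 1024
    let powers_g1_length = (powers_length << 1) - 1; // 2047

    let public_key_size = 3 * g2 + 6 * g1; // 768 bytes
    let accumulator_size =
        powers_g1_length * g1 + powers_length * (g2 + 2 * g1) + g2 + hash_size;
    let contribution_size = powers_g1_length * g1c
        + powers_length * (g2c + 2 * g1c)
        + g2c
        + hash_size
        + public_key_size;

    assert_eq!(accumulator_size, 393_344); // uncompressed challenge bytes
    assert_eq!(contribution_size, 197_472); // compressed response bytes
}
```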
/// Determines if point compression should be used. #[derive(Copy, Clone, PartialEq)] pub enum UseCompression { diff --git a/powersoftau/src/utils.rs b/powersoftau/src/utils.rs index 064db39..05601f7 100644 --- a/powersoftau/src/utils.rs +++ b/powersoftau/src/utils.rs @@ -31,12 +31,13 @@ pub fn hash_to_g2<E: Engine>(mut digest: &[u8]) -> E::G2 { } #[cfg(test)] -mod tests { +mod bn256_tests { use super::*; - use bellman_ce::pairing::bn256::Bn256; + use bellman_ce::pairing::bn256::{Bn256, Fr, G1Affine, G2Affine}; + use rand::{thread_rng, Rand}; #[test] - fn test_hash_to_g2() { + fn test_hash_to_g2_bn256() { assert!( hash_to_g2::<Bn256>(&[ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, @@ -57,6 +58,41 @@ ]) ); } + + #[test] + fn test_same_ratio_bn256() { + let rng = &mut thread_rng(); + + let s = Fr::rand(rng); + let g1 = G1Affine::one(); + let g2 = G2Affine::one(); + let g1_s = g1.mul(s).into_affine(); + let g2_s = g2.mul(s).into_affine(); + + assert!(same_ratio((g1, g1_s), (g2, g2_s))); + assert!(!same_ratio((g1_s, g1), (g2, g2_s))); + } + + #[test] + fn test_power_pairs() { + let rng = &mut thread_rng(); + + let mut v = vec![]; + let x = Fr::rand(rng); + let mut acc = Fr::one(); + for _ in 0..100 { + v.push(G1Affine::one().mul(acc).into_affine()); + acc.mul_assign(&x); + } + + let gx = G2Affine::one().mul(x).into_affine(); + + assert!(same_ratio(power_pairs(&v), (G2Affine::one(), gx))); + + v[1] = v[1].mul(Fr::rand(rng)).into_affine(); + + assert!(!same_ratio(power_pairs(&v), (G2Affine::one(), gx))); + } } fn merge_pairs<E: Engine, G: CurveAffine<Engine = E, Scalar = E::Fr>>( diff --git a/powersoftau/test.sh b/powersoftau/test.sh index f2af161..624cfe6 100755 --- a/powersoftau/test.sh +++ b/powersoftau/test.sh @@ -8,18 +8,21 @@ rm tmp_* set -e -cargo run --release --features smalltest --bin new_constrained challenge1 -yes | cargo run --release --features smalltest --bin compute_constrained challenge1 response1 -cargo run --release --features smalltest --bin verify_transform_constrained challenge1 response1 challenge2 +SIZE=10 +BATCH=256 -yes | cargo run --release --features smalltest --bin compute_constrained challenge2 response2 -cargo run --release --features smalltest --bin verify_transform_constrained challenge2 response2 challenge3 +cargo run --release --bin new_constrained challenge1 $SIZE $BATCH +yes | cargo run --release --bin compute_constrained challenge1 response1 $SIZE $BATCH +cargo run --release --bin verify_transform_constrained challenge1 response1 challenge2 $SIZE $BATCH -yes | cargo run --release --features smalltest --bin compute_constrained challenge3 response3 -cargo run --release --features smalltest --bin verify_transform_constrained challenge3 response3 challenge4 +yes | cargo run --release --bin compute_constrained challenge2 response2 $SIZE $BATCH +cargo run --release --bin verify_transform_constrained challenge2 response2 challenge3 $SIZE $BATCH -cargo run --release --features smalltest --bin beacon_constrained challenge4 response4 -cargo run --release --features smalltest --bin verify_transform_constrained challenge4 response4 challenge5 +yes | cargo run --release --bin compute_constrained challenge3 response3 $SIZE $BATCH +cargo run --release --bin verify_transform_constrained challenge3 response3 challenge4 $SIZE $BATCH + +cargo run --release --bin beacon_constrained challenge4 response4 $SIZE $BATCH +cargo run --release --bin verify_transform_constrained challenge4 response4 challenge5 $SIZE $BATCH cat response1 response2 response3 response4 > transcript -cargo run --release --features smalltest --bin verify transcript
+cargo run --release --bin verify transcript $SIZE $BATCH
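For completeness, how the script's `SIZE`/`BATCH` values map onto the new runtime parameters inside this crate (a sketch, not part of the diff):

```rust
use powersoftau::parameters::{CeremonyParams, CurveKind};

fn main() {
    // SIZE=10, BATCH=256 from test.sh above; these replace the old
    // compile-time `smalltest` feature with runtime arguments.
    let (size, batch) = (10, 256);
    let parameters = CeremonyParams::new(CurveKind::Bn256, size, batch);
    assert_eq!(parameters.powers_length, 1 << 10);
    assert_eq!(parameters.powers_g1_length, (1 << 11) - 1);
}
```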