Make ceremony params configurable at runtime (#10)
* feat(parameters): Replace trait with a params struct. We define a CeremonyParams struct which contains curve- and setup-specific parameters. The curve is a CurveParams struct which contains the sizes of the compressed and uncompressed group elements. This will allow us to easily extend the implementations over multiple curves and constraint numbers (currently these are hard-coded and cannot be easily changed)
* feat(keypair): Use the CeremonyParams struct instead of being generic over the PowersOfTauParameters trait
* feat(accumulator): Use the CeremonyParams struct instead of being generic over the PowersOfTauParameters trait
* feat(batched-accumulator): Use the CeremonyParams struct instead of being generic over the PowersOfTauParameters trait. driveby-change: also replace println!s with error!/info! logs
* refactor(bn256): delete bn256 module and move tests to better locations
* fix(bin): make all binaries build with the CeremonyParams object
* test(e2e): run the test with circuit power and batch size provided at runtime
* chore: remove unused accumulator.rs and new.rs
This commit is contained in:
parent 32bbd5f35c
commit 614b4b899d
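For orientation, a minimal sketch of the shape the new structs might take, assuming only what the diff below shows (the `curve.g1`/`g1_compressed`/`g2`/`g2_compressed` byte sizes and the `powers_length`, `powers_g1_length`, `batch_size`, `hash_size` fields). The constructor is hypothetical and only illustrates how a runtime circuit power and batch size could populate the struct:

    // Byte sizes of encoded group elements; field names follow the accesses
    // in the diff below (self.parameters.curve.g1, .g1_compressed, ...).
    pub struct CurveParams {
        pub g1: usize,            // uncompressed G1 size
        pub g2: usize,            // uncompressed G2 size
        pub g1_compressed: usize, // compressed G1 size
        pub g2_compressed: usize, // compressed G2 size
    }

    // Runtime replacement for the old compile-time PowersOfTauParameters trait.
    pub struct CeremonyParams {
        pub curve: CurveParams,
        pub powers_length: usize,    // number of tau powers in G2/alpha/beta
        pub powers_g1_length: usize, // number of tau powers in G1
        pub batch_size: usize,       // chunk size for memory-mapped processing
        pub hash_size: usize,        // BLAKE2b digest size (64 bytes)
    }

    impl CeremonyParams {
        // Hypothetical constructor: 2^power tau powers, and 2^(power+1) - 1
        // G1 powers, matching the old TAU_POWERS_*_LENGTH relationship.
        pub fn new(curve: CurveParams, power: usize, batch_size: usize) -> Self {
            let powers_length = 1 << power;
            Self {
                curve,
                powers_length,
                powers_g1_length: (powers_length << 1) - 1,
                batch_size,
                hash_size: 64,
            }
        }
    }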
powersoftau/Cargo.lock (generated)
@@ -249,6 +249,14 @@ name = "libc"
 version = "0.2.49"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
+[[package]]
+name = "log"
+version = "0.4.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cfg-if 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "memmap"
 version = "0.7.0"
@@ -319,6 +327,7 @@ dependencies = [
 "generic-array 0.8.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "hex-literal 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
 "itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
 "memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "num_cpus 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -510,6 +519,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 "checksum itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5b8467d9c1cebe26feb08c640139247fac215782d35371ade9a2136ed6085358"
 "checksum lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bc5729f27f159ddd61f4df6228e827e86643d4d3e7c32183cb30a1c08f604a14"
 "checksum libc 0.2.49 (registry+https://github.com/rust-lang/crates.io-index)" = "413f3dfc802c5dc91dc570b05125b6cda9855edfaa9825c9849807876376e70e"
+"checksum log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7"
 "checksum memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b"
 "checksum memoffset 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0f9dc261e2b62d7a622bf416ea3c5245cdd5d9a7fcc428c0d06804dfce1775b3"
 "checksum nodrop 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "2f9667ddcc6cc8a43afc9b7917599d7216aa09c463919ea32c59ed6cac8bc945"
powersoftau/Cargo.toml
@@ -26,7 +26,4 @@ memmap = "0.7.0"
 itertools = "0.8.0"
 
 bellman_ce = { path = "../bellman" }
+log = "0.4.8"
-
-[features]
-smalltest = []
-
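One practical note on the new `log` dependency: `log` is only a facade, so the `error!`/`info!` calls introduced further down are silent until each binary installs a logger implementation. A minimal sketch, assuming the common `env_logger` backend (which this diff does not show):

    fn main() {
        // Without an installed logger, every log macro is a no-op.
        env_logger::init();
        log::info!("logger initialized; ceremony output now visible");
    }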
powersoftau/src/accumulator.rs (deleted)
@@ -1,500 +0,0 @@
-//! This ceremony constructs the "powers of tau" for Jens Groth's 2016 zk-SNARK proving
-//! system using the BLS12-381 pairing-friendly elliptic curve construction.
-//!
-//! # Overview
-//!
-//! Participants of the ceremony receive a "challenge" file containing:
-//!
-//! * the BLAKE2b hash of the last file entered into the transcript
-//! * an `Accumulator` (with curve points encoded in uncompressed form for fast deserialization)
-//!
-//! The participant runs a tool which generates a random keypair (`PublicKey`, `PrivateKey`)
-//! used for modifying the `Accumulator` from the "challenge" file. The keypair is then used to
-//! transform the `Accumulator`, and a "response" file is generated containing:
-//!
-//! * the BLAKE2b hash of the "challenge" file (thus forming a hash chain over the entire transcript)
-//! * an `Accumulator` (with curve points encoded in compressed form for fast uploading)
-//! * the `PublicKey`
-//!
-//! This "challenge" file is entered into the protocol transcript. A given transcript is valid
-//! if the transformations between consecutive `Accumulator`s verify with their respective
-//! `PublicKey`s. Participants (and the public) can ensure that their contribution to the
-//! `Accumulator` was accepted by ensuring the transcript contains their "response" file, ideally
-//! by comparison of the BLAKE2b hash of the "response" file.
-//!
-//! After some time has elapsed for participants to contribute to the ceremony, a participant is
-//! simulated with a randomness beacon. The resulting `Accumulator` contains partial zk-SNARK
-//! public parameters for all circuits within a bounded size.
-
-use bellman_ce::pairing::{
-    ff::{Field, PrimeField},
-    CurveAffine, CurveProjective, EncodedPoint, Engine, Wnaf,
-};
-use blake2::{Blake2b, Digest};
-
-use generic_array::GenericArray;
-
-use std::io::{self, Read, Write};
-use std::sync::{Arc, Mutex};
-use typenum::consts::U64;
-
-use super::keypair::{PrivateKey, PublicKey};
-use super::parameters::{
-    CheckForCorrectness, DeserializationError, PowersOfTauParameters, UseCompression,
-};
-use super::utils::{hash_to_g2, power_pairs, same_ratio, write_point};
-
-/// The `Accumulator` is an object that participants of the ceremony contribute
-/// randomness to. This object contains powers of trapdoor `tau` in G1 and in G2 over
-/// fixed generators, and additionally in G1 over two other generators of exponents
-/// `alpha` and `beta` over those fixed generators. In other words:
-///
-/// * (τ, τ<sup>2</sup>, ..., τ<sup>2<sup>22</sup> - 2</sup>, α, ατ, ατ<sup>2</sup>, ..., ατ<sup>2<sup>21</sup> - 1</sup>, β, βτ, βτ<sup>2</sup>, ..., βτ<sup>2<sup>21</sup> - 1</sup>)<sub>1</sub>
-/// * (β, τ, τ<sup>2</sup>, ..., τ<sup>2<sup>21</sup> - 1</sup>)<sub>2</sub>
-#[derive(Eq, Clone)]
-pub struct Accumulator<E: Engine, P: PowersOfTauParameters> {
-    /// tau^0, tau^1, tau^2, ..., tau^{TAU_POWERS_G1_LENGTH - 1}
-    pub tau_powers_g1: Vec<E::G1Affine>,
-    /// tau^0, tau^1, tau^2, ..., tau^{TAU_POWERS_LENGTH - 1}
-    pub tau_powers_g2: Vec<E::G2Affine>,
-    /// alpha * tau^0, alpha * tau^1, alpha * tau^2, ..., alpha * tau^{TAU_POWERS_LENGTH - 1}
-    pub alpha_tau_powers_g1: Vec<E::G1Affine>,
-    /// beta * tau^0, beta * tau^1, beta * tau^2, ..., beta * tau^{TAU_POWERS_LENGTH - 1}
-    pub beta_tau_powers_g1: Vec<E::G1Affine>,
-    /// beta
-    pub beta_g2: E::G2Affine,
-    /// Keep parameters here
-    pub parameters: P,
-}
-
-impl<E: Engine, P: PowersOfTauParameters> PartialEq for Accumulator<E, P> {
-    fn eq(&self, other: &Accumulator<E, P>) -> bool {
-        self.tau_powers_g1.eq(&other.tau_powers_g1)
-            && self.tau_powers_g2.eq(&other.tau_powers_g2)
-            && self.alpha_tau_powers_g1.eq(&other.alpha_tau_powers_g1)
-            && self.beta_tau_powers_g1.eq(&other.beta_tau_powers_g1)
-            && self.beta_g2 == other.beta_g2
-    }
-}
-
-impl<E: Engine, P: PowersOfTauParameters> Accumulator<E, P> {
-    /// Constructs an "initial" accumulator with τ = 1, α = 1, β = 1.
-    pub fn new(parameters: P) -> Self {
-        Accumulator {
-            tau_powers_g1: vec![E::G1Affine::one(); P::TAU_POWERS_G1_LENGTH],
-            tau_powers_g2: vec![E::G2Affine::one(); P::TAU_POWERS_LENGTH],
-            alpha_tau_powers_g1: vec![E::G1Affine::one(); P::TAU_POWERS_LENGTH],
-            beta_tau_powers_g1: vec![E::G1Affine::one(); P::TAU_POWERS_LENGTH],
-            beta_g2: E::G2Affine::one(),
-            parameters,
-        }
-    }
-
-    /// Write the accumulator with some compression behavior.
-    pub fn serialize<W: Write>(
-        &self,
-        writer: &mut W,
-        compression: UseCompression,
-    ) -> io::Result<()> {
-        fn write_all<W: Write, C: CurveAffine>(
-            writer: &mut W,
-            c: &[C],
-            compression: UseCompression,
-        ) -> io::Result<()> {
-            for c in c {
-                write_point(writer, c, compression)?;
-            }
-
-            Ok(())
-        }
-
-        write_all(writer, &self.tau_powers_g1, compression)?;
-        write_all(writer, &self.tau_powers_g2, compression)?;
-        write_all(writer, &self.alpha_tau_powers_g1, compression)?;
-        write_all(writer, &self.beta_tau_powers_g1, compression)?;
-        write_all(writer, &[self.beta_g2], compression)?;
-
-        Ok(())
-    }
-
-    /// Read the accumulator from disk with some compression behavior. `checked`
-    /// indicates whether we should check it's a valid element of the group and
-    /// not the point at infinity.
-    pub fn deserialize<R: Read>(
-        reader: &mut R,
-        compression: UseCompression,
-        checked: CheckForCorrectness,
-        parameters: P,
-    ) -> Result<Self, DeserializationError> {
-        fn read_all<EE: Engine, R: Read, C: CurveAffine<Engine = EE, Scalar = EE::Fr>>(
-            reader: &mut R,
-            size: usize,
-            compression: UseCompression,
-            checked: CheckForCorrectness,
-        ) -> Result<Vec<C>, DeserializationError> {
-            fn decompress_all<R: Read, ENC: EncodedPoint>(
-                reader: &mut R,
-                size: usize,
-                checked: CheckForCorrectness,
-            ) -> Result<Vec<ENC::Affine>, DeserializationError> {
-                // Read the encoded elements
-                let mut res = vec![ENC::empty(); size];
-
-                for encoded in &mut res {
-                    reader.read_exact(encoded.as_mut())?;
-                }
-
-                // Allocate space for the deserialized elements
-                let mut res_affine = vec![ENC::Affine::zero(); size];
-
-                let mut chunk_size = res.len() / num_cpus::get();
-                if chunk_size == 0 {
-                    chunk_size = 1;
-                }
-
-                // If any of our threads encounter a deserialization/IO error, catch
-                // it with this.
-                let decoding_error = Arc::new(Mutex::new(None));
-
-                crossbeam::scope(|scope| {
-                    for (source, target) in res
-                        .chunks(chunk_size)
-                        .zip(res_affine.chunks_mut(chunk_size))
-                    {
-                        let decoding_error = decoding_error.clone();
-
-                        scope.spawn(move || {
-                            for (source, target) in source.iter().zip(target.iter_mut()) {
-                                match {
-                                    // If we're a participant, we don't need to check all of the
-                                    // elements in the accumulator, which saves a lot of time.
-                                    // The hash chain prevents this from being a problem: the
-                                    // transcript guarantees that the accumulator was properly
-                                    // formed.
-                                    match checked {
-                                        CheckForCorrectness::Yes => {
-                                            // Points at infinity are never expected in the accumulator
-                                            source.into_affine().map_err(|e| e.into()).and_then(
-                                                |source| {
-                                                    if source.is_zero() {
-                                                        Err(DeserializationError::PointAtInfinity)
-                                                    } else {
-                                                        Ok(source)
-                                                    }
-                                                },
-                                            )
-                                        }
-                                        CheckForCorrectness::No => {
-                                            source.into_affine_unchecked().map_err(|e| e.into())
-                                        }
-                                    }
-                                } {
-                                    Ok(source) => {
-                                        *target = source;
-                                    }
-                                    Err(e) => {
-                                        *decoding_error.lock().unwrap() = Some(e);
-                                    }
-                                }
-                            }
-                        });
-                    }
-                });
-
-                match Arc::try_unwrap(decoding_error)
-                    .unwrap()
-                    .into_inner()
-                    .unwrap()
-                {
-                    Some(e) => Err(e),
-                    None => Ok(res_affine),
-                }
-            }
-
-            match compression {
-                UseCompression::Yes => decompress_all::<_, C::Compressed>(reader, size, checked),
-                UseCompression::No => decompress_all::<_, C::Uncompressed>(reader, size, checked),
-            }
-        }
-
-        let tau_powers_g1 =
-            read_all::<E, _, _>(reader, P::TAU_POWERS_G1_LENGTH, compression, checked)?;
-        let tau_powers_g2 =
-            read_all::<E, _, _>(reader, P::TAU_POWERS_LENGTH, compression, checked)?;
-        let alpha_tau_powers_g1 =
-            read_all::<E, _, _>(reader, P::TAU_POWERS_LENGTH, compression, checked)?;
-        let beta_tau_powers_g1 =
-            read_all::<E, _, _>(reader, P::TAU_POWERS_LENGTH, compression, checked)?;
-        let beta_g2 = read_all::<E, _, _>(reader, 1, compression, checked)?[0];
-
-        Ok(Accumulator {
-            tau_powers_g1,
-            tau_powers_g2,
-            alpha_tau_powers_g1,
-            beta_tau_powers_g1,
-            beta_g2,
-            parameters,
-        })
-    }
-
-    /// Transforms the accumulator with a private key.
-    pub fn transform(&mut self, key: &PrivateKey<E>) {
-        // Construct the powers of tau
-        let mut taupowers = vec![E::Fr::zero(); P::TAU_POWERS_G1_LENGTH];
-        let chunk_size = P::TAU_POWERS_G1_LENGTH / num_cpus::get();
-
-        // Construct exponents in parallel
-        crossbeam::scope(|scope| {
-            for (i, taupowers) in taupowers.chunks_mut(chunk_size).enumerate() {
-                scope.spawn(move || {
-                    let mut acc = key.tau.pow(&[(i * chunk_size) as u64]);
-
-                    for t in taupowers {
-                        *t = acc;
-                        acc.mul_assign(&key.tau);
-                    }
-                });
-            }
-        });
-
-        /// Exponentiate a large number of points, with an optional coefficient to be applied to the
-        /// exponent.
-        fn batch_exp<EE: Engine, C: CurveAffine<Engine = EE, Scalar = EE::Fr>>(
-            bases: &mut [C],
-            exp: &[C::Scalar],
-            coeff: Option<&C::Scalar>,
-        ) {
-            assert_eq!(bases.len(), exp.len());
-            let mut projective = vec![C::Projective::zero(); bases.len()];
-            let chunk_size = bases.len() / num_cpus::get();
-
-            // Perform wNAF over multiple cores, placing results into `projective`.
-            crossbeam::scope(|scope| {
-                for ((bases, exp), projective) in bases
-                    .chunks_mut(chunk_size)
-                    .zip(exp.chunks(chunk_size))
-                    .zip(projective.chunks_mut(chunk_size))
-                {
-                    scope.spawn(move || {
-                        let mut wnaf = Wnaf::new();
-
-                        for ((base, exp), projective) in
-                            bases.iter_mut().zip(exp.iter()).zip(projective.iter_mut())
-                        {
-                            let mut exp = *exp;
-                            if let Some(coeff) = coeff {
-                                exp.mul_assign(coeff);
-                            }
-
-                            *projective =
-                                wnaf.base(base.into_projective(), 1).scalar(exp.into_repr());
-                        }
-                    });
-                }
-            });
-
-            // Perform batch normalization
-            crossbeam::scope(|scope| {
-                for projective in projective.chunks_mut(chunk_size) {
-                    scope.spawn(move || {
-                        C::Projective::batch_normalization(projective);
-                    });
-                }
-            });
-
-            // Turn it all back into affine points
-            for (projective, affine) in projective.iter().zip(bases.iter_mut()) {
-                *affine = projective.into_affine();
-            }
-        }
-
-        batch_exp::<E, _>(&mut self.tau_powers_g1, &taupowers[0..], None);
-        batch_exp::<E, _>(
-            &mut self.tau_powers_g2,
-            &taupowers[0..P::TAU_POWERS_LENGTH],
-            None,
-        );
-        batch_exp::<E, _>(
-            &mut self.alpha_tau_powers_g1,
-            &taupowers[0..P::TAU_POWERS_LENGTH],
-            Some(&key.alpha),
-        );
-        batch_exp::<E, _>(
-            &mut self.beta_tau_powers_g1,
-            &taupowers[0..P::TAU_POWERS_LENGTH],
-            Some(&key.beta),
-        );
-        self.beta_g2 = self.beta_g2.mul(key.beta).into_affine();
-    }
-}
-
-/// Verifies a transformation of the `Accumulator` with the `PublicKey`, given a 64-byte transcript `digest`.
-pub fn verify_transform<E: Engine, P: PowersOfTauParameters>(
-    before: &Accumulator<E, P>,
-    after: &Accumulator<E, P>,
-    key: &PublicKey<E>,
-    digest: &[u8],
-) -> bool {
-    assert_eq!(digest.len(), 64);
-
-    let compute_g2_s = |g1_s: E::G1Affine, g1_s_x: E::G1Affine, personalization: u8| {
-        let mut h = Blake2b::default();
-        h.input(&[personalization]);
-        h.input(digest);
-        h.input(g1_s.into_uncompressed().as_ref());
-        h.input(g1_s_x.into_uncompressed().as_ref());
-        hash_to_g2::<E>(h.result().as_ref()).into_affine()
-    };
-
-    let tau_g2_s = compute_g2_s(key.tau_g1.0, key.tau_g1.1, 0);
-    let alpha_g2_s = compute_g2_s(key.alpha_g1.0, key.alpha_g1.1, 1);
-    let beta_g2_s = compute_g2_s(key.beta_g1.0, key.beta_g1.1, 2);
-
-    // Check the proofs-of-knowledge for tau/alpha/beta
-
-    // g1^s / g1^(s*x) = g2^s / g2^(s*x)
-    if !same_ratio(key.tau_g1, (tau_g2_s, key.tau_g2)) {
-        return false;
-    }
-    if !same_ratio(key.alpha_g1, (alpha_g2_s, key.alpha_g2)) {
-        return false;
-    }
-    if !same_ratio(key.beta_g1, (beta_g2_s, key.beta_g2)) {
-        return false;
-    }
-
-    // Check the correctness of the generators for tau powers
-    if after.tau_powers_g1[0] != E::G1Affine::one() {
-        return false;
-    }
-    if after.tau_powers_g2[0] != E::G2Affine::one() {
-        return false;
-    }
-
-    // Did the participant multiply the previous tau by the new one?
-    if !same_ratio(
-        (before.tau_powers_g1[1], after.tau_powers_g1[1]),
-        (tau_g2_s, key.tau_g2),
-    ) {
-        return false;
-    }
-
-    // Did the participant multiply the previous alpha by the new one?
-    if !same_ratio(
-        (before.alpha_tau_powers_g1[0], after.alpha_tau_powers_g1[0]),
-        (alpha_g2_s, key.alpha_g2),
-    ) {
-        return false;
-    }
-
-    // Did the participant multiply the previous beta by the new one?
-    if !same_ratio(
-        (before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]),
-        (beta_g2_s, key.beta_g2),
-    ) {
-        return false;
-    }
-    if !same_ratio(
-        (before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]),
-        (before.beta_g2, after.beta_g2),
-    ) {
-        return false;
-    }
-
-    // Are the powers of tau correct?
-    if !same_ratio(
-        power_pairs(&after.tau_powers_g1),
-        (after.tau_powers_g2[0], after.tau_powers_g2[1]),
-    ) {
-        return false;
-    }
-    if !same_ratio(
-        power_pairs(&after.tau_powers_g2),
-        (after.tau_powers_g1[0], after.tau_powers_g1[1]),
-    ) {
-        return false;
-    }
-    if !same_ratio(
-        power_pairs(&after.alpha_tau_powers_g1),
-        (after.tau_powers_g2[0], after.tau_powers_g2[1]),
-    ) {
-        return false;
-    }
-    if !same_ratio(
-        power_pairs(&after.beta_tau_powers_g1),
-        (after.tau_powers_g2[0], after.tau_powers_g2[1]),
-    ) {
-        return false;
-    }
-
-    true
-}
-
-/// Abstraction over a reader which hashes the data being read.
-pub struct HashReader<R: Read> {
-    reader: R,
-    hasher: Blake2b,
-}
-
-impl<R: Read> HashReader<R> {
-    /// Construct a new `HashReader` given an existing `reader` by value.
-    pub fn new(reader: R) -> Self {
-        HashReader {
-            reader,
-            hasher: Blake2b::default(),
-        }
-    }
-
-    /// Destroy this reader and return the hash of what was read.
-    pub fn into_hash(self) -> GenericArray<u8, U64> {
-        self.hasher.result()
-    }
-}
-
-impl<R: Read> Read for HashReader<R> {
-    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
-        let bytes = self.reader.read(buf)?;
-
-        if bytes > 0 {
-            self.hasher.input(&buf[0..bytes]);
-        }
-
-        Ok(bytes)
-    }
-}
-
-/// Abstraction over a writer which hashes the data being written.
-pub struct HashWriter<W: Write> {
-    writer: W,
-    hasher: Blake2b,
-}
-
-impl<W: Write> HashWriter<W> {
-    /// Construct a new `HashWriter` given an existing `writer` by value.
-    pub fn new(writer: W) -> Self {
-        HashWriter {
-            writer,
-            hasher: Blake2b::default(),
-        }
-    }
-
-    /// Destroy this writer and return the hash of what was written.
-    pub fn into_hash(self) -> GenericArray<u8, U64> {
-        self.hasher.result()
-    }
-}
-
-impl<W: Write> Write for HashWriter<W> {
-    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
-        let bytes = self.writer.write(buf)?;
-
-        if bytes > 0 {
-            self.hasher.input(&buf[0..bytes]);
-        }
-
-        Ok(bytes)
-    }
-
-    fn flush(&mut self) -> io::Result<()> {
-        self.writer.flush()
-    }
-}
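The deleted module above reached every size through associated constants of the `PowersOfTauParameters` trait, so changing the curve or the number of constraints meant recompiling with a different type parameter. A schematic contrast, reusing the `CeremonyParams` sketch from the top of this page (the trait items listed are the ones the deleted code and the old diff lines actually reference):

    // Before: compile-time constants behind a trait.
    pub trait PowersOfTauParameters {
        const TAU_POWERS_LENGTH: usize;
        const TAU_POWERS_G1_LENGTH: usize;
        const G1_UNCOMPRESSED_BYTE_SIZE: usize;
        const G1_COMPRESSED_BYTE_SIZE: usize;
        const G2_UNCOMPRESSED_BYTE_SIZE: usize;
        const G2_COMPRESSED_BYTE_SIZE: usize;
        const HASH_SIZE: usize;
        const EMPIRICAL_BATCH_SIZE: usize;
    }

    // Every caller had to be generic and was monomorphized per parameter set:
    fn old_g1_size<P: PowersOfTauParameters>(compressed: bool) -> usize {
        if compressed { P::G1_COMPRESSED_BYTE_SIZE } else { P::G1_UNCOMPRESSED_BYTE_SIZE }
    }

    // After: the same information is an ordinary value that can be built from
    // command-line input and passed by reference at run time.
    fn new_g1_size(params: &CeremonyParams, compressed: bool) -> usize {
        if compressed { params.curve.g1_compressed } else { params.curve.g1 }
    }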
powersoftau/src/batched_accumulator.rs
@@ -3,6 +3,7 @@
 use bellman_ce::pairing::ff::{Field, PrimeField};
 use bellman_ce::pairing::*;
 use blake2::{Blake2b, Digest};
+use log::{error, info};
 
 use generic_array::GenericArray;
 use itertools::Itertools;
@@ -14,7 +15,7 @@ use typenum::consts::U64;
 
 use super::keypair::{PrivateKey, PublicKey};
 use super::parameters::{
-    CheckForCorrectness, DeserializationError, ElementType, PowersOfTauParameters, UseCompression,
+    CeremonyParams, CheckForCorrectness, DeserializationError, ElementType, UseCompression,
 };
 use super::utils::{blank_hash, compute_g2_s, power_pairs, same_ratio};
 
@@ -31,7 +32,7 @@ pub enum AccumulatorState {
 ///
 /// * (τ, τ<sup>2</sup>, ..., τ<sup>2<sup>22</sup> - 2</sup>, α, ατ, ατ<sup>2</sup>, ..., ατ<sup>2<sup>21</sup> - 1</sup>, β, βτ, βτ<sup>2</sup>, ..., βτ<sup>2<sup>21</sup> - 1</sup>)<sub>1</sub>
 /// * (β, τ, τ<sup>2</sup>, ..., τ<sup>2<sup>21</sup> - 1</sup>)<sub>2</sub>
-pub struct BatchedAccumulator<E: Engine, P: PowersOfTauParameters> {
+pub struct BatchedAccumulator<'a, E: Engine> {
     /// tau^0, tau^1, tau^2, ..., tau^{TAU_POWERS_G1_LENGTH - 1}
     pub tau_powers_g1: Vec<E::G1Affine>,
     /// tau^0, tau^1, tau^2, ..., tau^{TAU_POWERS_LENGTH - 1}
@@ -44,11 +45,11 @@ pub struct BatchedAccumulator<E: Engine, P: PowersOfTauParameters> {
     pub beta_g2: E::G2Affine,
     /// Hash chain hash
     pub hash: GenericArray<u8, U64>,
-    /// Keep parameters here as a marker
-    marker: std::marker::PhantomData<P>,
+    /// The parameters used for the setup of this accumulator
+    pub parameters: &'a CeremonyParams,
 }
 
-impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
+impl<'a, E: Engine> BatchedAccumulator<'a, E> {
     /// Calculate the contribution hash from the resulting file. Original powers of tau implementation
     /// used a specially formed writer to write to the file and calculate a hash on the fly, but memory-constrained
     /// implementation now writes without a particular order, so plain recalculation at the end
@@ -61,10 +62,8 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
         }
         hasher.result()
     }
-}
 
-impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
-    pub fn empty() -> Self {
+    pub fn empty(parameters: &'a CeremonyParams) -> Self {
         Self {
             tau_powers_g1: vec![],
             tau_powers_g2: vec![],
@@ -72,32 +71,30 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
            beta_tau_powers_g1: vec![],
            beta_g2: E::G2Affine::zero(),
            hash: blank_hash(),
-            marker: std::marker::PhantomData::<P> {},
+            parameters,
        }
    }
-}
 
-impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
-    fn g1_size(compression: UseCompression) -> usize {
+    fn g1_size(&self, compression: UseCompression) -> usize {
        match compression {
-            UseCompression::Yes => P::G1_COMPRESSED_BYTE_SIZE,
-            UseCompression::No => P::G1_UNCOMPRESSED_BYTE_SIZE,
+            UseCompression::Yes => self.parameters.curve.g1_compressed,
+            UseCompression::No => self.parameters.curve.g1,
        }
    }
 
-    fn g2_size(compression: UseCompression) -> usize {
+    fn g2_size(&self, compression: UseCompression) -> usize {
        match compression {
-            UseCompression::Yes => P::G2_COMPRESSED_BYTE_SIZE,
-            UseCompression::No => P::G2_UNCOMPRESSED_BYTE_SIZE,
+            UseCompression::Yes => self.parameters.curve.g2_compressed,
+            UseCompression::No => self.parameters.curve.g2,
        }
    }
 
-    fn get_size(element_type: ElementType, compression: UseCompression) -> usize {
+    fn get_size(&self, element_type: ElementType, compression: UseCompression) -> usize {
        match element_type {
            ElementType::AlphaG1 | ElementType::BetaG1 | ElementType::TauG1 => {
-                Self::g1_size(compression)
+                self.g1_size(compression)
            }
-            ElementType::BetaG2 | ElementType::TauG2 => Self::g2_size(compression),
+            ElementType::BetaG2 | ElementType::TauG2 => self.g2_size(compression),
        }
    }
 
@@ -111,24 +108,25 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
     /// Public key appended to the end of file, but it's irrelevant for an accumulator itself
 
     fn calculate_mmap_position(
+        &self,
         index: usize,
         element_type: ElementType,
         compression: UseCompression,
     ) -> usize {
-        let g1_size = Self::g1_size(compression);
-        let g2_size = Self::g2_size(compression);
-        let required_tau_g1_power = P::TAU_POWERS_G1_LENGTH;
-        let required_power = P::TAU_POWERS_LENGTH;
+        let g1_size = self.g1_size(compression);
+        let g2_size = self.g2_size(compression);
+        let required_tau_g1_power = self.parameters.powers_g1_length;
+        let required_power = self.parameters.powers_length;
+        let parameters = &self.parameters;
        let position = match element_type {
            ElementType::TauG1 => {
                let mut position = 0;
                position += g1_size * index;
                assert!(
-                    index < P::TAU_POWERS_G1_LENGTH,
+                    index < parameters.powers_g1_length,
                    format!(
                        "Index of TauG1 element written must not exceed {}, while it's {}",
-                        P::TAU_POWERS_G1_LENGTH,
-                        index
+                        parameters.powers_g1_length, index
                    )
                );
 
@@ -138,11 +136,10 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
                let mut position = 0;
                position += g1_size * required_tau_g1_power;
                assert!(
-                    index < P::TAU_POWERS_LENGTH,
+                    index < required_power,
                    format!(
                        "Index of TauG2 element written must not exceed {}, while it's {}",
-                        P::TAU_POWERS_LENGTH,
-                        index
+                        required_power, index
                    )
                );
                position += g2_size * index;
@@ -154,11 +151,10 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
                position += g1_size * required_tau_g1_power;
                position += g2_size * required_power;
                assert!(
-                    index < P::TAU_POWERS_LENGTH,
+                    index < required_power,
                    format!(
                        "Index of AlphaG1 element written must not exceed {}, while it's {}",
-                        P::TAU_POWERS_LENGTH,
-                        index
+                        required_power, index
                    )
                );
                position += g1_size * index;
@@ -171,11 +167,10 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
                position += g2_size * required_power;
                position += g1_size * required_power;
                assert!(
-                    index < P::TAU_POWERS_LENGTH,
+                    index < required_power,
                    format!(
                        "Index of BetaG1 element written must not exceed {}, while it's {}",
-                        P::TAU_POWERS_LENGTH,
-                        index
+                        required_power, index
                    )
                );
                position += g1_size * index;
@@ -193,14 +188,14 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
            }
        };
 
-        position + P::HASH_SIZE
+        position + self.parameters.hash_size
    }
 }
 
-/// Verifies a transformation of the `Accumulator` with the `PublicKey`, given a 64-byte transcript `digest`.
-pub fn verify_transform<E: Engine, P: PowersOfTauParameters>(
-    before: &BatchedAccumulator<E, P>,
-    after: &BatchedAccumulator<E, P>,
+/// Verifies a transformation of the `BatchedAccumulator` with the `PublicKey`, given a 64-byte transcript `digest`.
+pub fn verify_transform<E: Engine>(
+    before: &BatchedAccumulator<E>,
+    after: &BatchedAccumulator<E>,
     key: &PublicKey<E>,
     digest: &[u8],
 ) -> bool {
@@ -290,9 +285,9 @@ pub fn verify_transform<E: Engine, P: PowersOfTauParameters>(
     true
 }
 
-impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
+impl<'a, E: Engine> BatchedAccumulator<'a, E> {
     /// Verifies a transformation of the `Accumulator` with the `PublicKey`, given a 64-byte transcript `digest`.
-    #[allow(clippy::too_many_arguments)]
+    #[allow(clippy::too_many_arguments, clippy::cognitive_complexity)]
     pub fn verify_transformation(
         input_map: &Mmap,
         output_map: &Mmap,
@@ -302,6 +297,7 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
         output_is_compressed: UseCompression,
         check_input_for_correctness: CheckForCorrectness,
         check_output_for_correctness: CheckForCorrectness,
+        parameters: &'a CeremonyParams,
     ) -> bool {
         use itertools::MinMaxResult::MinMax;
         assert_eq!(digest.len(), 64);
@@ -314,22 +310,22 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
 
         // g1^s / g1^(s*x) = g2^s / g2^(s*x)
         if !same_ratio(key.tau_g1, (tau_g2_s, key.tau_g2)) {
-            println!("Invalid ratio key.tau_g1, (tau_g2_s, key.tau_g2)");
+            error!("Invalid ratio key.tau_g1, (tau_g2_s, key.tau_g2)");
             return false;
         }
         if !same_ratio(key.alpha_g1, (alpha_g2_s, key.alpha_g2)) {
-            println!("Invalid ratio key.alpha_g1, (alpha_g2_s, key.alpha_g2)");
+            error!("Invalid ratio key.alpha_g1, (alpha_g2_s, key.alpha_g2)");
            return false;
        }
        if !same_ratio(key.beta_g1, (beta_g2_s, key.beta_g2)) {
-            println!("Invalid ratio key.beta_g1, (beta_g2_s, key.beta_g2)");
+            error!("Invalid ratio key.beta_g1, (beta_g2_s, key.beta_g2)");
            return false;
        }
 
        // Load accumulators AND perform computations
 
-        let mut before = Self::empty();
-        let mut after = Self::empty();
+        let mut before = Self::empty(parameters);
+        let mut after = Self::empty(parameters);
 
        // these checks only touch a part of the accumulator, so read two elements
 
@@ -356,11 +352,11 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
 
         // Check the correctness of the generators for tau powers
         if after.tau_powers_g1[0] != E::G1Affine::one() {
-            println!("tau_powers_g1[0] != 1");
+            error!("tau_powers_g1[0] != 1");
             return false;
         }
         if after.tau_powers_g2[0] != E::G2Affine::one() {
-            println!("tau_powers_g2[0] != 1");
+            error!("tau_powers_g2[0] != 1");
             return false;
         }
 
@@ -369,7 +365,7 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
             (before.tau_powers_g1[1], after.tau_powers_g1[1]),
             (tau_g2_s, key.tau_g2),
         ) {
-            println!("Invalid ratio (before.tau_powers_g1[1], after.tau_powers_g1[1]), (tau_g2_s, key.tau_g2)");
+            error!("Invalid ratio (before.tau_powers_g1[1], after.tau_powers_g1[1]), (tau_g2_s, key.tau_g2)");
             return false;
         }
 
@@ -378,7 +374,7 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
             (before.alpha_tau_powers_g1[0], after.alpha_tau_powers_g1[0]),
             (alpha_g2_s, key.alpha_g2),
         ) {
-            println!("Invalid ratio (before.alpha_tau_powers_g1[0], after.alpha_tau_powers_g1[0]), (alpha_g2_s, key.alpha_g2)");
+            error!("Invalid ratio (before.alpha_tau_powers_g1[0], after.alpha_tau_powers_g1[0]), (alpha_g2_s, key.alpha_g2)");
             return false;
         }
 
@@ -387,14 +383,14 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
             (before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]),
             (beta_g2_s, key.beta_g2),
         ) {
-            println!("Invalid ratio (before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (beta_g2_s, key.beta_g2)");
+            error!("Invalid ratio (before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (beta_g2_s, key.beta_g2)");
             return false;
         }
         if !same_ratio(
             (before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]),
             (before.beta_g2, after.beta_g2),
         ) {
-            println!("Invalid ratio (before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (before.beta_g2, after.beta_g2)");
+            error!("Invalid ratio (before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (before.beta_g2, after.beta_g2)");
             return false;
         }
     }
@@ -408,16 +404,11 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
         // one does not need to care about some overlapping
 
         let mut tau_powers_last_first_chunks = vec![E::G1Affine::zero(); 2];
-        for chunk in &(0..P::TAU_POWERS_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE) {
+        let tau_powers_length = parameters.powers_length;
+        for chunk in &(0..tau_powers_length).chunks(parameters.batch_size) {
             if let MinMax(start, end) = chunk.minmax() {
                 // extra 1 to ensure intersection between chunks and ensure we don't overflow
-                let size = end - start
-                    + 1
-                    + if end == P::TAU_POWERS_LENGTH - 1 {
-                        0
-                    } else {
-                        1
-                    };
+                let size = end - start + 1 + if end == tau_powers_length - 1 { 0 } else { 1 };
                 before
                     .read_chunk(
                         start,
@@ -452,47 +443,46 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
                    power_pairs(&after.tau_powers_g1),
                    (tau_powers_g2_0, tau_powers_g2_1),
                ) {
-                    println!("Invalid ratio power_pairs(&after.tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)");
+                    error!("Invalid ratio power_pairs(&after.tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)");
                    return false;
                }
                if !same_ratio(
                    power_pairs(&after.tau_powers_g2),
                    (tau_powers_g1_0, tau_powers_g1_1),
                ) {
-                    println!("Invalid ratio power_pairs(&after.tau_powers_g2), (tau_powers_g1_0, tau_powers_g1_1)");
+                    error!("Invalid ratio power_pairs(&after.tau_powers_g2), (tau_powers_g1_0, tau_powers_g1_1)");
                    return false;
                }
                if !same_ratio(
                    power_pairs(&after.alpha_tau_powers_g1),
                    (tau_powers_g2_0, tau_powers_g2_1),
                ) {
-                    println!("Invalid ratio power_pairs(&after.alpha_tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)");
+                    error!("Invalid ratio power_pairs(&after.alpha_tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)");
                    return false;
                }
                if !same_ratio(
                    power_pairs(&after.beta_tau_powers_g1),
                    (tau_powers_g2_0, tau_powers_g2_1),
                ) {
-                    println!("Invalid ratio power_pairs(&after.beta_tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)");
+                    error!("Invalid ratio power_pairs(&after.beta_tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)");
                    return false;
                }
-                if end == P::TAU_POWERS_LENGTH - 1 {
+                if end == tau_powers_length - 1 {
                    tau_powers_last_first_chunks[0] = after.tau_powers_g1[size - 1];
                }
-                println!("Done processing {} powers of tau", end);
+                info!("Done processing {} powers of tau", end);
            } else {
                panic!("Chunk does not have a min and max");
            }
        }
 
-        for chunk in
-            &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE)
+        for chunk in &(tau_powers_length..parameters.powers_g1_length).chunks(parameters.batch_size)
        {
            if let MinMax(start, end) = chunk.minmax() {
                // extra 1 to ensure intersection between chunks and ensure we don't overflow
                let size = end - start
                    + 1
-                    + if end == P::TAU_POWERS_G1_LENGTH - 1 {
+                    + if end == parameters.powers_g1_length - 1 {
                        0
                    } else {
                        1
@@ -542,13 +532,13 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
                    power_pairs(&after.tau_powers_g1),
                    (tau_powers_g2_0, tau_powers_g2_1),
                ) {
-                    println!("Invalid ratio power_pairs(&after.tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1) in extra TauG1 contribution");
+                    error!("Invalid ratio power_pairs(&after.tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1) in extra TauG1 contribution");
                    return false;
                }
-                if start == P::TAU_POWERS_LENGTH {
+                if start == parameters.powers_length {
                    tau_powers_last_first_chunks[1] = after.tau_powers_g1[0];
                }
-                println!("Done processing {} powers of tau", end);
+                info!("Done processing {} powers of tau", end);
            } else {
                panic!("Chunk does not have a min and max");
            }
@@ -558,7 +548,7 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
            power_pairs(&tau_powers_last_first_chunks),
            (tau_powers_g2_0, tau_powers_g2_1),
        ) {
-            println!("Invalid ratio power_pairs(&after.tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1) in TauG1 contribution intersection");
+            error!("Invalid ratio power_pairs(&after.tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1) in TauG1 contribution intersection");
        }
        true
    }
@@ -567,12 +557,13 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
        input_map: &Mmap,
        output_map: &mut MmapMut,
        check_input_for_correctness: CheckForCorrectness,
+        parameters: &'a CeremonyParams,
    ) -> io::Result<()> {
        use itertools::MinMaxResult::MinMax;
 
-        let mut accumulator = Self::empty();
+        let mut accumulator = Self::empty(parameters);
 
-        for chunk in &(0..P::TAU_POWERS_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE) {
+        for chunk in &(0..parameters.powers_length).chunks(parameters.batch_size) {
            if let MinMax(start, end) = chunk.minmax() {
                let size = end - start + 1;
                accumulator
@@ -596,7 +587,7 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
        }
 
        for chunk in
-            &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE)
+            &(parameters.powers_length..parameters.powers_g1_length).chunks(parameters.batch_size)
        {
            if let MinMax(start, end) = chunk.minmax() {
                let size = end - start + 1;
@@ -643,10 +634,11 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
        input_map: &Mmap,
        check_input_for_correctness: CheckForCorrectness,
        compression: UseCompression,
-    ) -> io::Result<BatchedAccumulator<E, P>> {
+        parameters: &'a CeremonyParams,
+    ) -> io::Result<BatchedAccumulator<'a, E>> {
        use itertools::MinMaxResult::MinMax;
 
-        let mut accumulator = Self::empty();
+        let mut accumulator = Self::empty(parameters);
 
        let mut tau_powers_g1 = vec![];
        let mut tau_powers_g2 = vec![];
@@ -654,7 +646,7 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
        let mut beta_tau_powers_g1 = vec![];
        let mut beta_g2 = vec![];
 
-        for chunk in &(0..P::TAU_POWERS_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE) {
+        for chunk in &(0..parameters.powers_length).chunks(parameters.batch_size) {
            if let MinMax(start, end) = chunk.minmax() {
                let size = end - start + 1;
                accumulator
@@ -684,7 +676,7 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
        }
 
        for chunk in
-            &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE)
+            &(parameters.powers_length..parameters.powers_g1_length).chunks(parameters.batch_size)
        {
            if let MinMax(start, end) = chunk.minmax() {
                let size = end - start + 1;
@@ -734,7 +726,7 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
            beta_tau_powers_g1,
            beta_g2: beta_g2[0],
            hash: blank_hash(),
-            marker: std::marker::PhantomData::<P> {},
+            parameters,
        })
    }
 
@@ -742,19 +734,20 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
        &mut self,
        output_map: &mut MmapMut,
        compression: UseCompression,
+        parameters: &CeremonyParams,
    ) -> io::Result<()> {
        use itertools::MinMaxResult::MinMax;
 
-        for chunk in &(0..P::TAU_POWERS_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE) {
+        for chunk in &(0..parameters.powers_length).chunks(parameters.batch_size) {
            if let MinMax(start, end) = chunk.minmax() {
-                let mut tmp_acc = BatchedAccumulator::<E, P> {
+                let mut tmp_acc = BatchedAccumulator::<E> {
                    tau_powers_g1: (&self.tau_powers_g1[start..=end]).to_vec(),
                    tau_powers_g2: (&self.tau_powers_g2[start..=end]).to_vec(),
                    alpha_tau_powers_g1: (&self.alpha_tau_powers_g1[start..=end]).to_vec(),
                    beta_tau_powers_g1: (&self.beta_tau_powers_g1[start..=end]).to_vec(),
                    beta_g2: self.beta_g2,
                    hash: self.hash,
-                    marker: std::marker::PhantomData::<P> {},
+                    parameters,
                };
                tmp_acc.write_chunk(start, compression, output_map)?;
            } else {
@@ -763,17 +756,17 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
        }
 
        for chunk in
-            &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE)
+            &(parameters.powers_length..parameters.powers_g1_length).chunks(parameters.batch_size)
        {
            if let MinMax(start, end) = chunk.minmax() {
-                let mut tmp_acc = BatchedAccumulator::<E, P> {
+                let mut tmp_acc = BatchedAccumulator::<E> {
                    tau_powers_g1: (&self.tau_powers_g1[start..=end]).to_vec(),
                    tau_powers_g2: vec![],
                    alpha_tau_powers_g1: vec![],
                    beta_tau_powers_g1: vec![],
                    beta_g2: self.beta_g2,
                    hash: self.hash,
-                    marker: std::marker::PhantomData::<P> {},
+                    parameters,
                };
                tmp_acc.write_chunk(start, compression, output_map)?;
            } else {
@@ -783,9 +776,7 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
 
        Ok(())
    }
-}
 
-impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
    pub fn read_chunk(
        &mut self,
        from: usize,
@@ -924,7 +915,7 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
            let index = from + i;
            match element_type {
                ElementType::TauG1 => {
-                    if index >= P::TAU_POWERS_G1_LENGTH {
+                    if index >= self.parameters.powers_g1_length {
                        return Ok(vec![]);
                    }
                }
@@ -932,13 +923,13 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
                | ElementType::BetaG1
                | ElementType::BetaG2
                | ElementType::TauG2 => {
-                    if index >= P::TAU_POWERS_LENGTH {
+                    if index >= self.parameters.powers_length {
                        return Ok(vec![]);
                    }
                }
            };
-            let position = Self::calculate_mmap_position(index, element_type, compression);
-            let element_size = Self::get_size(element_type, compression);
+            let position = self.calculate_mmap_position(index, element_type, compression);
+            let element_size = self.get_size(element_type, compression);
            let mut memory_slice = input_map
                .get(position..position + element_size)
                .expect("must read point data from file");
@@ -1021,9 +1012,7 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
            None => Ok(res_affine),
        }
    }
-}
 
-impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
    fn write_all(
        &mut self,
        chunk_start: usize,
@@ -1086,7 +1075,7 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
        {
            match element_type {
                ElementType::TauG1 => {
-                    if index >= P::TAU_POWERS_G1_LENGTH {
+                    if index >= self.parameters.powers_g1_length {
                        return Ok(());
                    }
                }
@@ -1094,7 +1083,7 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
                | ElementType::BetaG1
                | ElementType::BetaG2
                | ElementType::TauG2 => {
-                    if index >= P::TAU_POWERS_LENGTH {
+                    if index >= self.parameters.powers_length {
                        return Ok(());
                    }
                }
@@ -1102,12 +1091,12 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
 
            match compression {
                UseCompression::Yes => {
-                    let position = Self::calculate_mmap_position(index, element_type, compression);
+                    let position = self.calculate_mmap_position(index, element_type, compression);
                    // let size = self.get_size(element_type, compression);
                    (&mut output_map[position..]).write_all(p.into_compressed().as_ref())?;
                }
                UseCompression::No => {
-                    let position = Self::calculate_mmap_position(index, element_type, compression);
+                    let position = self.calculate_mmap_position(index, element_type, compression);
                    // let size = self.get_size(element_type, compression);
                    (&mut output_map[position..]).write_all(p.into_uncompressed().as_ref())?;
                }
@@ -1124,7 +1113,7 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
        output_map: &mut MmapMut,
    ) -> io::Result<()> {
        self.write_all(chunk_start, compression, ElementType::TauG1, output_map)?;
-        if chunk_start < P::TAU_POWERS_LENGTH {
+        if chunk_start < self.parameters.powers_length {
            self.write_all(chunk_start, compression, ElementType::TauG2, output_map)?;
            self.write_all(chunk_start, compression, ElementType::AlphaG1, output_map)?;
            self.write_all(chunk_start, compression, ElementType::BetaG1, output_map)?;
@@ -1133,9 +1122,7 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
 
        Ok(())
    }
-}
 
-impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
/// Transforms the accumulator with a private key.
|
/// Transforms the accumulator with a private key.
|
||||||
/// Due to large amount of data in a previous accumulator even in the compressed form
|
/// Due to large amount of data in a previous accumulator even in the compressed form
|
||||||
/// this function can now work on compressed input. Output can be made in any form
|
/// this function can now work on compressed input. Output can be made in any form
|
||||||
@ -1149,6 +1136,7 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
|
|||||||
compress_the_output: UseCompression,
|
compress_the_output: UseCompression,
|
||||||
check_input_for_correctness: CheckForCorrectness,
|
check_input_for_correctness: CheckForCorrectness,
|
||||||
key: &PrivateKey<E>,
|
key: &PrivateKey<E>,
|
||||||
|
parameters: &'a CeremonyParams,
|
||||||
) -> io::Result<()> {
|
) -> io::Result<()> {
|
||||||
/// Exponentiate a large number of points, with an optional coefficient to be applied to the
|
/// Exponentiate a large number of points, with an optional coefficient to be applied to the
|
||||||
/// exponent.
|
/// exponent.
|
||||||
@ -1205,11 +1193,11 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut accumulator = Self::empty();
|
let mut accumulator = Self::empty(parameters);
|
||||||
|
|
||||||
use itertools::MinMaxResult::MinMax;
|
use itertools::MinMaxResult::MinMax;
|
||||||
|
|
||||||
for chunk in &(0..P::TAU_POWERS_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE) {
|
for chunk in &(0..parameters.powers_length).chunks(parameters.batch_size) {
|
||||||
if let MinMax(start, end) = chunk.minmax() {
|
if let MinMax(start, end) = chunk.minmax() {
|
||||||
let size = end - start + 1;
|
let size = end - start + 1;
|
||||||
accumulator
|
accumulator
|
||||||
@ -1258,14 +1246,14 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
|
|||||||
"your contribution happened to produce a point at infinity, please re-run"
|
"your contribution happened to produce a point at infinity, please re-run"
|
||||||
);
|
);
|
||||||
accumulator.write_chunk(start, compress_the_output, output_map)?;
|
accumulator.write_chunk(start, compress_the_output, output_map)?;
|
||||||
println!("Done processing {} powers of tau", end);
|
info!("Done processing {} powers of tau", end);
|
||||||
} else {
|
} else {
|
||||||
panic!("Chunk does not have a min and max");
|
panic!("Chunk does not have a min and max");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for chunk in
|
for chunk in
|
||||||
&(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE)
|
&(parameters.powers_length..parameters.powers_g1_length).chunks(parameters.batch_size)
|
||||||
{
|
{
|
||||||
if let MinMax(start, end) = chunk.minmax() {
|
if let MinMax(start, end) = chunk.minmax() {
|
||||||
let size = end - start + 1;
|
let size = end - start + 1;
|
||||||
@ -1307,7 +1295,7 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
|
|||||||
//assert!(!accumulator.beta_g2.is_zero(), "your contribution happened to produce a point at infinity, please re-run");
|
//assert!(!accumulator.beta_g2.is_zero(), "your contribution happened to produce a point at infinity, please re-run");
|
||||||
accumulator.write_chunk(start, compress_the_output, output_map)?;
|
accumulator.write_chunk(start, compress_the_output, output_map)?;
|
||||||
|
|
||||||
println!("Done processing {} powers of tau", end);
|
info!("Done processing {} powers of tau", end);
|
||||||
} else {
|
} else {
|
||||||
panic!("Chunk does not have a min and max");
|
panic!("Chunk does not have a min and max");
|
||||||
}
|
}
|
||||||
@ -1315,17 +1303,17 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
|
|||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
|
|
||||||
/// Transforms the accumulator with a private key.
|
/// Transforms the accumulator with a private key.
|
||||||
pub fn generate_initial(
|
pub fn generate_initial(
|
||||||
output_map: &mut MmapMut,
|
output_map: &mut MmapMut,
|
||||||
compress_the_output: UseCompression,
|
compress_the_output: UseCompression,
|
||||||
|
parameters: &'a CeremonyParams,
|
||||||
) -> io::Result<()> {
|
) -> io::Result<()> {
|
||||||
use itertools::MinMaxResult::MinMax;
|
use itertools::MinMaxResult::MinMax;
|
||||||
|
|
||||||
for chunk in &(0..P::TAU_POWERS_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE) {
|
// Write the first Tau powers in chunks where every initial element is a G1 or G2 `one`
|
||||||
|
for chunk in &(0..parameters.powers_length).chunks(parameters.batch_size) {
|
||||||
if let MinMax(start, end) = chunk.minmax() {
|
if let MinMax(start, end) = chunk.minmax() {
|
||||||
let size = end - start + 1;
|
let size = end - start + 1;
|
||||||
let mut accumulator = Self {
|
let mut accumulator = Self {
|
||||||
@ -1335,18 +1323,19 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
|
|||||||
beta_tau_powers_g1: vec![E::G1Affine::one(); size],
|
beta_tau_powers_g1: vec![E::G1Affine::one(); size],
|
||||||
beta_g2: E::G2Affine::one(),
|
beta_g2: E::G2Affine::one(),
|
||||||
hash: blank_hash(),
|
hash: blank_hash(),
|
||||||
marker: std::marker::PhantomData::<P> {},
|
parameters,
|
||||||
};
|
};
|
||||||
|
|
||||||
accumulator.write_chunk(start, compress_the_output, output_map)?;
|
accumulator.write_chunk(start, compress_the_output, output_map)?;
|
||||||
println!("Done processing {} powers of tau", end);
|
info!("Done processing {} powers of tau", end);
|
||||||
} else {
|
} else {
|
||||||
panic!("Chunk does not have a min and max");
|
panic!("Chunk does not have a min and max");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Write the next `G1 length` elements
|
||||||
for chunk in
|
for chunk in
|
||||||
&(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE)
|
&(parameters.powers_length..parameters.powers_g1_length).chunks(parameters.batch_size)
|
||||||
{
|
{
|
||||||
if let MinMax(start, end) = chunk.minmax() {
|
if let MinMax(start, end) = chunk.minmax() {
|
||||||
let size = end - start + 1;
|
let size = end - start + 1;
|
||||||
@ -1357,11 +1346,11 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
|
|||||||
beta_tau_powers_g1: vec![],
|
beta_tau_powers_g1: vec![],
|
||||||
beta_g2: E::G2Affine::one(),
|
beta_g2: E::G2Affine::one(),
|
||||||
hash: blank_hash(),
|
hash: blank_hash(),
|
||||||
marker: std::marker::PhantomData::<P> {},
|
parameters,
|
||||||
};
|
};
|
||||||
|
|
||||||
accumulator.write_chunk(start, compress_the_output, output_map)?;
|
accumulator.write_chunk(start, compress_the_output, output_map)?;
|
||||||
println!("Done processing {} powers of tau", end);
|
info!("Done processing {} powers of tau", end);
|
||||||
} else {
|
} else {
|
||||||
panic!("Chunk does not have a min and max");
|
panic!("Chunk does not have a min and max");
|
||||||
}
|
}
|
||||||
|
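The hunks above all follow one migration pattern: each compile-time constant from the old PowersOfTauParameters trait (P::TAU_POWERS_LENGTH, P::TAU_POWERS_G1_LENGTH, P::EMPIRICAL_BATCH_SIZE) becomes a field read from a runtime CeremonyParams value, the PhantomData::<P> marker field becomes a stored `parameters` reference, and helpers such as `calculate_mmap_position` and `get_size` turn from associated functions (`Self::`) into methods (`self.`) so they can consult `self.parameters`. A minimal sketch of such a struct, inferred only from the fields these hunks read (the real definition lives in parameters.rs and is not part of this excerpt; the powers_g1_length relation is an assumption based on the usual powers-of-tau convention):

    // Sketch only, not the actual definition: field names are taken from
    // usages in this diff, everything else is assumed.
    pub struct CeremonyParams {
        pub size: usize,              // "circuit power": the ceremony covers 2^size tau powers
        pub batch_size: usize,        // elements processed per chunk
        pub powers_length: usize,     // 1 << size
        pub powers_g1_length: usize,  // assumed (powers_length << 1) - 1
        pub accumulator_size: usize,  // byte size of an uncompressed accumulator
        pub contribution_size: usize, // byte size of a compressed contribution, public key included
        pub public_key_size: usize,   // byte size of a serialized public key
    }

With the generic parameter gone, the type shrinks from BatchedAccumulator<E, P> to BatchedAccumulator<E>, which is why the separate impl blocks above collapse into one.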
@@ -1,17 +1,15 @@
-use powersoftau::bn256::Bn256CeremonyParameters;
-use powersoftau::batched_accumulator::BatchedAccumulator;
-use powersoftau::keypair::keypair;
-use powersoftau::parameters::{CheckForCorrectness, UseCompression};
+use powersoftau::{
+    batched_accumulator::BatchedAccumulator,
+    keypair::keypair,
+    parameters::{CeremonyParams, CheckForCorrectness, CurveKind, UseCompression},
+};
 
 use bellman_ce::pairing::bn256::Bn256;
-use memmap::*;
+use memmap::MmapOptions;
 use std::fs::OpenOptions;
 
 use std::io::Write;
 
-use powersoftau::parameters::PowersOfTauParameters;
-
 #[macro_use]
 extern crate hex_literal;
 
@@ -22,20 +20,24 @@ const CHECK_INPUT_CORRECTNESS: CheckForCorrectness = CheckForCorrectness::No;
 #[allow(clippy::modulo_one)]
 fn main() {
     let args: Vec<String> = std::env::args().collect();
-    if args.len() != 3 {
-        println!("Usage: \n<challenge_file> <response_file>");
+    if args.len() != 5 {
+        println!("Usage: \n<challenge_file> <response_file> <circuit_power> <batch_size>");
         std::process::exit(exitcode::USAGE);
     }
     let challenge_filename = &args[1];
     let response_filename = &args[2];
+    let circuit_power = args[3].parse().expect("could not parse circuit power");
+    let batch_size = args[4].parse().expect("could not parse batch size");
+
+    let parameters = CeremonyParams::new(CurveKind::Bn256, circuit_power, batch_size);
 
     println!(
         "Will contribute a random beacon to accumulator for 2^{} powers of tau",
-        Bn256CeremonyParameters::REQUIRED_POWER
+        parameters.size,
     );
     println!(
         "In total will generate up to {} powers",
-        Bn256CeremonyParameters::TAU_POWERS_G1_LENGTH
+        parameters.powers_g1_length,
     );
 
     // Create an RNG based on the outcome of the random beacon
@@ -102,8 +104,8 @@ fn main() {
         .metadata()
         .expect("unable to get filesystem metadata for challenge file");
     let expected_challenge_length = match INPUT_IS_COMPRESSED {
-        UseCompression::Yes => Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE,
-        UseCompression::No => Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE,
+        UseCompression::Yes => parameters.contribution_size,
+        UseCompression::No => parameters.accumulator_size,
     };
 
     if metadata.len() != (expected_challenge_length as u64) {
@@ -130,11 +132,8 @@ fn main() {
         .expect("unable to create response file in this directory");
 
     let required_output_length = match COMPRESS_THE_OUTPUT {
-        UseCompression::Yes => Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE,
-        UseCompression::No => {
-            Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE
-                + Bn256CeremonyParameters::PUBLIC_KEY_SIZE
-        }
+        UseCompression::Yes => parameters.contribution_size,
+        UseCompression::No => parameters.accumulator_size + parameters.public_key_size,
     };
 
     writer
@@ -149,8 +148,7 @@ fn main() {
 
     println!("Calculating previous contribution hash...");
 
-    let current_accumulator_hash =
-        BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&readable_map);
+    let current_accumulator_hash = BatchedAccumulator::<Bn256>::calculate_hash(&readable_map);
 
     {
         println!("Contributing on top of the hash:");
@@ -181,28 +179,28 @@ fn main() {
     println!("Computing and writing your contribution, this could take a while...");
 
     // this computes a transformation and writes it
-    BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::transform(
+    BatchedAccumulator::<Bn256>::transform(
         &readable_map,
         &mut writable_map,
         INPUT_IS_COMPRESSED,
         COMPRESS_THE_OUTPUT,
         CHECK_INPUT_CORRECTNESS,
         &privkey,
+        &parameters,
     )
     .expect("must transform with the key");
     println!("Finishing writing your contribution to response file...");
 
     // Write the public key
     pubkey
-        .write::<Bn256CeremonyParameters>(&mut writable_map, COMPRESS_THE_OUTPUT)
+        .write(&mut writable_map, COMPRESS_THE_OUTPUT, &parameters)
        .expect("unable to write public key");
 
     // Get the hash of the contribution, so the user can compare later
     let output_readonly = writable_map
         .make_read_only()
         .expect("must make a map readonly");
-    let contribution_hash =
-        BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&output_readonly);
+    let contribution_hash = BatchedAccumulator::<Bn256>::calculate_hash(&output_readonly);
 
     print!(
         "Done!\n\n\
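The beacon binary above now takes the ceremony size at runtime instead of baking it into a type. Assuming the binary keeps its usual name, an illustrative invocation with made-up values would be `beacon_constrained challenge response 28 256`: challenge file, response file, circuit power (2^28 tau powers), batch size. Only the argument order comes from the usage string; the name and values are assumptions.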
@@ -1,5 +1,4 @@
 use powersoftau::batched_accumulator::BatchedAccumulator;
-use powersoftau::bn256::Bn256CeremonyParameters;
 use powersoftau::keypair::keypair;
 use powersoftau::parameters::{CheckForCorrectness, UseCompression};
 
@@ -9,7 +8,7 @@ use std::fs::OpenOptions;
 
 use std::io::{Read, Write};
 
-use powersoftau::parameters::PowersOfTauParameters;
+use powersoftau::parameters::{CeremonyParams, CurveKind};
 
 const INPUT_IS_COMPRESSED: UseCompression = UseCompression::No;
 const COMPRESS_THE_OUTPUT: UseCompression = UseCompression::Yes;
@@ -17,20 +16,24 @@ const CHECK_INPUT_CORRECTNESS: CheckForCorrectness = CheckForCorrectness::No;
 
 fn main() {
     let args: Vec<String> = std::env::args().collect();
-    if args.len() != 3 {
-        println!("Usage: \n<challenge_file> <response_file>");
+    if args.len() != 5 {
+        println!("Usage: \n<challenge_file> <response_file> <circuit_power> <batch_size>");
         std::process::exit(exitcode::USAGE);
     }
     let challenge_filename = &args[1];
     let response_filename = &args[2];
+    let circuit_power = args[3].parse().expect("could not parse circuit power");
+    let batch_size = args[4].parse().expect("could not parse batch size");
+
+    let parameters = CeremonyParams::new(CurveKind::Bn256, circuit_power, batch_size);
 
     println!(
         "Will contribute to accumulator for 2^{} powers of tau",
-        Bn256CeremonyParameters::REQUIRED_POWER
+        parameters.size
     );
     println!(
         "In total will generate up to {} powers",
-        Bn256CeremonyParameters::TAU_POWERS_G1_LENGTH
+        parameters.powers_g1_length
     );
 
     // Create an RNG based on a mixture of system randomness and user provided randomness
@@ -85,8 +88,8 @@ fn main() {
         .metadata()
         .expect("unable to get filesystem metadata for challenge file");
     let expected_challenge_length = match INPUT_IS_COMPRESSED {
-        UseCompression::Yes => Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE,
-        UseCompression::No => Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE,
+        UseCompression::Yes => parameters.contribution_size,
+        UseCompression::No => parameters.accumulator_size,
     };
 
     if metadata.len() != (expected_challenge_length as u64) {
@@ -113,11 +116,8 @@ fn main() {
         .expect("unable to create response file");
 
     let required_output_length = match COMPRESS_THE_OUTPUT {
-        UseCompression::Yes => Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE,
-        UseCompression::No => {
-            Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE
-                + Bn256CeremonyParameters::PUBLIC_KEY_SIZE
-        }
+        UseCompression::Yes => parameters.contribution_size,
+        UseCompression::No => parameters.accumulator_size + parameters.public_key_size,
     };
 
     writer
@@ -136,8 +136,7 @@ fn main() {
         UseCompression::No == INPUT_IS_COMPRESSED,
         "Hashing the compressed file in not yet defined"
     );
-    let current_accumulator_hash =
-        BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&readable_map);
+    let current_accumulator_hash = BatchedAccumulator::<Bn256>::calculate_hash(&readable_map);
 
     {
         println!("`challenge` file contains decompressed points and has a hash:");
@@ -190,13 +189,14 @@ fn main() {
     println!("Computing and writing your contribution, this could take a while...");
 
     // this computes a transformation and writes it
-    BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::transform(
+    BatchedAccumulator::<Bn256>::transform(
         &readable_map,
         &mut writable_map,
         INPUT_IS_COMPRESSED,
         COMPRESS_THE_OUTPUT,
         CHECK_INPUT_CORRECTNESS,
         &privkey,
+        &parameters,
     )
     .expect("must transform with the key");
 
@@ -204,7 +204,7 @@ fn main() {
 
     // Write the public key
     pubkey
-        .write::<Bn256CeremonyParameters>(&mut writable_map, COMPRESS_THE_OUTPUT)
+        .write(&mut writable_map, COMPRESS_THE_OUTPUT, &parameters)
         .expect("unable to write public key");
 
     writable_map.flush().expect("must flush a memory map");
@@ -213,8 +213,7 @@ fn main() {
     let output_readonly = writable_map
         .make_read_only()
         .expect("must make a map readonly");
-    let contribution_hash =
-        BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&output_readonly);
+    let contribution_hash = BatchedAccumulator::<Bn256>::calculate_hash(&output_readonly);
 
     print!(
         "Done!\n\n\
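Both contributor binaries now validate file lengths against the runtime parameters rather than trait constants. For orientation, a rough sketch of how accumulator_size could be assembled from the curve's element sizes; the layout (a 64-byte hash followed by TauG1, TauG2, AlphaG1, BetaG1 and BetaG2 sections) matches the ElementType variants used in the batched accumulator above, but the exact formula is an assumption, and the real computation lives in parameters.rs:

    // Sketch under an assumed layout; g1/g2 are uncompressed point sizes in bytes.
    fn accumulator_size(g1: usize, g2: usize, p: &CeremonyParams) -> usize {
        64                            // hash of the previous contribution
            + p.powers_g1_length * g1 // tau powers in G1
            + p.powers_length * g2    // tau powers in G2
            + p.powers_length * g1    // alpha * tau powers in G1
            + p.powers_length * g1    // beta * tau powers in G1
            + g2                      // beta in G2
    }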
@@ -1,41 +0,0 @@
-use powersoftau::accumulator::Accumulator;
-use powersoftau::bn256::Bn256CeremonyParameters;
-use powersoftau::parameters::UseCompression;
-use powersoftau::utils::blank_hash;
-
-use bellman_ce::pairing::bn256::Bn256;
-use std::fs::OpenOptions;
-use std::io::{BufWriter, Write};
-
-fn main() {
-    let args: Vec<String> = std::env::args().collect();
-    if args.len() != 2 {
-        println!("Usage: \n<challenge_file>");
-        std::process::exit(exitcode::USAGE);
-    }
-    let challenge_filename = &args[1];
-
-    let file = OpenOptions::new()
-        .read(false)
-        .write(true)
-        .create_new(true)
-        .open(challenge_filename)
-        .expect("unable to create challenge file");
-
-    let mut writer = BufWriter::new(file);
-
-    // Write a blank BLAKE2b hash:
-    writer
-        .write_all(&blank_hash().as_slice())
-        .expect("unable to write blank hash to challenge file");
-
-    let parameters = Bn256CeremonyParameters {};
-
-    let acc: Accumulator<Bn256, _> = Accumulator::new(parameters);
-    println!("Writing an empty accumulator to disk");
-    acc.serialize(&mut writer, UseCompression::No)
-        .expect("unable to write fresh accumulator to challenge file");
-    writer.flush().expect("unable to flush accumulator to disk");
-
-    println!("Wrote a fresh accumulator to challenge file");
-}
@@ -1,5 +1,3 @@
-use powersoftau::bn256::Bn256CeremonyParameters;
-
 use powersoftau::batched_accumulator::BatchedAccumulator;
 use powersoftau::parameters::UseCompression;
 use powersoftau::utils::blank_hash;
@@ -9,25 +7,29 @@ use memmap::*;
 use std::fs::OpenOptions;
 use std::io::Write;
 
-use powersoftau::parameters::PowersOfTauParameters;
+use powersoftau::parameters::{CeremonyParams, CurveKind};
 
 const COMPRESS_NEW_CHALLENGE: UseCompression = UseCompression::No;
 
 fn main() {
     let args: Vec<String> = std::env::args().collect();
-    if args.len() != 2 {
-        println!("Usage: \n<challenge_file>");
+    if args.len() != 4 {
+        println!("Usage: \n<challenge_file> <ceremony_size> <batch_size>");
         std::process::exit(exitcode::USAGE);
     }
     let challenge_filename = &args[1];
+    let circuit_power = args[2].parse().expect("could not parse circuit power");
+    let batch_size = args[3].parse().expect("could not parse batch size");
+
+    let parameters = CeremonyParams::new(CurveKind::Bn256, circuit_power, batch_size);
 
     println!(
         "Will generate an empty accumulator for 2^{} powers of tau",
-        Bn256CeremonyParameters::REQUIRED_POWER
+        parameters.size
     );
     println!(
         "In total will generate up to {} powers",
-        Bn256CeremonyParameters::TAU_POWERS_G1_LENGTH
+        parameters.powers_g1_length
     );
 
     let file = OpenOptions::new()
@@ -38,11 +40,8 @@ fn main() {
         .expect("unable to create challenge file");
 
     let expected_challenge_length = match COMPRESS_NEW_CHALLENGE {
-        UseCompression::Yes => {
-            Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE
-                - Bn256CeremonyParameters::PUBLIC_KEY_SIZE
-        }
-        UseCompression::No => Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE,
+        UseCompression::Yes => parameters.contribution_size - parameters.public_key_size,
+        UseCompression::No => parameters.accumulator_size,
     };
 
     file.set_len(expected_challenge_length as u64)
@@ -75,9 +74,10 @@ fn main() {
         println!();
     }
 
-    BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::generate_initial(
+    BatchedAccumulator::<Bn256>::generate_initial(
         &mut writable_map,
         COMPRESS_NEW_CHALLENGE,
+        &parameters,
     )
     .expect("generation of initial accumulator is successful");
     writable_map
@@ -88,8 +88,7 @@ fn main() {
     let output_readonly = writable_map
         .make_read_only()
         .expect("must make a map readonly");
-    let contribution_hash =
-        BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&output_readonly);
+    let contribution_hash = BatchedAccumulator::<Bn256>::calculate_hash(&output_readonly);
 
     println!("Empty contribution is formed with a hash:");
 
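Note that this usage string calls the second argument <ceremony_size> while the other binaries call it <circuit_power>; it is parsed into the same circuit_power argument of CeremonyParams::new, so it is the log2 size (for example 10 for a small 2^10 test ceremony; values here are illustrative).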
@@ -2,7 +2,7 @@ use bellman_ce::pairing::bn256::Bn256;
 use bellman_ce::pairing::bn256::{G1, G2};
 use bellman_ce::pairing::{CurveAffine, CurveProjective};
 use powersoftau::batched_accumulator::*;
-use powersoftau::bn256::Bn256CeremonyParameters;
+use powersoftau::parameters::{CeremonyParams, CurveKind};
 use powersoftau::*;
 
 use crate::parameters::*;
@@ -25,6 +25,12 @@ fn log_2(x: u64) -> u32 {
 }
 
 fn main() {
+    let parameters = CeremonyParams::new(
+        CurveKind::Bn256,
+        28, // turn this to 10 for the small test
+        21, // turn this to 8 for the small test
+    );
+
     let args: Vec<String> = std::env::args().collect();
     if args.len() != 2 {
         println!("Usage: \n<response_filename>");
@@ -43,10 +49,11 @@ fn main() {
         .expect("unable to create a memory map for input")
     };
 
-    let current_accumulator = BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::deserialize(
+    let current_accumulator = BatchedAccumulator::<Bn256>::deserialize(
         &response_readable_map,
         CheckForCorrectness::Yes,
         UseCompression::Yes,
+        &parameters,
     )
     .expect("unable to read uncompressed accumulator");
 
@@ -182,7 +189,7 @@ fn main() {
 
     // Lagrange coefficients in G1 (for constructing
     // LC/IC queries and precomputing polynomials for A)
-    for coeff in g1_coeffs {
+    for coeff in g1_coeffs.clone() {
         // Was normalized earlier in parallel
         let coeff = coeff.into_affine();
 
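The drive-by `.clone()` in the last hunk matters: `for coeff in g1_coeffs` would move the vector into the loop, making it unusable afterwards, while iterating over a clone keeps the original alive, presumably because g1_coeffs is consumed again later in this function (outside the excerpt). Borrowing with `for coeff in &g1_coeffs` would avoid the copy, at the cost of adjusting the body to work on references.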
@@ -1,8 +1,7 @@
 use bellman_ce::pairing::bn256::Bn256;
 use powersoftau::{
     batched_accumulator::BatchedAccumulator,
-    bn256::Bn256CeremonyParameters,
-    parameters::{CheckForCorrectness, PowersOfTauParameters, UseCompression},
+    parameters::{CeremonyParams, CheckForCorrectness, CurveKind, UseCompression},
     utils::reduced_hash,
 };
 
@@ -11,19 +10,6 @@ use std::io::Write;
 
 use memmap::MmapOptions;
 
-#[derive(Clone)]
-pub struct Bn256ReducedCeremonyParameters {}
-
-impl PowersOfTauParameters for Bn256ReducedCeremonyParameters {
-    const REQUIRED_POWER: usize = 10;
-
-    // This ceremony is based on the BN256 elliptic curve construction.
-    const G1_UNCOMPRESSED_BYTE_SIZE: usize = 64;
-    const G2_UNCOMPRESSED_BYTE_SIZE: usize = 128;
-    const G1_COMPRESSED_BYTE_SIZE: usize = 32;
-    const G2_COMPRESSED_BYTE_SIZE: usize = 64;
-}
-
 const fn num_bits<T>() -> usize {
     std::mem::size_of::<T>() * 8
 }
@@ -34,6 +20,12 @@ pub fn log_2(x: u64) -> u32 {
 }
 
 fn main() {
+    let parameters = CeremonyParams::new(
+        CurveKind::Bn256,
+        10, // here we use 10 since it's the reduced ceremony
+        21,
+    );
+
     // Try to load `./challenge` from disk.
     let reader = OpenOptions::new()
         .read(true)
@@ -45,27 +37,23 @@ fn main() {
         .expect("unable to create a memory map for input")
     };
 
-    let current_accumulator = BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::deserialize(
+    let current_accumulator = BatchedAccumulator::<Bn256>::deserialize(
         &challenge_readable_map,
         CheckForCorrectness::Yes,
         UseCompression::No,
+        &parameters,
     )
     .expect("unable to read compressed accumulator");
 
-    let mut reduced_accumulator =
-        BatchedAccumulator::<Bn256, Bn256ReducedCeremonyParameters>::empty();
-    reduced_accumulator.tau_powers_g1 = current_accumulator.tau_powers_g1
-        [..Bn256ReducedCeremonyParameters::TAU_POWERS_G1_LENGTH]
-        .to_vec();
-    reduced_accumulator.tau_powers_g2 = current_accumulator.tau_powers_g2
-        [..Bn256ReducedCeremonyParameters::TAU_POWERS_LENGTH]
-        .to_vec();
-    reduced_accumulator.alpha_tau_powers_g1 = current_accumulator.alpha_tau_powers_g1
-        [..Bn256ReducedCeremonyParameters::TAU_POWERS_LENGTH]
-        .to_vec();
-    reduced_accumulator.beta_tau_powers_g1 = current_accumulator.beta_tau_powers_g1
-        [..Bn256ReducedCeremonyParameters::TAU_POWERS_LENGTH]
-        .to_vec();
+    let mut reduced_accumulator = BatchedAccumulator::<Bn256>::empty(&parameters);
+    reduced_accumulator.tau_powers_g1 =
+        current_accumulator.tau_powers_g1[..parameters.powers_g1_length].to_vec();
+    reduced_accumulator.tau_powers_g2 =
+        current_accumulator.tau_powers_g2[..parameters.powers_length].to_vec();
+    reduced_accumulator.alpha_tau_powers_g1 =
+        current_accumulator.alpha_tau_powers_g1[..parameters.powers_length].to_vec();
+    reduced_accumulator.beta_tau_powers_g1 =
+        current_accumulator.beta_tau_powers_g1[..parameters.powers_length].to_vec();
     reduced_accumulator.beta_g2 = current_accumulator.beta_g2;
 
     let writer = OpenOptions::new()
@@ -77,7 +65,7 @@ fn main() {
 
     // Recomputation stips the public key and uses hashing to link with the previous contibution after decompression
     writer
-        .set_len(Bn256ReducedCeremonyParameters::ACCUMULATOR_BYTE_SIZE as u64)
+        .set_len(parameters.accumulator_size as u64)
         .expect("must make output file large enough");
 
     let mut writable_map = unsafe {
@@ -87,8 +75,8 @@ fn main() {
     };
 
     let hash = reduced_hash(
-        Bn256CeremonyParameters::REQUIRED_POWER as u8,
-        Bn256ReducedCeremonyParameters::REQUIRED_POWER as u8,
+        28, // this is the full size of the hash
+        parameters.size as u8,
     );
     (&mut writable_map[0..])
         .write_all(hash.as_slice())
@@ -110,17 +98,14 @@ fn main() {
     }
 
     reduced_accumulator
-        .serialize(&mut writable_map, UseCompression::No)
+        .serialize(&mut writable_map, UseCompression::No, &parameters)
         .unwrap();
 
     // Get the hash of the contribution, so the user can compare later
     let output_readonly = writable_map
         .make_read_only()
         .expect("must make a map readonly");
-    let contribution_hash =
-        BatchedAccumulator::<Bn256, Bn256ReducedCeremonyParameters>::calculate_hash(
-            &output_readonly,
-        );
+    let contribution_hash = BatchedAccumulator::<Bn256>::calculate_hash(&output_readonly);
 
     println!("Reduced contribution is formed with a hash:");
 
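Two details in this file are easy to miss. First, with the full-ceremony trait type gone, the first argument to reduced_hash is pinned to the literal 28 (the full ceremony's power, per the inline comment) while the reduced size now comes from the runtime parameters. Second, truncating each power vector to the reduced lengths is sound because the first 2^k powers of a larger ceremony form a valid smaller ceremony, which is the point of the reduction.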
@@ -1,10 +1,9 @@
 use bellman_ce::pairing::bn256::Bn256;
 use bellman_ce::pairing::bn256::{G1, G2};
 use bellman_ce::pairing::{CurveAffine, CurveProjective};
-use powersoftau::accumulator::HashWriter;
 use powersoftau::batched_accumulator::*;
-use powersoftau::bn256::Bn256CeremonyParameters;
 use powersoftau::*;
+use powersoftau::parameters::{CeremonyParams, CurveKind};
 
 use crate::keypair::*;
 use crate::parameters::*;
@@ -17,6 +16,10 @@ use std::fs::{remove_file, OpenOptions};
 use std::io::{self, BufWriter, Read, Write};
 use std::path::Path;
 
+use blake2::{Blake2b, Digest};
+use generic_array::GenericArray;
+use typenum::U64;
+
 use memmap::*;
 
 const fn num_bits<T>() -> usize {
@@ -28,13 +31,51 @@ fn log_2(x: u64) -> u32 {
     num_bits::<u64>() as u32 - x.leading_zeros() - 1
 }
 
+/// Abstraction over a writer which hashes the data being written.
+pub struct HashWriter<W: Write> {
+    writer: W,
+    hasher: Blake2b,
+}
+
+impl<W: Write> HashWriter<W> {
+    /// Construct a new `HashWriter` given an existing `writer` by value.
+    pub fn new(writer: W) -> Self {
+        HashWriter {
+            writer,
+            hasher: Blake2b::default(),
+        }
+    }
+
+    /// Destroy this writer and return the hash of what was written.
+    pub fn into_hash(self) -> GenericArray<u8, U64> {
+        self.hasher.result()
+    }
+}
+
+impl<W: Write> Write for HashWriter<W> {
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        let bytes = self.writer.write(buf)?;
+
+        if bytes > 0 {
+            self.hasher.input(&buf[0..bytes]);
+        }
+
+        Ok(bytes)
+    }
+
+    fn flush(&mut self) -> io::Result<()> {
+        self.writer.flush()
+    }
+}
+
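HashWriter moves into this binary (its import from powersoftau::accumulator is deleted above): it forwards writes to an inner writer while feeding every byte into a Blake2b hasher, so serializing an accumulator through one wrapped around io::sink() yields the file hash without writing anywhere. A minimal usage sketch (illustrative input only):

    use std::io::{self, Write};

    let mut hashed = HashWriter::new(io::sink());
    hashed.write_all(b"accumulator bytes").unwrap();
    let digest = hashed.into_hash(); // 64-byte Blake2b digest of everything written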
 // Computes the hash of the challenge file for the player,
 // given the current state of the accumulator and the last
 // response file hash.
 fn get_challenge_file_hash(
-    acc: &mut BatchedAccumulator<Bn256, Bn256CeremonyParameters>,
+    acc: &mut BatchedAccumulator<Bn256>,
     last_response_file_hash: &[u8; 64],
     is_initial: bool,
+    parameters: &CeremonyParams,
 ) -> [u8; 64] {
     let sink = io::sink();
     let mut sink = HashWriter::new(sink);
@@ -53,7 +94,7 @@ fn get_challenge_file_hash(
         .expect("unable to create temporary tmp_challenge_file_hash");
 
     writer
-        .set_len(Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE as u64)
+        .set_len(parameters.accumulator_size as u64)
         .expect("must make output file large enough");
     let mut writable_map = unsafe {
         MmapOptions::new()
@@ -69,13 +110,14 @@ fn get_challenge_file_hash(
         .expect("unable to write blank hash to challenge file");
 
     if is_initial {
-        BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::generate_initial(
+        BatchedAccumulator::<Bn256>::generate_initial(
             &mut writable_map,
             UseCompression::No,
+            parameters,
         )
         .expect("generation of initial accumulator is successful");
     } else {
-        acc.serialize(&mut writable_map, UseCompression::No)
+        acc.serialize(&mut writable_map, UseCompression::No, parameters)
             .unwrap();
     }
 
@@ -102,9 +144,10 @@ fn get_challenge_file_hash(
 // accumulator, the player's public key, and the challenge
 // file's hash.
 fn get_response_file_hash(
-    acc: &mut BatchedAccumulator<Bn256, Bn256CeremonyParameters>,
+    acc: &mut BatchedAccumulator<Bn256>,
     pubkey: &PublicKey<Bn256>,
     last_challenge_file_hash: &[u8; 64],
+    parameters: &CeremonyParams,
 ) -> [u8; 64] {
     let sink = io::sink();
     let mut sink = HashWriter::new(sink);
@@ -122,7 +165,7 @@ fn get_response_file_hash(
         .expect("unable to create temporary tmp_response_file_hash");
 
     writer
-        .set_len(Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE as u64)
+        .set_len(parameters.contribution_size as u64)
         .expect("must make output file large enough");
     let mut writable_map = unsafe {
         MmapOptions::new()
@@ -137,11 +180,11 @@ fn get_response_file_hash(
         .flush()
         .expect("unable to write blank hash to challenge file");
 
-    acc.serialize(&mut writable_map, UseCompression::Yes)
+    acc.serialize(&mut writable_map, UseCompression::Yes, parameters)
         .unwrap();
 
     pubkey
-        .write::<Bn256CeremonyParameters>(&mut writable_map, UseCompression::Yes)
+        .write(&mut writable_map, UseCompression::Yes, parameters)
         .expect("unable to write public key");
     writable_map.flush().expect("must flush the memory map");
 }
@@ -162,7 +205,7 @@ fn get_response_file_hash(
     tmp
 }
 
-fn new_accumulator_for_verify() -> BatchedAccumulator<Bn256, Bn256CeremonyParameters> {
+fn new_accumulator_for_verify(parameters: &CeremonyParams) -> BatchedAccumulator<Bn256> {
     let file_name = "tmp_initial_challenge";
     {
         if Path::new(file_name).exists() {
@@ -176,7 +219,7 @@ fn new_accumulator_for_verify() -> BatchedAccumulator<Bn256, Bn256CeremonyParame
             .open(file_name)
             .expect("unable to create `./tmp_initial_challenge`");
 
-        let expected_challenge_length = Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE;
+        let expected_challenge_length = parameters.accumulator_size;
         file.set_len(expected_challenge_length as u64)
             .expect("unable to allocate large enough file");
 
@@ -185,9 +228,10 @@ fn new_accumulator_for_verify() -> BatchedAccumulator<Bn256, Bn256CeremonyParame
             .map_mut(&file)
             .expect("unable to create a memory map")
         };
-        BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::generate_initial(
+        BatchedAccumulator::<Bn256>::generate_initial(
             &mut writable_map,
             UseCompression::No,
+            &parameters,
        )
        .expect("generation of initial accumulator is successful");
        writable_map
@@ -206,17 +250,26 @@ fn new_accumulator_for_verify() -> BatchedAccumulator<Bn256, Bn256CeremonyParame
         .expect("unable to create a memory map for input")
     };
 
-    BatchedAccumulator::deserialize(&readable_map, CheckForCorrectness::Yes, UseCompression::No)
-        .expect("unable to read uncompressed accumulator")
+    BatchedAccumulator::deserialize(
+        &readable_map,
+        CheckForCorrectness::Yes,
+        UseCompression::No,
+        &parameters,
+    )
+    .expect("unable to read uncompressed accumulator")
 }
 
 fn main() {
     let args: Vec<String> = std::env::args().collect();
-    if args.len() != 2 {
-        println!("Usage: \n<transcript_file>");
+    if args.len() != 4 {
+        println!("Usage: \n<transcript_file> <circuit_power> <batch_size>");
         std::process::exit(exitcode::USAGE);
     }
     let transcript_filename = &args[1];
+    let circuit_power = args[2].parse().expect("could not parse circuit power");
+    let batch_size = args[3].parse().expect("could not parse batch size");
+
+    let parameters = CeremonyParams::new(CurveKind::Bn256, circuit_power, batch_size);
 
     // Try to load transcript file from disk.
     let reader = OpenOptions::new()
@@ -231,7 +284,7 @@ fn main() {
     };
 
     // Initialize the accumulator
-    let mut current_accumulator = new_accumulator_for_verify();
+    let mut current_accumulator = new_accumulator_for_verify(&parameters);
 
     // The "last response file hash" is just a blank BLAKE2b hash
     // at the beginning of the hash chain.
@@ -249,10 +302,7 @@ fn main() {
     }
 
     let memory_slice = transcript_readable_map
-        .get(
-            i * Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE
-                ..(i + 1) * Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE,
-        )
+        .get(i * parameters.contribution_size..(i + 1) * parameters.contribution_size)
         .expect("must read point data from file");
     let writer = OpenOptions::new()
         .read(true)
@@ -262,7 +312,7 @@ fn main() {
         .expect("unable to create temporary tmp_response");
 
     writer
-        .set_len(Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE as u64)
+        .set_len(parameters.contribution_size as u64)
         .expect("must make output file large enough");
     let mut writable_map = unsafe {
         MmapOptions::new()
@@ -279,8 +329,12 @@ fn main() {
         .make_read_only()
         .expect("must make a map readonly");
 
-    let last_challenge_file_hash =
-        get_challenge_file_hash(&mut current_accumulator, &last_response_file_hash, i == 0);
+    let last_challenge_file_hash = get_challenge_file_hash(
+        &mut current_accumulator,
+        &last_response_file_hash,
+        i == 0,
+        &parameters,
+    );
 
     // Deserialize the accumulator provided by the player in
     // their response file. It's stored in the transcript in
@@ -291,14 +345,13 @@ fn main() {
         &response_readable_map,
         CheckForCorrectness::Yes,
         UseCompression::Yes,
+        &parameters,
     )
     .expect("unable to read uncompressed accumulator");
 
-    let response_file_pubkey = PublicKey::<Bn256>::read::<Bn256CeremonyParameters>(
-        &response_readable_map,
-        UseCompression::Yes,
-    )
-    .unwrap();
+    let response_file_pubkey =
+        PublicKey::<Bn256>::read(&response_readable_map, UseCompression::Yes, &parameters)
+            .unwrap();
     // Compute the hash of the response file. (we had it in uncompressed
     // form in the transcript, but the response file is compressed to save
     // participants bandwidth.)
@@ -306,6 +359,7 @@ fn main() {
         &mut response_file_accumulator,
         &response_file_pubkey,
         &last_challenge_file_hash,
+        &parameters,
     );
 
     // Verify the transformation from the previous accumulator to the new
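The transcript handling above relies on every response occupying a fixed-size window, so participant i's contribution is addressed by offset arithmetic against the runtime contribution size. The slicing in the hunk is equivalent to:

    // Participant i's response spans bytes
    // [i * contribution_size, (i + 1) * contribution_size).
    let start = i * parameters.contribution_size;
    let end = (i + 1) * parameters.contribution_size;
    let response_i = &transcript_readable_map[start..end];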
@@ -1,5 +1,4 @@
 use powersoftau::batched_accumulator::BatchedAccumulator;
-use powersoftau::bn256::Bn256CeremonyParameters;
 use powersoftau::keypair::PublicKey;
 use powersoftau::parameters::{CheckForCorrectness, UseCompression};
 
@@ -9,7 +8,7 @@ use std::fs::OpenOptions;
 
 use std::io::{Read, Write};
 
-use powersoftau::parameters::PowersOfTauParameters;
+use powersoftau::parameters::{CeremonyParams, CurveKind};
 
 const PREVIOUS_CHALLENGE_IS_COMPRESSED: UseCompression = UseCompression::No;
 const CONTRIBUTION_IS_COMPRESSED: UseCompression = UseCompression::Yes;
@@ -17,17 +16,21 @@ const COMPRESS_NEW_CHALLENGE: UseCompression = UseCompression::No;
 
 fn main() {
     let args: Vec<String> = std::env::args().collect();
-    if args.len() != 4 {
-        println!("Usage: \n<challenge_file> <response_file> <new_challenge_file>");
+    if args.len() != 6 {
+        println!("Usage: \n<challenge_file> <response_file> <new_challenge_file> <circuit_power> <batch_size>");
         std::process::exit(exitcode::USAGE);
     }
     let challenge_filename = &args[1];
     let response_filename = &args[2];
     let new_challenge_filename = &args[3];
+    let circuit_power = args[4].parse().expect("could not parse circuit power");
+    let batch_size = args[5].parse().expect("could not parse batch size");
+
+    let parameters = CeremonyParams::new(CurveKind::Bn256, circuit_power, batch_size);
 
     println!(
         "Will verify and decompress a contribution to accumulator for 2^{} powers of tau",
-        Bn256CeremonyParameters::REQUIRED_POWER
+        parameters.size
     );
 
     // Try to load challenge file from disk.
@@ -41,11 +44,8 @@ fn main() {
         .metadata()
         .expect("unable to get filesystem metadata for challenge file");
     let expected_challenge_length = match PREVIOUS_CHALLENGE_IS_COMPRESSED {
-        UseCompression::Yes => {
-            Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE
-                - Bn256CeremonyParameters::PUBLIC_KEY_SIZE
-        }
-        UseCompression::No => Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE,
+        UseCompression::Yes => parameters.contribution_size - parameters.public_key_size,
+        UseCompression::No => parameters.accumulator_size,
     };
     if metadata.len() != (expected_challenge_length as u64) {
         panic!(
@@ -73,11 +73,8 @@ fn main() {
         .metadata()
         .expect("unable to get filesystem metadata for response file");
     let expected_response_length = match CONTRIBUTION_IS_COMPRESSED {
-        UseCompression::Yes => Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE,
-        UseCompression::No => {
-            Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE
-                + Bn256CeremonyParameters::PUBLIC_KEY_SIZE
-        }
+        UseCompression::Yes => parameters.contribution_size,
+        UseCompression::No => parameters.accumulator_size + parameters.public_key_size,
     };
     if metadata.len() != (expected_response_length as u64) {
         panic!(
@@ -99,9 +96,7 @@ fn main() {
     // Check that contribution is correct
 
     let current_accumulator_hash =
-        BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(
-            &challenge_readable_map,
-        );
+        BatchedAccumulator::<Bn256>::calculate_hash(&challenge_readable_map);
 
     println!("Hash of the `challenge` file for verification:");
     for line in current_accumulator_hash.as_slice().chunks(16) {
@@ -142,9 +137,7 @@ fn main() {
         }
     }
 
-    let response_hash = BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(
-        &response_readable_map,
-    );
+    let response_hash = BatchedAccumulator::<Bn256>::calculate_hash(&response_readable_map);
 
     println!("Hash of the response file for verification:");
     for line in response_hash.as_slice().chunks(16) {
@@ -159,9 +152,10 @@ fn main() {
     }
 
     // get the contributor's public key
-    let public_key = PublicKey::<Bn256>::read::<Bn256CeremonyParameters>(
+    let public_key = PublicKey::<Bn256>::read(
         &response_readable_map,
         CONTRIBUTION_IS_COMPRESSED,
+        &parameters,
     )
     .expect("wasn't able to deserialize the response file's public key");
 
@@ -171,7 +165,7 @@ fn main() {
         "Verifying a contribution to contain proper powers and correspond to the public key..."
     );
 
-    let valid = BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::verify_transformation(
+    let valid = BatchedAccumulator::<Bn256>::verify_transformation(
        &challenge_readable_map,
        &response_readable_map,
        &public_key,
@@ -180,6 +174,7 @@ fn main() {
        CONTRIBUTION_IS_COMPRESSED,
        CheckForCorrectness::No,
        CheckForCorrectness::Yes,
+       &parameters,
    );

    if !valid {
@@ -206,7 +201,7 @@ fn main() {

    // Recomputation strips the public key and uses hashing to link with the previous contribution after decompression
    writer
-        .set_len(Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE as u64)
|
.set_len(parameters.accumulator_size as u64)
|
||||||
.expect("must make output file large enough");
|
.expect("must make output file large enough");
|
||||||
|
|
||||||
let mut writable_map = unsafe {
|
let mut writable_map = unsafe {
|
||||||
@ -225,10 +220,11 @@ fn main() {
|
|||||||
.expect("unable to write hash to new challenge file");
|
.expect("unable to write hash to new challenge file");
|
||||||
}
|
}
|
||||||
|
|
||||||
BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::decompress(
|
BatchedAccumulator::<Bn256>::decompress(
|
||||||
&response_readable_map,
|
&response_readable_map,
|
||||||
&mut writable_map,
|
&mut writable_map,
|
||||||
CheckForCorrectness::No,
|
CheckForCorrectness::No,
|
||||||
|
¶meters,
|
||||||
)
|
)
|
||||||
.expect("must decompress a response for a new challenge");
|
.expect("must decompress a response for a new challenge");
|
||||||
|
|
||||||
@ -239,9 +235,7 @@ fn main() {
|
|||||||
.expect("must make a map readonly");
|
.expect("must make a map readonly");
|
||||||
|
|
||||||
let recompressed_hash =
|
let recompressed_hash =
|
||||||
BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(
|
BatchedAccumulator::<Bn256>::calculate_hash(&new_challenge_readable_map);
|
||||||
&new_challenge_readable_map,
|
|
||||||
);
|
|
||||||
|
|
||||||
println!("Here's the BLAKE2b hash of the decompressed participant's response as new_challenge file:");
|
println!("Here's the BLAKE2b hash of the decompressed participant's response as new_challenge file:");
|
||||||
|
|
||||||
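With this change the verifier no longer bakes the ceremony size in at compile time: the circuit power and batch size arrive as the fourth and fifth positional arguments, a single `CeremonyParams` value is built from them, and every size check and `BatchedAccumulator` call above is driven by that value (for example `verify_transform_constrained challenge1 response1 challenge2 10 256`, exactly as the updated test script at the end of this diff invokes it).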
@@ -1,109 +0,0 @@
-use crate::parameters::PowersOfTauParameters;
-
-#[derive(Clone)]
-pub struct Bn256CeremonyParameters {}
-
-impl PowersOfTauParameters for Bn256CeremonyParameters {
-    #[cfg(not(feature = "smalltest"))]
-    const REQUIRED_POWER: usize = 28;
-
-    #[cfg(feature = "smalltest")]
-    const REQUIRED_POWER: usize = 10;
-    #[cfg(feature = "smalltest")]
-    const EMPIRICAL_BATCH_SIZE: usize = 1 << 8;
-
-    // This ceremony is based on the BN256 elliptic curve construction.
-    const G1_UNCOMPRESSED_BYTE_SIZE: usize = 64;
-    const G2_UNCOMPRESSED_BYTE_SIZE: usize = 128;
-    const G1_COMPRESSED_BYTE_SIZE: usize = 32;
-    const G2_COMPRESSED_BYTE_SIZE: usize = 64;
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use crate::accumulator::*;
-    use crate::{
-        keypair::{keypair, PublicKey},
-        parameters::{CheckForCorrectness, UseCompression},
-        utils::{power_pairs, same_ratio},
-    };
-    use bellman_ce::pairing::{
-        bn256::{Bn256, Fr, G1Affine, G2Affine},
-        ff::Field,
-        CurveAffine, CurveProjective,
-    };
-    use rand::{thread_rng, Rand, Rng};
-
-    #[test]
-    fn test_pubkey_serialization() {
-        let rng = &mut thread_rng();
-        let digest = (0..64).map(|_| rng.gen()).collect::<Vec<_>>();
-        let (pk, _) = keypair::<_, Bn256>(rng, &digest);
-        let mut v = vec![];
-        pk.serialize(&mut v).unwrap();
-        assert_eq!(v.len(), Bn256CeremonyParameters::PUBLIC_KEY_SIZE);
-        let deserialized = PublicKey::<Bn256>::deserialize(&mut &v[..]).unwrap();
-        assert!(pk == deserialized);
-    }
-
-    #[test]
-    fn test_power_pairs() {
-        let rng = &mut thread_rng();
-
-        let mut v = vec![];
-        let x = Fr::rand(rng);
-        let mut acc = Fr::one();
-        for _ in 0..100 {
-            v.push(G1Affine::one().mul(acc).into_affine());
-            acc.mul_assign(&x);
-        }
-
-        let gx = G2Affine::one().mul(x).into_affine();
-
-        assert!(same_ratio(power_pairs(&v), (G2Affine::one(), gx)));
-
-        v[1] = v[1].mul(Fr::rand(rng)).into_affine();
-
-        assert!(!same_ratio(power_pairs(&v), (G2Affine::one(), gx)));
-    }
-
-    #[test]
-    fn test_same_ratio() {
-        let rng = &mut thread_rng();
-
-        let s = Fr::rand(rng);
-        let g1 = G1Affine::one();
-        let g2 = G2Affine::one();
-        let g1_s = g1.mul(s).into_affine();
-        let g2_s = g2.mul(s).into_affine();
-
-        assert!(same_ratio((g1, g1_s), (g2, g2_s)));
-        assert!(!same_ratio((g1_s, g1), (g2, g2_s)));
-    }
-
-    #[test]
-    fn test_accumulator_serialization() {
-        let rng = &mut thread_rng();
-        let mut digest = (0..64).map(|_| rng.gen()).collect::<Vec<_>>();
-        let params = Bn256CeremonyParameters {};
-        let mut acc = Accumulator::<Bn256, _>::new(params.clone());
-        let before = acc.clone();
-        let (pk, sk) = keypair::<_, Bn256>(rng, &digest);
-        acc.transform(&sk);
-        assert!(verify_transform(&before, &acc, &pk, &digest));
-        digest[0] = !digest[0];
-        assert!(!verify_transform(&before, &acc, &pk, &digest));
-        let mut v = Vec::with_capacity(Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE - 64);
-        acc.serialize(&mut v, UseCompression::No).unwrap();
-        assert_eq!(v.len(), Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE - 64);
-        let deserialized = Accumulator::deserialize(
-            &mut &v[..],
-            UseCompression::No,
-            CheckForCorrectness::No,
-            params,
-        )
-        .unwrap();
-        assert!(acc == deserialized);
-    }
-}
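Everything the deleted `Bn256CeremonyParameters` pinned at compile time is now expressible at runtime. A minimal sketch of the equivalent, using only the `CurveParams`/`CeremonyParams` API introduced further down in this commit (the `powersoftau::parameters` import path is taken from the binaries above):

use powersoftau::parameters::{CeremonyParams, CurveKind, CurveParams};

fn main() {
    // The BN256 group element sizes the old trait hard-coded
    let curve = CurveParams::new(CurveKind::Bn256);
    assert_eq!((curve.g1, curve.g2), (64, 128));
    assert_eq!((curve.g1_compressed, curve.g2_compressed), (32, 64));

    // The old `smalltest` configuration (REQUIRED_POWER = 10,
    // EMPIRICAL_BATCH_SIZE = 1 << 8), now just two arguments
    let params = CeremonyParams::new(CurveKind::Bn256, 10, 1 << 8);
    assert_eq!(params.powers_length, 1 << 10);
    assert_eq!(params.powers_g1_length, (1 << 11) - 1);
}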
@@ -9,7 +9,7 @@ use std::io::{self, Read, Write};
 
 use typenum::consts::U64;
 
-use super::parameters::{DeserializationError, PowersOfTauParameters, UseCompression};
+use super::parameters::{CeremonyParams, DeserializationError, UseCompression};
 use super::utils::{hash_to_g2, write_point};
 
 /// Contains terms of the form (s<sub>1</sub>, s<sub>1</sub><sup>x</sup>, H(s<sub>1</sub><sup>x</sup>)<sub>2</sub>, H(s<sub>1</sub><sup>x</sup>)<sub>2</sub><sup>x</sup>)
@@ -167,42 +167,43 @@ impl<E: Engine> PublicKey<E> {
     /// This function is intended to write the key to the memory map and calculates
     /// a position for writing into the file itself based on information whether
     /// contribution was output in compressed or uncompressed form
-    pub fn write<P>(
+    pub fn write(
         &self,
         output_map: &mut MmapMut,
         accumulator_was_compressed: UseCompression,
-    ) -> io::Result<()>
-    where
-        P: PowersOfTauParameters,
-    {
+        parameters: &CeremonyParams,
+    ) -> io::Result<()> {
         let mut position = match accumulator_was_compressed {
-            UseCompression::Yes => P::CONTRIBUTION_BYTE_SIZE - P::PUBLIC_KEY_SIZE,
-            UseCompression::No => P::ACCUMULATOR_BYTE_SIZE,
+            UseCompression::Yes => parameters.contribution_size - parameters.public_key_size,
+            UseCompression::No => parameters.accumulator_size,
         };
 
+        let g1_size = parameters.curve.g1;
+        let g2_size = parameters.curve.g2;
+
         (&mut output_map[position..]).write_all(&self.tau_g1.0.into_uncompressed().as_ref())?;
-        position += P::G1_UNCOMPRESSED_BYTE_SIZE;
+        position += g1_size;
 
         (&mut output_map[position..]).write_all(&self.tau_g1.1.into_uncompressed().as_ref())?;
-        position += P::G1_UNCOMPRESSED_BYTE_SIZE;
+        position += g1_size;
 
         (&mut output_map[position..]).write_all(&self.alpha_g1.0.into_uncompressed().as_ref())?;
-        position += P::G1_UNCOMPRESSED_BYTE_SIZE;
+        position += g1_size;
 
         (&mut output_map[position..]).write_all(&self.alpha_g1.1.into_uncompressed().as_ref())?;
-        position += P::G1_UNCOMPRESSED_BYTE_SIZE;
+        position += g1_size;
 
         (&mut output_map[position..]).write_all(&self.beta_g1.0.into_uncompressed().as_ref())?;
-        position += P::G1_UNCOMPRESSED_BYTE_SIZE;
+        position += g1_size;
 
         (&mut output_map[position..]).write_all(&self.beta_g1.1.into_uncompressed().as_ref())?;
-        position += P::G1_UNCOMPRESSED_BYTE_SIZE;
+        position += g1_size;
 
         (&mut output_map[position..]).write_all(&self.tau_g2.into_uncompressed().as_ref())?;
-        position += P::G2_UNCOMPRESSED_BYTE_SIZE;
+        position += g2_size;
 
         (&mut output_map[position..]).write_all(&self.alpha_g2.into_uncompressed().as_ref())?;
-        position += P::G2_UNCOMPRESSED_BYTE_SIZE;
+        position += g2_size;
 
         (&mut output_map[position..]).write_all(&self.beta_g2.into_uncompressed().as_ref())?;
 
@@ -214,13 +215,11 @@ impl<E: Engine> PublicKey<E> {
     /// Deserialize the public key. Points are always in uncompressed form, and
     /// always checked, since there aren't very many of them. Does not allow any
     /// points at infinity.
-    pub fn read<P>(
+    pub fn read(
         input_map: &Mmap,
         accumulator_was_compressed: UseCompression,
-    ) -> Result<Self, DeserializationError>
-    where
-        P: PowersOfTauParameters,
-    {
+        parameters: &CeremonyParams,
+    ) -> Result<Self, DeserializationError> {
         fn read_uncompressed<EE: Engine, C: CurveAffine<Engine = EE, Scalar = EE::Fr>>(
             input_map: &Mmap,
             position: usize,
@@ -241,33 +240,36 @@ impl<E: Engine> PublicKey<E> {
         }
 
         let mut position = match accumulator_was_compressed {
-            UseCompression::Yes => P::CONTRIBUTION_BYTE_SIZE - P::PUBLIC_KEY_SIZE,
-            UseCompression::No => P::ACCUMULATOR_BYTE_SIZE,
+            UseCompression::Yes => parameters.contribution_size - parameters.public_key_size,
+            UseCompression::No => parameters.accumulator_size,
         };
 
+        let g1_size = parameters.curve.g1;
+        let g2_size = parameters.curve.g2;
+
         let tau_g1_s = read_uncompressed::<E, _>(input_map, position)?;
-        position += P::G1_UNCOMPRESSED_BYTE_SIZE;
+        position += g1_size;
 
         let tau_g1_s_tau = read_uncompressed::<E, _>(input_map, position)?;
-        position += P::G1_UNCOMPRESSED_BYTE_SIZE;
+        position += g1_size;
 
         let alpha_g1_s = read_uncompressed::<E, _>(input_map, position)?;
-        position += P::G1_UNCOMPRESSED_BYTE_SIZE;
+        position += g1_size;
 
         let alpha_g1_s_alpha = read_uncompressed::<E, _>(input_map, position)?;
-        position += P::G1_UNCOMPRESSED_BYTE_SIZE;
+        position += g1_size;
 
         let beta_g1_s = read_uncompressed::<E, _>(input_map, position)?;
-        position += P::G1_UNCOMPRESSED_BYTE_SIZE;
+        position += g1_size;
 
         let beta_g1_s_beta = read_uncompressed::<E, _>(input_map, position)?;
-        position += P::G1_UNCOMPRESSED_BYTE_SIZE;
+        position += g1_size;
 
         let tau_g2 = read_uncompressed::<E, _>(input_map, position)?;
-        position += P::G2_UNCOMPRESSED_BYTE_SIZE;
+        position += g2_size;
 
         let alpha_g2 = read_uncompressed::<E, _>(input_map, position)?;
-        position += P::G2_UNCOMPRESSED_BYTE_SIZE;
+        position += g2_size;
 
         let beta_g2 = read_uncompressed::<E, _>(input_map, position)?;
 
@@ -281,3 +283,35 @@ impl<E: Engine> PublicKey<E> {
         })
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use rand::{thread_rng, Rng};
+
+    mod bn256 {
+        use super::*;
+        use crate::parameters::{CurveKind, CurveParams};
+        use bellman_ce::pairing::bn256::Bn256;
+
+        #[test]
+        fn test_pubkey_serialization() {
+            let curve = CurveParams::new(CurveKind::Bn256);
+            let public_key_size = 6 * curve.g1 + 3 * curve.g2;
+
+            // Generate a random public key
+            let rng = &mut thread_rng();
+            let digest = (0..64).map(|_| rng.gen()).collect::<Vec<_>>();
+            let (pk, _) = keypair::<_, Bn256>(rng, &digest);
+
+            // Serialize it
+            let mut v = vec![];
+            pk.serialize(&mut v).unwrap();
+            assert_eq!(v.len(), public_key_size);
+
+            // Deserialize it and check that it matches
+            let deserialized = PublicKey::<Bn256>::deserialize(&mut &v[..]).unwrap();
+            assert!(pk == deserialized);
+        }
+    }
+}
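The key takeaway from the new signatures: `write` and `read` are plain methods now, and the byte layout is fully determined by the `CeremonyParams` argument — six uncompressed G1 points followed by three uncompressed G2 points, placed right after the (compressed or uncompressed) accumulator body. A small sketch of the offset arithmetic both methods share, using only fields defined in this commit:

use powersoftau::parameters::{CeremonyParams, CurveKind};

fn main() {
    let params = CeremonyParams::new(CurveKind::Bn256, 10, 256);

    // (s1, s1*tau), (s2, s2*alpha), (s3, s3*beta) in G1, then tau, alpha, beta in G2
    assert_eq!(params.public_key_size, 6 * params.curve.g1 + 3 * params.curve.g2); // 768 for BN256

    // Starting offset of the key, mirroring the `match` in write/read
    let after_compressed_accumulator = params.contribution_size - params.public_key_size;
    let after_uncompressed_accumulator = params.accumulator_size;
    println!(
        "key starts at byte {} (compressed) / {} (uncompressed)",
        after_compressed_accumulator, after_uncompressed_accumulator
    );
}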
@@ -1,6 +1,4 @@
-pub mod accumulator;
 pub mod batched_accumulator;
-pub mod bn256;
 pub mod keypair;
 pub mod parameters;
 pub mod utils;
@@ -2,42 +2,125 @@ use bellman_ce::pairing::GroupDecodingError;
 use std::fmt;
 use std::io;
 
-pub trait PowersOfTauParameters: Clone {
-    const REQUIRED_POWER: usize;
-
-    const G1_UNCOMPRESSED_BYTE_SIZE: usize;
-    const G2_UNCOMPRESSED_BYTE_SIZE: usize;
-    const G1_COMPRESSED_BYTE_SIZE: usize;
-    const G2_COMPRESSED_BYTE_SIZE: usize;
-
-    const TAU_POWERS_LENGTH: usize = (1 << Self::REQUIRED_POWER);
-
-    const TAU_POWERS_G1_LENGTH: usize = (Self::TAU_POWERS_LENGTH << 1) - 1;
-
-    const ACCUMULATOR_BYTE_SIZE: usize = (Self::TAU_POWERS_G1_LENGTH * Self::G1_UNCOMPRESSED_BYTE_SIZE) + // g1 tau powers
-        (Self::TAU_POWERS_LENGTH * Self::G2_UNCOMPRESSED_BYTE_SIZE) + // g2 tau powers
-        (Self::TAU_POWERS_LENGTH * Self::G1_UNCOMPRESSED_BYTE_SIZE) + // alpha tau powers
-        (Self::TAU_POWERS_LENGTH * Self::G1_UNCOMPRESSED_BYTE_SIZE) // beta tau powers
-        + Self::G2_UNCOMPRESSED_BYTE_SIZE // beta in g2
-        + Self::HASH_SIZE; // blake2b hash of previous contribution
-
-    const PUBLIC_KEY_SIZE: usize = 3 * Self::G2_UNCOMPRESSED_BYTE_SIZE + // tau, alpha, and beta in g2
-        6 * Self::G1_UNCOMPRESSED_BYTE_SIZE; // (s1, s1*tau), (s2, s2*alpha), (s3, s3*beta) in g1
-
-    const CONTRIBUTION_BYTE_SIZE: usize = (Self::TAU_POWERS_G1_LENGTH * Self::G1_COMPRESSED_BYTE_SIZE) + // g1 tau powers
-        (Self::TAU_POWERS_LENGTH * Self::G2_COMPRESSED_BYTE_SIZE) + // g2 tau powers
-        (Self::TAU_POWERS_LENGTH * Self::G1_COMPRESSED_BYTE_SIZE) + // alpha tau powers
-        (Self::TAU_POWERS_LENGTH * Self::G1_COMPRESSED_BYTE_SIZE) // beta tau powers
-        + Self::G2_COMPRESSED_BYTE_SIZE // beta in g2
-        + Self::HASH_SIZE // blake2b hash of input accumulator
-        + Self::PUBLIC_KEY_SIZE; // public key
-
-    // Blake2b hash size
-    const HASH_SIZE: usize = 64;
-
-    const EMPIRICAL_BATCH_SIZE: usize = 1 << 21;
+/// The sizes of the group elements of a curve
+#[derive(Clone, PartialEq, Eq)]
+pub struct CurveParams {
+    pub g1: usize,
+    pub g2: usize,
+    pub g1_compressed: usize,
+    pub g2_compressed: usize,
 }
 
+/// The types of curves we support
+#[derive(Clone, PartialEq, Eq)]
+pub enum CurveKind {
+    Bn256,
+}
+
+impl CurveParams {
+    /// Creates a new curve based on the provided CurveKind
+    pub fn new(kind: CurveKind) -> Self {
+        let (g1, g2) = match kind {
+            CurveKind::Bn256 => (64, 128),
+        };
+
+        CurveParams {
+            g1,
+            g2,
+            g1_compressed: g1 / 2,
+            g2_compressed: g2 / 2,
+        }
+    }
+}
+
+#[derive(Clone, PartialEq, Eq)]
+/// The parameters used for the trusted setup ceremony
+pub struct CeremonyParams {
+    /// The type of the curve being used (currently only supports BN256)
+    pub curve: CurveParams,
+    /// The number of Powers of Tau G1 elements which will be accumulated
+    pub powers_g1_length: usize,
+    /// The number of Powers of Tau Alpha/Beta/G2 elements which will be accumulated
+    pub powers_length: usize,
+    /// The circuit size exponent (i.e. length will be 2^size); depends on the computation you want to support
+    pub size: usize,
+    /// The empirical batch size for the batched accumulator.
+    /// This is a hyper parameter and may be different for each
+    /// curve.
+    pub batch_size: usize,
+    /// Size of the used public key
+    pub public_key_size: usize,
+    /// Total size of the accumulator used for the ceremony
+    pub accumulator_size: usize,
+    /// Total size of the contribution
+    pub contribution_size: usize,
+    /// Size of the hash of the previous contribution
+    pub hash_size: usize,
+}
+
+impl CeremonyParams {
+    /// Constructs a new ceremony parameters object from the type of provided curve
+    pub fn new(kind: CurveKind, size: usize, batch_size: usize) -> Self {
+        // create the curve
+        let curve = CurveParams::new(kind);
+        Self::new_with_curve(curve, size, batch_size)
+    }
+
+    /// Constructs a new ceremony parameters object from the directly provided curve with parameters.
+    /// Consider using the `new` method if you want to use one of the pre-implemented curves
+    pub fn new_with_curve(curve: CurveParams, size: usize, batch_size: usize) -> Self {
+        // assume we're using a 64 byte long hash function such as Blake2b
+        let hash_size = 64;
+
+        // 2^{size}
+        let powers_length = 1 << size;
+        // 2^{size+1} - 1
+        let powers_g1_length = (powers_length << 1) - 1;
+
+        let accumulator_size =
+            // G1 Tau powers
+            powers_g1_length * curve.g1 +
+            // G2 Tau Powers + Alpha Tau powers + Beta Tau powers
+            powers_length * (curve.g2 + (curve.g1 * 2)) +
+            // Beta in G2
+            curve.g2 +
+            // Hash of the previous contribution
+            hash_size;
+
+        let public_key_size =
+            // tau, alpha, beta in g2
+            3 * curve.g2 +
+            // (s1, s1*tau), (s2, s2*alpha), (s3, s3*beta) in g1
+            6 * curve.g1;
+
+        let contribution_size =
+            // G1 Tau powers (compressed)
+            powers_g1_length * curve.g1_compressed +
+            // G2 Tau Powers + Alpha Tau powers + Beta Tau powers (compressed)
+            powers_length * (curve.g2_compressed + (curve.g1_compressed * 2)) +
+            // Beta in G2
+            curve.g2_compressed +
+            // Hash of the previous contribution
+            hash_size +
+            // The public key of the previous contributor
+            public_key_size;
+
+        Self {
+            curve,
+            size,
+            batch_size,
+            accumulator_size,
+            public_key_size,
+            contribution_size,
+            hash_size,
+            powers_length,
+            powers_g1_length,
+        }
+    }
+}
+
+// TODO: Add tests!
+
 /// Determines if point compression should be used.
 #[derive(Copy, Clone, PartialEq)]
 pub enum UseCompression {
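To make the size formulas concrete, a worked example for the BN256 test configuration used elsewhere in this commit (size 10, batch 256); the right-hand sides simply re-evaluate the expressions above by hand:

use powersoftau::parameters::{CeremonyParams, CurveKind};

fn main() {
    let p = CeremonyParams::new(CurveKind::Bn256, 10, 256);

    assert_eq!(p.powers_length, 1024); // 2^10
    assert_eq!(p.powers_g1_length, 2047); // 2^11 - 1

    // public key: 3 G2 + 6 G1, all uncompressed
    assert_eq!(p.public_key_size, 3 * 128 + 6 * 64); // = 768

    // accumulator: 2047*64 + 1024*(128 + 2*64) + 128 + 64
    assert_eq!(p.accumulator_size, 131_008 + 262_144 + 128 + 64); // = 393_344

    // contribution: 2047*32 + 1024*(64 + 2*32) + 64 + 64 + 768
    assert_eq!(p.contribution_size, 65_504 + 131_072 + 64 + 64 + 768); // = 197_472
}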
@@ -31,12 +31,13 @@ pub fn hash_to_g2<E: Engine>(mut digest: &[u8]) -> E::G2 {
 }
 
 #[cfg(test)]
-mod tests {
+mod bn256_tests {
     use super::*;
-    use bellman_ce::pairing::bn256::Bn256;
+    use bellman_ce::pairing::bn256::{Bn256, Fr, G1Affine, G2Affine};
+    use rand::{thread_rng, Rand};
 
     #[test]
-    fn test_hash_to_g2() {
+    fn test_hash_to_g2_bn256() {
         assert!(
             hash_to_g2::<Bn256>(&[
                 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
@@ -57,6 +58,41 @@ mod tests {
             ])
         );
     }
+
+    #[test]
+    fn test_same_ratio_bn256() {
+        let rng = &mut thread_rng();
+
+        let s = Fr::rand(rng);
+        let g1 = G1Affine::one();
+        let g2 = G2Affine::one();
+        let g1_s = g1.mul(s).into_affine();
+        let g2_s = g2.mul(s).into_affine();
+
+        assert!(same_ratio((g1, g1_s), (g2, g2_s)));
+        assert!(!same_ratio((g1_s, g1), (g2, g2_s)));
+    }
+
+    #[test]
+    fn test_power_pairs() {
+        let rng = &mut thread_rng();
+
+        let mut v = vec![];
+        let x = Fr::rand(rng);
+        let mut acc = Fr::one();
+        for _ in 0..100 {
+            v.push(G1Affine::one().mul(acc).into_affine());
+            acc.mul_assign(&x);
+        }
+
+        let gx = G2Affine::one().mul(x).into_affine();
+
+        assert!(same_ratio(power_pairs(&v), (G2Affine::one(), gx)));
+
+        v[1] = v[1].mul(Fr::rand(rng)).into_affine();
+
+        assert!(!same_ratio(power_pairs(&v), (G2Affine::one(), gx)));
+    }
 }
 
 fn merge_pairs<E: Engine, G: CurveAffine<Engine = E, Scalar = E::Fr>>(
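For orientation while reading these tests: `same_ratio((a, b), (c, d))` uses a pairing check, e(a, d) == e(b, c), to assert that b and d are a and c raised to the same secret exponent, and `power_pairs` collapses a vector (g, g^x, g^x², …) into one such pair via a random linear combination of adjacent elements — the `merge_pairs` helper whose signature follows — so a single pairing equation validates the whole power sequence with overwhelming probability.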
@@ -8,18 +8,21 @@ rm tmp_*
 
 set -e
 
-cargo run --release --features smalltest --bin new_constrained challenge1
-yes | cargo run --release --features smalltest --bin compute_constrained challenge1 response1
-cargo run --release --features smalltest --bin verify_transform_constrained challenge1 response1 challenge2
+SIZE=10
+BATCH=256
 
-yes | cargo run --release --features smalltest --bin compute_constrained challenge2 response2
-cargo run --release --features smalltest --bin verify_transform_constrained challenge2 response2 challenge3
+cargo run --release --bin new_constrained challenge1 $SIZE $BATCH
+yes | cargo run --release --bin compute_constrained challenge1 response1 $SIZE $BATCH
+cargo run --release --bin verify_transform_constrained challenge1 response1 challenge2 $SIZE $BATCH
 
-yes | cargo run --release --features smalltest --bin compute_constrained challenge3 response3
-cargo run --release --features smalltest --bin verify_transform_constrained challenge3 response3 challenge4
+yes | cargo run --release --bin compute_constrained challenge2 response2 $SIZE $BATCH
+cargo run --release --bin verify_transform_constrained challenge2 response2 challenge3 $SIZE $BATCH
 
-cargo run --release --features smalltest --bin beacon_constrained challenge4 response4
-cargo run --release --features smalltest --bin verify_transform_constrained challenge4 response4 challenge5
+yes | cargo run --release --bin compute_constrained challenge3 response3 $SIZE $BATCH
+cargo run --release --bin verify_transform_constrained challenge3 response3 challenge4 $SIZE $BATCH
+
+cargo run --release --bin beacon_constrained challenge4 response4 $SIZE $BATCH
+cargo run --release --bin verify_transform_constrained challenge4 response4 challenge5 $SIZE $BATCH
 
 cat response1 response2 response3 response4 > transcript
-cargo run --release --features smalltest --bin verify transcript
+cargo run --release --bin verify transcript $SIZE $BATCH
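Net effect on the end-to-end test: the `smalltest` compile-time feature is gone, and the same numbers it used to gate — `REQUIRED_POWER = 10` and `EMPIRICAL_BATCH_SIZE = 1 << 8` in the deleted bn256 module — are now passed explicitly as `$SIZE` and `$BATCH`, so different ceremony sizes can be exercised without recompiling.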