chore: cargo fmt + make clippy happy (#9)

parent b3c18de8a6
commit 32bbd5f35c
@@ -62,6 +62,6 @@ pub fn verify_proof<'a, E: Engine>(
             (&proof.a.prepare(), &proof.b.prepare()),
             (&acc.into_affine().prepare(), &pvk.neg_gamma_g2),
             (&proof.c.prepare(), &pvk.neg_delta_g2)
-        ].into_iter())
+        ].iter())
     ).unwrap() == pvk.alpha_g1_beta_g2)
 }
@@ -104,7 +104,7 @@ pub trait Engine: ScalarEngine {
         G2: Into<Self::G2Affine>,
     {
         Self::final_exponentiation(&Self::miller_loop(
-            [(&(p.into().prepare()), &(q.into().prepare()))].into_iter(),
+            [(&(p.into().prepare()), &(q.into().prepare()))].iter(),
         )).unwrap()
     }
 }
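
Both hunks above are the same clippy fix: on a fixed-size array, `.into_iter()` resolved (on pre-2021 editions) through the `&[T]` slice impl and yielded references, so it behaved exactly like `.iter()` while reading as a by-value iteration. A minimal standalone illustration; the edition note in the comment is the only caveat:

    fn main() {
        let xs = [1u32, 2, 3];
        // `iter()` unambiguously yields `&u32` items.
        let refs: Vec<&u32> = xs.iter().collect();
        assert_eq!(refs.len(), 3);
        // Before the 2021 edition, `xs.into_iter()` auto-referenced to the
        // slice impl and also yielded `&u32`, which is why clippy asked for
        // the clearer `iter()` spelling that this commit adopts.
    }
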
powersoftau/Cargo.lock (generated, 6 lines changed)
@@ -216,7 +216,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "hex-literal"
-version = "0.1.3"
+version = "0.1.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "hex-literal-impl 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -317,7 +317,7 @@ dependencies = [
  "crossbeam 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "exitcode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "generic-array 0.8.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "hex-literal 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "hex-literal 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "num_cpus 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -505,7 +505,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 "checksum gcc 0.3.55 (registry+https://github.com/rust-lang/crates.io-index)" = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2"
 "checksum generic-array 0.8.3 (registry+https://github.com/rust-lang/crates.io-index)" = "fceb69994e330afed50c93524be68c42fa898c2d9fd4ee8da03bd7363acd26f2"
 "checksum hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "805026a5d0141ffc30abb3be3173848ad46a1b1664fe632428479619a3644d77"
-"checksum hex-literal 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "27455ce8b4a6666c87220e4b59c9a83995476bdadc10197905e61dbe906e36fa"
+"checksum hex-literal 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "ddc2928beef125e519d69ae1baa8c37ea2e0d3848545217f6db0179c5eb1d639"
 "checksum hex-literal-impl 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1d340b6514f232f6db1bd16db65302a5278a04fef9ce867cb932e7e5fa21130a"
 "checksum itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5b8467d9c1cebe26feb08c640139247fac215782d35371ade9a2136ed6085358"
 "checksum lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bc5729f27f159ddd61f4df6228e827e86643d4d3e7c32183cb30a1c08f604a14"
powersoftau/Cargo.toml

@@ -18,7 +18,7 @@ blake2 = "0.6.1"
 generic-array = "0.8.3"
 typenum = "1.9.0"
 byteorder = "1.1.0"
-hex-literal = "0.1"
+hex-literal = "0.1.4"
 rust-crypto = "0.2"
 exitcode = "1.1.2"
 
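
The manifest and lockfile changes above pin hex-literal 0.1.4, the macro the beacon binary below uses to embed a Bitcoin block hash as a byte array. A minimal sketch, assuming the 0.1-era `#[macro_use]` import style this codebase uses:

    #[macro_use]
    extern crate hex_literal; // hex-literal 0.1.4, as pinned above

    fn main() {
        // `hex!` expands to a fixed-size byte array at compile time.
        let bytes: [u8; 4] = hex!("deadbeef");
        assert_eq!(bytes, [0xde, 0xad, 0xbe, 0xef]);
    }
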
@@ -25,33 +25,23 @@
 //! After some time has elapsed for participants to contribute to the ceremony, a participant is
 //! simulated with a randomness beacon. The resulting `Accumulator` contains partial zk-SNARK
 //! public parameters for all circuits within a bounded size.
-extern crate rand;
-extern crate crossbeam;
-extern crate num_cpus;
-extern crate blake2;
-extern crate generic_array;
-extern crate typenum;
-extern crate byteorder;
-extern crate bellman_ce;
-extern crate memmap;
-
-use memmap::{Mmap, MmapMut};
-use bellman_ce::pairing::ff::{Field, PrimeField};
-use byteorder::{ReadBytesExt, BigEndian};
-use rand::{SeedableRng, Rng, Rand};
-use rand::chacha::ChaChaRng;
-use bellman_ce::pairing::bn256::{Bn256};
-use bellman_ce::pairing::*;
+use bellman_ce::pairing::{
+    ff::{Field, PrimeField},
+    CurveAffine, CurveProjective, EncodedPoint, Engine, Wnaf,
+};
+use blake2::{Blake2b, Digest};
+use generic_array::GenericArray;
+
 use std::io::{self, Read, Write};
 use std::sync::{Arc, Mutex};
-use generic_array::GenericArray;
 use typenum::consts::U64;
-use blake2::{Blake2b, Digest};
-use std::fmt;
 
-use super::keypair::*;
-use super::utils::*;
-use super::parameters::*;
+use super::keypair::{PrivateKey, PublicKey};
+use super::parameters::{
+    CheckForCorrectness, DeserializationError, PowersOfTauParameters, UseCompression,
+};
+use super::utils::{hash_to_g2, power_pairs, same_ratio, write_point};
 
 /// The `Accumulator` is an object that participants of the ceremony contribute
 /// randomness to. This object contains powers of trapdoor `tau` in G1 and in G2 over
@@ -73,20 +63,20 @@ pub struct Accumulator<E: Engine, P: PowersOfTauParameters> {
     /// beta
     pub beta_g2: E::G2Affine,
     /// Keep parameters here
-    pub parameters: P
+    pub parameters: P,
 }
 
 impl<E: Engine, P: PowersOfTauParameters> PartialEq for Accumulator<E, P> {
     fn eq(&self, other: &Accumulator<E, P>) -> bool {
-        self.tau_powers_g1.eq(&other.tau_powers_g1) &&
-        self.tau_powers_g2.eq(&other.tau_powers_g2) &&
-        self.alpha_tau_powers_g1.eq(&other.alpha_tau_powers_g1) &&
-        self.beta_tau_powers_g1.eq(&other.beta_tau_powers_g1) &&
-        self.beta_g2 == other.beta_g2
+        self.tau_powers_g1.eq(&other.tau_powers_g1)
+            && self.tau_powers_g2.eq(&other.tau_powers_g2)
+            && self.alpha_tau_powers_g1.eq(&other.alpha_tau_powers_g1)
+            && self.beta_tau_powers_g1.eq(&other.beta_tau_powers_g1)
+            && self.beta_g2 == other.beta_g2
     }
 }
 
-impl<E:Engine, P: PowersOfTauParameters> Accumulator<E, P> {
+impl<E: Engine, P: PowersOfTauParameters> Accumulator<E, P> {
     /// Constructs an "initial" accumulator with τ = 1, α = 1, β = 1.
     pub fn new(parameters: P) -> Self {
         Accumulator {
@@ -95,7 +85,7 @@ impl<E:Engine, P: PowersOfTauParameters> Accumulator<E, P> {
             alpha_tau_powers_g1: vec![E::G1Affine::one(); P::TAU_POWERS_LENGTH],
             beta_tau_powers_g1: vec![E::G1Affine::one(); P::TAU_POWERS_LENGTH],
             beta_g2: E::G2Affine::one(),
-            parameters: parameters
+            parameters,
         }
     }
 
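
`parameters: parameters` becoming `parameters,` here (and in the struct literals further down) is clippy's redundant_field_names fix: when a local variable has the same name as the field, field-init shorthand says it once. Tiny self-contained example:

    struct Challenge {
        size: usize,
    }

    fn main() {
        let size = 64;
        // clippy::redundant_field_names: `size: size` shortens to `size`.
        let c = Challenge { size };
        assert_eq!(c.size, 64);
    }
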
@@ -103,15 +93,13 @@ impl<E:Engine, P: PowersOfTauParameters> Accumulator<E, P> {
     pub fn serialize<W: Write>(
         &self,
         writer: &mut W,
-        compression: UseCompression
-    ) -> io::Result<()>
-    {
+        compression: UseCompression,
+    ) -> io::Result<()> {
         fn write_all<W: Write, C: CurveAffine>(
             writer: &mut W,
             c: &[C],
-            compression: UseCompression
-        ) -> io::Result<()>
-        {
+            compression: UseCompression,
+        ) -> io::Result<()> {
             for c in c {
                 write_point(writer, c, compression)?;
             }
@@ -135,22 +123,19 @@ impl<E:Engine, P: PowersOfTauParameters> Accumulator<E, P> {
         reader: &mut R,
         compression: UseCompression,
         checked: CheckForCorrectness,
-        parameters: P
-    ) -> Result<Self, DeserializationError>
-    {
-        fn read_all<EE: Engine, R: Read, C: CurveAffine<Engine = EE, Scalar = EE::Fr> > (
+        parameters: P,
+    ) -> Result<Self, DeserializationError> {
+        fn read_all<EE: Engine, R: Read, C: CurveAffine<Engine = EE, Scalar = EE::Fr>>(
             reader: &mut R,
             size: usize,
             compression: UseCompression,
-            checked: CheckForCorrectness
-        ) -> Result<Vec<C>, DeserializationError>
-        {
+            checked: CheckForCorrectness,
+        ) -> Result<Vec<C>, DeserializationError> {
             fn decompress_all<R: Read, ENC: EncodedPoint>(
                 reader: &mut R,
                 size: usize,
-                checked: CheckForCorrectness
-            ) -> Result<Vec<ENC::Affine>, DeserializationError>
-            {
+                checked: CheckForCorrectness,
+            ) -> Result<Vec<ENC::Affine>, DeserializationError> {
                 // Read the encoded elements
                 let mut res = vec![ENC::empty(); size];
 
@@ -171,7 +156,10 @@ impl<E:Engine, P: PowersOfTauParameters> Accumulator<E, P> {
                 let decoding_error = Arc::new(Mutex::new(None));
 
                 crossbeam::scope(|scope| {
-                    for (source, target) in res.chunks(chunk_size).zip(res_affine.chunks_mut(chunk_size)) {
+                    for (source, target) in res
+                        .chunks(chunk_size)
+                        .zip(res_affine.chunks_mut(chunk_size))
+                    {
                         let decoding_error = decoding_error.clone();
 
                         scope.spawn(move || {
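
The reflowed loop above is the codebase's standard parallel pattern: split the input into per-core chunks and hand each chunk to a scoped thread. A minimal sketch of the same pattern, assuming the crossbeam 0.3 API pinned in the lockfile, where `crossbeam::scope` joins all spawned threads before returning, so the closures may borrow stack data:

    extern crate crossbeam; // crossbeam 0.3.2, as pinned in Cargo.lock above

    fn main() {
        let mut data = vec![0u64; 8];
        let chunk_size = 4;
        crossbeam::scope(|scope| {
            for chunk in data.chunks_mut(chunk_size) {
                // The scope guarantees these threads finish before `data`
                // goes out of scope, so borrowing `chunk` is sound.
                scope.spawn(move || {
                    for x in chunk.iter_mut() {
                        *x += 1;
                    }
                });
            }
        });
        assert!(data.iter().all(|&x| x == 1));
    }
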
@@ -185,21 +173,24 @@ impl<E:Engine, P: PowersOfTauParameters> Accumulator<E, P> {
                             match checked {
                                 CheckForCorrectness::Yes => {
                                     // Points at infinity are never expected in the accumulator
-                                    source.into_affine().map_err(|e| e.into()).and_then(|source| {
-                                        if source.is_zero() {
-                                            Err(DeserializationError::PointAtInfinity)
-                                        } else {
-                                            Ok(source)
-                                        }
-                                    })
+                                    source.into_affine().map_err(|e| e.into()).and_then(
+                                        |source| {
+                                            if source.is_zero() {
+                                                Err(DeserializationError::PointAtInfinity)
+                                            } else {
+                                                Ok(source)
+                                            }
                                         },
-                                CheckForCorrectness::No => source.into_affine_unchecked().map_err(|e| e.into())
+                                    )
+                                }
+                                CheckForCorrectness::No => {
+                                    source.into_affine_unchecked().map_err(|e| e.into())
+                                }
                             }
-                        }
-                        {
+                        } {
                             Ok(source) => {
                                 *target = source;
-                            },
+                            }
                             Err(e) => {
                                 *decoding_error.lock().unwrap() = Some(e);
                             }
@@ -209,41 +200,44 @@ impl<E:Engine, P: PowersOfTauParameters> Accumulator<E, P> {
                     }
                 });
 
-                match Arc::try_unwrap(decoding_error).unwrap().into_inner().unwrap() {
-                    Some(e) => {
-                        Err(e)
-                    },
-                    None => {
-                        Ok(res_affine)
-                    }
+                match Arc::try_unwrap(decoding_error)
+                    .unwrap()
+                    .into_inner()
+                    .unwrap()
+                {
+                    Some(e) => Err(e),
+                    None => Ok(res_affine),
                 }
             }
 
             match compression {
                 UseCompression::Yes => decompress_all::<_, C::Compressed>(reader, size, checked),
-                UseCompression::No => decompress_all::<_, C::Uncompressed>(reader, size, checked)
+                UseCompression::No => decompress_all::<_, C::Uncompressed>(reader, size, checked),
             }
         }
 
-        let tau_powers_g1 = read_all::<E, _, _>(reader, P::TAU_POWERS_G1_LENGTH, compression, checked)?;
-        let tau_powers_g2 = read_all::<E, _, _>(reader, P::TAU_POWERS_LENGTH, compression, checked)?;
-        let alpha_tau_powers_g1 = read_all::<E, _, _>(reader, P::TAU_POWERS_LENGTH, compression, checked)?;
-        let beta_tau_powers_g1 = read_all::<E, _, _>(reader, P::TAU_POWERS_LENGTH, compression, checked)?;
+        let tau_powers_g1 =
+            read_all::<E, _, _>(reader, P::TAU_POWERS_G1_LENGTH, compression, checked)?;
+        let tau_powers_g2 =
+            read_all::<E, _, _>(reader, P::TAU_POWERS_LENGTH, compression, checked)?;
+        let alpha_tau_powers_g1 =
+            read_all::<E, _, _>(reader, P::TAU_POWERS_LENGTH, compression, checked)?;
+        let beta_tau_powers_g1 =
+            read_all::<E, _, _>(reader, P::TAU_POWERS_LENGTH, compression, checked)?;
         let beta_g2 = read_all::<E, _, _>(reader, 1, compression, checked)?[0];
 
         Ok(Accumulator {
-            tau_powers_g1: tau_powers_g1,
-            tau_powers_g2: tau_powers_g2,
-            alpha_tau_powers_g1: alpha_tau_powers_g1,
-            beta_tau_powers_g1: beta_tau_powers_g1,
-            beta_g2: beta_g2,
-            parameters: parameters
+            tau_powers_g1,
+            tau_powers_g2,
+            alpha_tau_powers_g1,
+            beta_tau_powers_g1,
+            beta_g2,
+            parameters,
         })
     }
 
     /// Transforms the accumulator with a private key.
-    pub fn transform(&mut self, key: &PrivateKey<E>)
-    {
+    pub fn transform(&mut self, key: &PrivateKey<E>) {
         // Construct the powers of tau
         let mut taupowers = vec![E::Fr::zero(); P::TAU_POWERS_G1_LENGTH];
         let chunk_size = P::TAU_POWERS_G1_LENGTH / num_cpus::get();
@@ -264,30 +258,35 @@ impl<E:Engine, P: PowersOfTauParameters> Accumulator<E, P> {
 
     /// Exponentiate a large number of points, with an optional coefficient to be applied to the
     /// exponent.
-    fn batch_exp<EE: Engine, C: CurveAffine<Engine = EE, Scalar = EE::Fr> >(bases: &mut [C], exp: &[C::Scalar], coeff: Option<&C::Scalar>) {
+    fn batch_exp<EE: Engine, C: CurveAffine<Engine = EE, Scalar = EE::Fr>>(
+        bases: &mut [C],
+        exp: &[C::Scalar],
+        coeff: Option<&C::Scalar>,
+    ) {
         assert_eq!(bases.len(), exp.len());
         let mut projective = vec![C::Projective::zero(); bases.len()];
         let chunk_size = bases.len() / num_cpus::get();
 
         // Perform wNAF over multiple cores, placing results into `projective`.
         crossbeam::scope(|scope| {
-            for ((bases, exp), projective) in bases.chunks_mut(chunk_size)
-                .zip(exp.chunks(chunk_size))
-                .zip(projective.chunks_mut(chunk_size))
+            for ((bases, exp), projective) in bases
+                .chunks_mut(chunk_size)
+                .zip(exp.chunks(chunk_size))
+                .zip(projective.chunks_mut(chunk_size))
             {
                 scope.spawn(move || {
                     let mut wnaf = Wnaf::new();
 
-                    for ((base, exp), projective) in bases.iter_mut()
-                        .zip(exp.iter())
-                        .zip(projective.iter_mut())
+                    for ((base, exp), projective) in
+                        bases.iter_mut().zip(exp.iter()).zip(projective.iter_mut())
                     {
                         let mut exp = *exp;
                         if let Some(coeff) = coeff {
                             exp.mul_assign(coeff);
                         }
 
-                        *projective = wnaf.base(base.into_projective(), 1).scalar(exp.into_repr());
+                        *projective =
+                            wnaf.base(base.into_projective(), 1).scalar(exp.into_repr());
                     }
                 });
             }
@@ -295,8 +294,7 @@ impl<E:Engine, P: PowersOfTauParameters> Accumulator<E, P> {
 
         // Perform batch normalization
         crossbeam::scope(|scope| {
-            for projective in projective.chunks_mut(chunk_size)
-            {
+            for projective in projective.chunks_mut(chunk_size) {
                 scope.spawn(move || {
                     C::Projective::batch_normalization(projective);
                 });
@@ -310,16 +308,32 @@ impl<E:Engine, P: PowersOfTauParameters> Accumulator<E, P> {
     }
 
         batch_exp::<E, _>(&mut self.tau_powers_g1, &taupowers[0..], None);
-        batch_exp::<E, _>(&mut self.tau_powers_g2, &taupowers[0..P::TAU_POWERS_LENGTH], None);
-        batch_exp::<E, _>(&mut self.alpha_tau_powers_g1, &taupowers[0..P::TAU_POWERS_LENGTH], Some(&key.alpha));
-        batch_exp::<E, _>(&mut self.beta_tau_powers_g1, &taupowers[0..P::TAU_POWERS_LENGTH], Some(&key.beta));
+        batch_exp::<E, _>(
+            &mut self.tau_powers_g2,
+            &taupowers[0..P::TAU_POWERS_LENGTH],
+            None,
+        );
+        batch_exp::<E, _>(
+            &mut self.alpha_tau_powers_g1,
+            &taupowers[0..P::TAU_POWERS_LENGTH],
+            Some(&key.alpha),
+        );
+        batch_exp::<E, _>(
+            &mut self.beta_tau_powers_g1,
+            &taupowers[0..P::TAU_POWERS_LENGTH],
+            Some(&key.beta),
+        );
         self.beta_g2 = self.beta_g2.mul(key.beta).into_affine();
     }
 }
 
 /// Verifies a transformation of the `Accumulator` with the `PublicKey`, given a 64-byte transcript `digest`.
-pub fn verify_transform<E: Engine, P: PowersOfTauParameters>(before: &Accumulator<E, P>, after: &Accumulator<E, P>, key: &PublicKey<E>, digest: &[u8]) -> bool
-{
+pub fn verify_transform<E: Engine, P: PowersOfTauParameters>(
+    before: &Accumulator<E, P>,
+    after: &Accumulator<E, P>,
+    key: &PublicKey<E>,
+    digest: &[u8],
+) -> bool {
     assert_eq!(digest.len(), 64);
 
     let compute_g2_s = |g1_s: E::G1Affine, g1_s_x: E::G1Affine, personalization: u8| {
@@ -336,7 +350,7 @@ pub fn verify_transform<E: Engine, P: PowersOfTauParameters>(before: &Accumulato
     let beta_g2_s = compute_g2_s(key.beta_g1.0, key.beta_g1.1, 2);
 
     // Check the proofs-of-knowledge for tau/alpha/beta
 
     // g1^s / g1^(s*x) = g2^s / g2^(s*x)
     if !same_ratio(key.tau_g1, (tau_g2_s, key.tau_g2)) {
         return false;
@@ -357,54 +371,76 @@ pub fn verify_transform<E: Engine, P: PowersOfTauParameters>(before: &Accumulato
     }
 
     // Did the participant multiply the previous tau by the new one?
-    if !same_ratio((before.tau_powers_g1[1], after.tau_powers_g1[1]), (tau_g2_s, key.tau_g2)) {
+    if !same_ratio(
+        (before.tau_powers_g1[1], after.tau_powers_g1[1]),
+        (tau_g2_s, key.tau_g2),
+    ) {
         return false;
     }
 
     // Did the participant multiply the previous alpha by the new one?
-    if !same_ratio((before.alpha_tau_powers_g1[0], after.alpha_tau_powers_g1[0]), (alpha_g2_s, key.alpha_g2)) {
+    if !same_ratio(
+        (before.alpha_tau_powers_g1[0], after.alpha_tau_powers_g1[0]),
+        (alpha_g2_s, key.alpha_g2),
+    ) {
         return false;
     }
 
     // Did the participant multiply the previous beta by the new one?
-    if !same_ratio((before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (beta_g2_s, key.beta_g2)) {
+    if !same_ratio(
+        (before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]),
+        (beta_g2_s, key.beta_g2),
+    ) {
         return false;
     }
-    if !same_ratio((before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (before.beta_g2, after.beta_g2)) {
+    if !same_ratio(
+        (before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]),
+        (before.beta_g2, after.beta_g2),
+    ) {
        return false;
    }
 
     // Are the powers of tau correct?
-    if !same_ratio(power_pairs(&after.tau_powers_g1), (after.tau_powers_g2[0], after.tau_powers_g2[1])) {
+    if !same_ratio(
+        power_pairs(&after.tau_powers_g1),
+        (after.tau_powers_g2[0], after.tau_powers_g2[1]),
+    ) {
         return false;
     }
-    if !same_ratio(power_pairs(&after.tau_powers_g2), (after.tau_powers_g1[0], after.tau_powers_g1[1])) {
+    if !same_ratio(
+        power_pairs(&after.tau_powers_g2),
+        (after.tau_powers_g1[0], after.tau_powers_g1[1]),
+    ) {
         return false;
     }
-    if !same_ratio(power_pairs(&after.alpha_tau_powers_g1), (after.tau_powers_g2[0], after.tau_powers_g2[1])) {
+    if !same_ratio(
+        power_pairs(&after.alpha_tau_powers_g1),
+        (after.tau_powers_g2[0], after.tau_powers_g2[1]),
+    ) {
         return false;
     }
-    if !same_ratio(power_pairs(&after.beta_tau_powers_g1), (after.tau_powers_g2[0], after.tau_powers_g2[1])) {
+    if !same_ratio(
+        power_pairs(&after.beta_tau_powers_g1),
+        (after.tau_powers_g2[0], after.tau_powers_g2[1]),
+    ) {
         return false;
     }
 
     true
 }
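
Every check in this function reduces to one question: do two pairs of points share the same discrete-log ratio? With a pairing e, (A, B) and (C, D) satisfy B = A^x and D = C^x for the same x exactly when e(A, D) = e(B, C). A hedged sketch of the `same_ratio` helper these call sites import from `super::utils` (the real definition lives there; this is only the shape implied by the calls above):

    use bellman_ce::pairing::CurveAffine;

    // same_ratio((A, B), (C, D)) holds iff B = A^x and D = C^x for one x,
    // because e(A, D) = e(A, C)^x and e(B, C) = e(A, C)^x agree only when
    // the exponents do.
    fn same_ratio<G1: CurveAffine>(g1: (G1, G1), g2: (G1::Pair, G1::Pair)) -> bool {
        g1.0.pairing_with(&g2.1) == g1.1.pairing_with(&g2.0)
    }
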
 
 /// Abstraction over a reader which hashes the data being read.
 pub struct HashReader<R: Read> {
     reader: R,
-    hasher: Blake2b
+    hasher: Blake2b,
 }
 
 impl<R: Read> HashReader<R> {
     /// Construct a new `HashReader` given an existing `reader` by value.
     pub fn new(reader: R) -> Self {
         HashReader {
-            reader: reader,
-            hasher: Blake2b::default()
+            reader,
+            hasher: Blake2b::default(),
         }
     }
 
@@ -429,15 +465,15 @@ impl<R: Read> Read for HashReader<R> {
 /// Abstraction over a writer which hashes the data being written.
 pub struct HashWriter<W: Write> {
     writer: W,
-    hasher: Blake2b
+    hasher: Blake2b,
 }
 
 impl<W: Write> HashWriter<W> {
     /// Construct a new `HashWriter` given an existing `writer` by value.
     pub fn new(writer: W) -> Self {
         HashWriter {
-            writer: writer,
-            hasher: Blake2b::default()
+            writer,
+            hasher: Blake2b::default(),
         }
     }
 
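
`HashReader` and `HashWriter` tee every byte that passes through them into a BLAKE2b state, so the transcript hash of a whole challenge/response file falls out of ordinary streaming I/O. A minimal sketch of the reader half, assuming the blake2 0.6 `Digest` API (`input`/`result`) used throughout this file:

    extern crate blake2;

    use blake2::{Blake2b, Digest};
    use std::io::{self, Read};

    struct HashReader<R: Read> {
        reader: R,
        hasher: Blake2b,
    }

    impl<R: Read> Read for HashReader<R> {
        fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
            let bytes = self.reader.read(buf)?;
            // Feed exactly the bytes that were actually read into the hash.
            self.hasher.input(&buf[0..bytes]);
            Ok(bytes)
        }
    }

    fn main() {
        let mut r = HashReader {
            reader: &b"challenge bytes"[..],
            hasher: Blake2b::default(),
        };
        let mut out = Vec::new();
        r.read_to_end(&mut out).unwrap();
        let digest = r.hasher.result(); // 64-byte BLAKE2b digest of the stream
        assert_eq!(digest.len(), 64);
    }
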
(file diff suppressed because it is too large)
@@ -1,20 +1,12 @@
-extern crate powersoftau;
-extern crate bellman_ce;
-extern crate memmap;
-extern crate rand;
-extern crate blake2;
-extern crate byteorder;
-extern crate crypto;
+use powersoftau::bn256::Bn256CeremonyParameters;
 
-use powersoftau::bn256::{Bn256CeremonyParameters};
-use powersoftau::batched_accumulator::{BatchedAccumulator};
-use powersoftau::keypair::{keypair};
-use powersoftau::parameters::{UseCompression, CheckForCorrectness};
+use powersoftau::batched_accumulator::BatchedAccumulator;
+use powersoftau::keypair::keypair;
+use powersoftau::parameters::{CheckForCorrectness, UseCompression};
 
-use std::fs::OpenOptions;
 use bellman_ce::pairing::bn256::Bn256;
 use memmap::*;
+use std::fs::OpenOptions;
 
 use std::io::Write;
 
@@ -27,7 +19,7 @@ const INPUT_IS_COMPRESSED: UseCompression = UseCompression::No;
 const COMPRESS_THE_OUTPUT: UseCompression = UseCompression::Yes;
 const CHECK_INPUT_CORRECTNESS: CheckForCorrectness = CheckForCorrectness::No;
 
+#[allow(clippy::modulo_one)]
 fn main() {
     let args: Vec<String> = std::env::args().collect();
     if args.len() != 3 {
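
The new `#[allow(clippy::modulo_one)]` is presumably needed because of the progress-printing condition further down: with the constant `N = 10`, the modulus `1u64 << (N - 10)` folds to 1, and `i % 1` is always 0, which clippy flags as a no-op. The expression is kept generic so N can be raised without touching the loop:

    fn main() {
        const N: u64 = 10;
        // With N = 10 the step is 1u64 << 0 == 1, so every remainder is 0
        // and clippy::modulo_one fires; with, say, N = 20 the same
        // expression selects every 1024th iteration, which is the intent.
        let step = 1u64 << (N - 10);
        assert_eq!(7 % step, 0);
    }
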
@@ -37,29 +29,36 @@ fn main() {
     let challenge_filename = &args[1];
     let response_filename = &args[2];
 
-    println!("Will contribute a random beacon to accumulator for 2^{} powers of tau", Bn256CeremonyParameters::REQUIRED_POWER);
-    println!("In total will generate up to {} powers", Bn256CeremonyParameters::TAU_POWERS_G1_LENGTH);
+    println!(
+        "Will contribute a random beacon to accumulator for 2^{} powers of tau",
+        Bn256CeremonyParameters::REQUIRED_POWER
+    );
+    println!(
+        "In total will generate up to {} powers",
+        Bn256CeremonyParameters::TAU_POWERS_G1_LENGTH
+    );
 
     // Create an RNG based on the outcome of the random beacon
     let mut rng = {
-        use byteorder::{ReadBytesExt, BigEndian};
-        use rand::{SeedableRng};
-        use rand::chacha::ChaChaRng;
-        use crypto::sha2::Sha256;
+        use byteorder::{BigEndian, ReadBytesExt};
         use crypto::digest::Digest;
+        use crypto::sha2::Sha256;
+        use rand::chacha::ChaChaRng;
+        use rand::SeedableRng;
 
         // Place block hash here (block number #564321)
-        let mut cur_hash: [u8; 32] = hex!("0000000000000000000a558a61ddc8ee4e488d647a747fe4dcc362fe2026c620");
+        let mut cur_hash: [u8; 32] =
+            hex!("0000000000000000000a558a61ddc8ee4e488d647a747fe4dcc362fe2026c620");
 
         // Performs 2^n hash iterations over it
         const N: u64 = 10;
 
-        for i in 0..(1u64<<N) {
+        for i in 0..(1u64 << N) {
             // Print 1024 of the interstitial states
             // so that verification can be
             // parallelized
 
-            if i % (1u64<<(N-10)) == 0 {
+            if i % (1u64 << (N - 10)) == 0 {
                 print!("{}: ", i);
                 for b in cur_hash.iter() {
                     print!("{:02x}", b);
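
The loop above is the random beacon itself: start from a published Bitcoin block hash and apply SHA-256 2^N times, printing interstitial states so anyone can re-verify stretches of the chain in parallel. A condensed, runnable sketch of the hash chain using the same rust-crypto crate (the zero starting hash is a stand-in for the real block hash):

    extern crate crypto; // rust-crypto 0.2, as declared in Cargo.toml above

    use crypto::digest::Digest;
    use crypto::sha2::Sha256;

    fn main() {
        let mut cur_hash = [0u8; 32]; // stand-in for the published block hash
        const N: u64 = 10;
        for _ in 0..(1u64 << N) {
            let mut h = Sha256::new();
            h.input(&cur_hash);
            h.result(&mut cur_hash); // replace the state with the next iterate
        }
        // Anyone can redo the 2^N iterations to check the beacon output.
        for b in cur_hash.iter() {
            print!("{:02x}", b);
        }
        println!();
    }
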
@@ -81,8 +80,10 @@ fn main() {
         let mut digest = &cur_hash[..];
 
         let mut seed = [0u32; 8];
-        for i in 0..8 {
-            seed[i] = digest.read_u32::<BigEndian>().expect("digest is large enough for this to work");
+        for s in &mut seed {
+            *s = digest
+                .read_u32::<BigEndian>()
+                .expect("digest is large enough for this to work");
         }
 
         ChaChaRng::from_seed(&seed)
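
The rewritten seed loop fixes clippy's needless_range_loop: instead of indexing `seed[i]` over `0..8`, iterate the target array directly. It works because `Read` is implemented for `&[u8]`, so each `read_u32::<BigEndian>()` consumes four bytes from the front of the digest slice. Standalone example:

    extern crate byteorder;

    use byteorder::{BigEndian, ReadBytesExt};

    fn main() {
        let cur_hash = [0xABu8; 32];
        let mut digest = &cur_hash[..]; // `Read` on &[u8] advances this binding
        let mut seed = [0u32; 8];
        for s in &mut seed {
            *s = digest
                .read_u32::<BigEndian>()
                .expect("32 bytes hold exactly 8 big-endian words");
        }
        assert_eq!(seed[0], 0xABAB_ABAB);
    }
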
@@ -97,22 +98,28 @@ fn main() {
         .expect("unable open challenge file in this directory");
 
     {
-        let metadata = reader.metadata().expect("unable to get filesystem metadata for challenge file");
+        let metadata = reader
+            .metadata()
+            .expect("unable to get filesystem metadata for challenge file");
         let expected_challenge_length = match INPUT_IS_COMPRESSED {
-            UseCompression::Yes => {
-                Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE
-            },
-            UseCompression::No => {
-                Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE
-            }
+            UseCompression::Yes => Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE,
+            UseCompression::No => Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE,
         };
 
         if metadata.len() != (expected_challenge_length as u64) {
-            panic!("The size of challenge file should be {}, but it's {}, so something isn't right.", expected_challenge_length, metadata.len());
+            panic!(
+                "The size of challenge file should be {}, but it's {}, so something isn't right.",
+                expected_challenge_length,
+                metadata.len()
+            );
         }
     }
 
-    let readable_map = unsafe { MmapOptions::new().map(&reader).expect("unable to create a memory map for input") };
+    let readable_map = unsafe {
+        MmapOptions::new()
+            .map(&reader)
+            .expect("unable to create a memory map for input")
+    };
 
     // Create response file in this directory
     let writer = OpenOptions::new()
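
Both binaries address the challenge and response files through memory maps rather than buffered reads, so multi-gigabyte accumulators can be processed without loading them wholesale. A minimal sketch with the memmap 0.7 API pinned in the lockfile; the mapping is unsafe because the underlying file could be mutated by another process while mapped (the temp-file name here is purely illustrative):

    extern crate memmap; // memmap 0.7.0, as pinned in Cargo.lock above

    use memmap::MmapOptions;
    use std::fs::File;
    use std::io::Write;

    fn main() -> std::io::Result<()> {
        // Illustrative scratch file, only to make the sketch self-contained.
        let path = std::env::temp_dir().join("mmap_sketch.bin");
        File::create(&path)?.write_all(b"challenge")?;

        let file = File::open(&path)?;
        // unsafe: the map's contents may change underneath us if the file is
        // modified externally, violating Rust's aliasing guarantees.
        let map = unsafe { MmapOptions::new().map(&file)? };
        assert_eq!(&map[0..9], b"challenge");
        Ok(())
    }
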
@@ -123,21 +130,27 @@ fn main() {
         .expect("unable to create response file in this directory");
 
     let required_output_length = match COMPRESS_THE_OUTPUT {
-        UseCompression::Yes => {
-            Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE
-        },
+        UseCompression::Yes => Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE,
         UseCompression::No => {
-            Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE + Bn256CeremonyParameters::PUBLIC_KEY_SIZE
+            Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE
+                + Bn256CeremonyParameters::PUBLIC_KEY_SIZE
         }
     };
 
-    writer.set_len(required_output_length as u64).expect("must make output file large enough");
+    writer
+        .set_len(required_output_length as u64)
+        .expect("must make output file large enough");
 
-    let mut writable_map = unsafe { MmapOptions::new().map_mut(&writer).expect("unable to create a memory map for output") };
+    let mut writable_map = unsafe {
+        MmapOptions::new()
+            .map_mut(&writer)
+            .expect("unable to create a memory map for output")
+    };
 
     println!("Calculating previous contribution hash...");
 
-    let current_accumulator_hash = BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&readable_map);
+    let current_accumulator_hash =
+        BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&readable_map);
 
     {
         println!("Contributing on top of the hash:");
@@ -152,9 +165,13 @@ fn main() {
         println!();
     }
 
-    (&mut writable_map[0..]).write(current_accumulator_hash.as_slice()).expect("unable to write a challenge hash to mmap");
+    (&mut writable_map[0..])
+        .write_all(current_accumulator_hash.as_slice())
+        .expect("unable to write a challenge hash to mmap");
 
-    writable_map.flush().expect("unable to write hash to response file");
+    writable_map
+        .flush()
+        .expect("unable to write hash to response file");
 
     // Construct our keypair using the RNG we created above
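
Swapping `.write(...)` for `.write_all(...)` above fixes clippy's unused_io_amount lint: `Write::write` is allowed to perform a short write and returns how many bytes it took, a count the old code silently discarded; `write_all` retries until the whole buffer is written or an error surfaces. Tiny example:

    use std::io::Write;

    fn main() {
        let hash = [0u8; 64];
        let mut sink: Vec<u8> = Vec::new();
        // `sink.write(&hash)` could legally report fewer than 64 bytes
        // written; ignoring that count is what clippy::unused_io_amount
        // warns about. `write_all` guarantees the full buffer goes out.
        sink.write_all(&hash).expect("writes the entire hash");
        assert_eq!(sink.len(), 64);
    }
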
@@ -165,25 +182,33 @@ fn main() {
 
     // this computes a transformation and writes it
     BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::transform(
         &readable_map,
         &mut writable_map,
         INPUT_IS_COMPRESSED,
         COMPRESS_THE_OUTPUT,
         CHECK_INPUT_CORRECTNESS,
-        &privkey
-    ).expect("must transform with the key");
+        &privkey,
+    )
+    .expect("must transform with the key");
     println!("Finishing writing your contribution to response file...");
 
     // Write the public key
-    pubkey.write::<Bn256CeremonyParameters>(&mut writable_map, COMPRESS_THE_OUTPUT).expect("unable to write public key");
+    pubkey
+        .write::<Bn256CeremonyParameters>(&mut writable_map, COMPRESS_THE_OUTPUT)
+        .expect("unable to write public key");
 
     // Get the hash of the contribution, so the user can compare later
-    let output_readonly = writable_map.make_read_only().expect("must make a map readonly");
-    let contribution_hash = BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&output_readonly);
+    let output_readonly = writable_map
+        .make_read_only()
+        .expect("must make a map readonly");
+    let contribution_hash =
+        BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&output_readonly);
 
-    print!("Done!\n\n\
+    print!(
+        "Done!\n\n\
         Your contribution has been written to response file\n\n\
-        The BLAKE2b hash of response file is:\n");
+        The BLAKE2b hash of response file is:\n"
+    );
 
     for line in contribution_hash.as_slice().chunks(16) {
         print!("\t");
@@ -1,19 +1,11 @@
-extern crate powersoftau;
-extern crate bellman_ce;
-extern crate memmap;
-extern crate rand;
-extern crate blake2;
-extern crate byteorder;
-extern crate exitcode;
-
-use powersoftau::bn256::{Bn256CeremonyParameters};
-use powersoftau::batched_accumulator::{BatchedAccumulator};
-use powersoftau::keypair::{keypair};
-use powersoftau::parameters::{UseCompression, CheckForCorrectness};
+use powersoftau::batched_accumulator::BatchedAccumulator;
+use powersoftau::bn256::Bn256CeremonyParameters;
+use powersoftau::keypair::keypair;
+use powersoftau::parameters::{CheckForCorrectness, UseCompression};
 
-use std::fs::OpenOptions;
 use bellman_ce::pairing::bn256::Bn256;
 use memmap::*;
+use std::fs::OpenOptions;
 
 use std::io::{Read, Write};
 
@@ -32,15 +24,21 @@ fn main() {
     let challenge_filename = &args[1];
     let response_filename = &args[2];
 
-    println!("Will contribute to accumulator for 2^{} powers of tau", Bn256CeremonyParameters::REQUIRED_POWER);
-    println!("In total will generate up to {} powers", Bn256CeremonyParameters::TAU_POWERS_G1_LENGTH);
+    println!(
+        "Will contribute to accumulator for 2^{} powers of tau",
+        Bn256CeremonyParameters::REQUIRED_POWER
+    );
+    println!(
+        "In total will generate up to {} powers",
+        Bn256CeremonyParameters::TAU_POWERS_G1_LENGTH
+    );
 
     // Create an RNG based on a mixture of system randomness and user provided randomness
     let mut rng = {
-        use byteorder::{ReadBytesExt, BigEndian};
         use blake2::{Blake2b, Digest};
-        use rand::{SeedableRng, Rng, OsRng};
+        use byteorder::{BigEndian, ReadBytesExt};
         use rand::chacha::ChaChaRng;
+        use rand::{OsRng, Rng, SeedableRng};
 
         let h = {
             let mut system_rng = OsRng::new().unwrap();
@@ -55,7 +53,9 @@ fn main() {
             // Ask the user to provide some information for additional entropy
             let mut user_input = String::new();
             println!("Type some random text and press [ENTER] to provide additional entropy...");
-            std::io::stdin().read_line(&mut user_input).expect("expected to read some random text from the user");
+            std::io::stdin()
+                .read_line(&mut user_input)
+                .expect("expected to read some random text from the user");
 
             // Hash it all up to make a seed
             h.input(&user_input.as_bytes());
@@ -66,8 +66,10 @@ fn main() {
 
         // Interpret the first 32 bytes of the digest as 8 32-bit words
         let mut seed = [0u32; 8];
-        for i in 0..8 {
-            seed[i] = digest.read_u32::<BigEndian>().expect("digest is large enough for this to work");
+        for s in &mut seed {
+            *s = digest
+                .read_u32::<BigEndian>()
+                .expect("digest is large enough for this to work");
         }
 
         ChaChaRng::from_seed(&seed)
@@ -75,52 +77,67 @@ fn main() {
 
     // Try to load challenge file from disk.
     let reader = OpenOptions::new()
         .read(true)
         .open(challenge_filename)
         .expect("unable open challenge file");
     {
-        let metadata = reader.metadata().expect("unable to get filesystem metadata for challenge file");
+        let metadata = reader
+            .metadata()
+            .expect("unable to get filesystem metadata for challenge file");
         let expected_challenge_length = match INPUT_IS_COMPRESSED {
-            UseCompression::Yes => {
-                Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE
-            },
-            UseCompression::No => {
-                Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE
-            }
+            UseCompression::Yes => Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE,
+            UseCompression::No => Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE,
        };
 
         if metadata.len() != (expected_challenge_length as u64) {
-            panic!("The size of challenge file should be {}, but it's {}, so something isn't right.", expected_challenge_length, metadata.len());
+            panic!(
+                "The size of challenge file should be {}, but it's {}, so something isn't right.",
+                expected_challenge_length,
+                metadata.len()
+            );
         }
     }
 
-    let readable_map = unsafe { MmapOptions::new().map(&reader).expect("unable to create a memory map for input") };
+    let readable_map = unsafe {
+        MmapOptions::new()
+            .map(&reader)
+            .expect("unable to create a memory map for input")
+    };
 
     // Create response file in this directory
     let writer = OpenOptions::new()
         .read(true)
         .write(true)
         .create_new(true)
         .open(response_filename)
         .expect("unable to create response file");
 
     let required_output_length = match COMPRESS_THE_OUTPUT {
-        UseCompression::Yes => {
-            Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE
-        },
+        UseCompression::Yes => Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE,
         UseCompression::No => {
-            Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE + Bn256CeremonyParameters::PUBLIC_KEY_SIZE
+            Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE
+                + Bn256CeremonyParameters::PUBLIC_KEY_SIZE
         }
     };
 
-    writer.set_len(required_output_length as u64).expect("must make output file large enough");
+    writer
+        .set_len(required_output_length as u64)
+        .expect("must make output file large enough");
 
-    let mut writable_map = unsafe { MmapOptions::new().map_mut(&writer).expect("unable to create a memory map for output") };
+    let mut writable_map = unsafe {
+        MmapOptions::new()
+            .map_mut(&writer)
+            .expect("unable to create a memory map for output")
+    };
 
     println!("Calculating previous contribution hash...");
 
-    assert!(UseCompression::No == INPUT_IS_COMPRESSED, "Hashing the compressed file in not yet defined");
-    let current_accumulator_hash = BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&readable_map);
+    assert!(
+        UseCompression::No == INPUT_IS_COMPRESSED,
+        "Hashing the compressed file in not yet defined"
+    );
+    let current_accumulator_hash =
+        BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&readable_map);
 
     {
         println!("`challenge` file contains decompressed points and has a hash:");
@@ -135,15 +152,23 @@ fn main() {
         println!();
     }
 
-    (&mut writable_map[0..]).write(current_accumulator_hash.as_slice()).expect("unable to write a challenge hash to mmap");
+    (&mut writable_map[0..])
+        .write_all(current_accumulator_hash.as_slice())
+        .expect("unable to write a challenge hash to mmap");
 
-    writable_map.flush().expect("unable to write hash to response file");
+    writable_map
+        .flush()
+        .expect("unable to write hash to response file");
     }
 
     {
         let mut challenge_hash = [0; 64];
-        let memory_slice = readable_map.get(0..64).expect("must read point data from file");
-        memory_slice.clone().read_exact(&mut challenge_hash).expect("couldn't read hash of challenge file from response file");
+        let mut memory_slice = readable_map
+            .get(0..64)
+            .expect("must read point data from file");
+        memory_slice
+            .read_exact(&mut challenge_hash)
+            .expect("couldn't read hash of challenge file from response file");
 
         println!("`challenge` file claims (!!! Must not be blindly trusted) that it was based on the original contribution with a hash:");
         for line in challenge_hash.chunks(16) {
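
Dropping the `.clone()` on `memory_slice` works because `Read` is implemented for `&[u8]` itself: `read_exact` takes the slice binding by `&mut` and advances it past the bytes consumed, so all the code needs is a `mut` binding (the old clone merely copied the reference, which clippy flags). Standalone illustration:

    use std::io::Read;

    fn main() {
        let data = [1u8, 2, 3, 4];
        let mut slice = &data[..]; // must be `mut`: reading advances the binding
        let mut first_two = [0u8; 2];
        slice.read_exact(&mut first_two).expect("enough bytes");
        assert_eq!(first_two, [1, 2]);
        assert_eq!(slice, &[3u8, 4][..]); // the slice moved past what was read
    }
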
@@ -166,28 +191,36 @@ fn main() {
 
     // this computes a transformation and writes it
     BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::transform(
         &readable_map,
         &mut writable_map,
         INPUT_IS_COMPRESSED,
         COMPRESS_THE_OUTPUT,
         CHECK_INPUT_CORRECTNESS,
-        &privkey
-    ).expect("must transform with the key");
+        &privkey,
+    )
+    .expect("must transform with the key");
 
     println!("Finishing writing your contribution to response file...");
 
     // Write the public key
-    pubkey.write::<Bn256CeremonyParameters>(&mut writable_map, COMPRESS_THE_OUTPUT).expect("unable to write public key");
+    pubkey
+        .write::<Bn256CeremonyParameters>(&mut writable_map, COMPRESS_THE_OUTPUT)
+        .expect("unable to write public key");
 
     writable_map.flush().expect("must flush a memory map");
 
     // Get the hash of the contribution, so the user can compare later
-    let output_readonly = writable_map.make_read_only().expect("must make a map readonly");
-    let contribution_hash = BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&output_readonly);
+    let output_readonly = writable_map
+        .make_read_only()
+        .expect("must make a map readonly");
+    let contribution_hash =
+        BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&output_readonly);
 
-    print!("Done!\n\n\
+    print!(
+        "Done!\n\n\
         Your contribution has been written to response file\n\n\
-        The BLAKE2b hash of response file is:\n");
+        The BLAKE2b hash of response file is:\n"
+    );
 
     for line in contribution_hash.as_slice().chunks(16) {
         print!("\t");
@@ -1,14 +1,11 @@
-extern crate powersoftau;
-extern crate bellman_ce;
-
-use powersoftau::bn256::{Bn256CeremonyParameters};
-use powersoftau::accumulator::{Accumulator};
-use powersoftau::utils::{blank_hash};
-use powersoftau::parameters::{UseCompression};
+use powersoftau::accumulator::Accumulator;
+use powersoftau::bn256::Bn256CeremonyParameters;
+use powersoftau::parameters::UseCompression;
+use powersoftau::utils::blank_hash;
 
-use std::fs::OpenOptions;
-use std::io::{Write, BufWriter};
 use bellman_ce::pairing::bn256::Bn256;
+use std::fs::OpenOptions;
+use std::io::{BufWriter, Write};
 
 fn main() {
     let args: Vec<String> = std::env::args().collect();
@@ -28,13 +25,16 @@ fn main() {
     let mut writer = BufWriter::new(file);
 
     // Write a blank BLAKE2b hash:
-    writer.write_all(&blank_hash().as_slice()).expect("unable to write blank hash to challenge file");
+    writer
+        .write_all(&blank_hash().as_slice())
+        .expect("unable to write blank hash to challenge file");
 
-    let parameters = Bn256CeremonyParameters{};
+    let parameters = Bn256CeremonyParameters {};
 
     let acc: Accumulator<Bn256, _> = Accumulator::new(parameters);
     println!("Writing an empty accumulator to disk");
-    acc.serialize(&mut writer, UseCompression::No).expect("unable to write fresh accumulator to challenge file");
+    acc.serialize(&mut writer, UseCompression::No)
+        .expect("unable to write fresh accumulator to challenge file");
     writer.flush().expect("unable to flush accumulator to disk");
 
     println!("Wrote a fresh accumulator to challenge file");
@@ -1,17 +1,13 @@
-extern crate powersoftau;
-extern crate bellman_ce;
-extern crate memmap;
-
-use powersoftau::bn256::{Bn256CeremonyParameters};
+use powersoftau::bn256::Bn256CeremonyParameters;
 
-use powersoftau::batched_accumulator::{BatchedAccumulator};
-use powersoftau::parameters::{UseCompression};
-use powersoftau::utils::{blank_hash};
+use powersoftau::batched_accumulator::BatchedAccumulator;
+use powersoftau::parameters::UseCompression;
+use powersoftau::utils::blank_hash;
 
-use std::fs::OpenOptions;
-use std::io::{Write};
 use bellman_ce::pairing::bn256::Bn256;
 use memmap::*;
+use std::fs::OpenOptions;
+use std::io::Write;
 
 use powersoftau::parameters::PowersOfTauParameters;
 
@@ -25,8 +21,14 @@ fn main() {
     }
     let challenge_filename = &args[1];
 
-    println!("Will generate an empty accumulator for 2^{} powers of tau", Bn256CeremonyParameters::REQUIRED_POWER);
-    println!("In total will generate up to {} powers", Bn256CeremonyParameters::TAU_POWERS_G1_LENGTH);
+    println!(
+        "Will generate an empty accumulator for 2^{} powers of tau",
+        Bn256CeremonyParameters::REQUIRED_POWER
+    );
+    println!(
+        "In total will generate up to {} powers",
+        Bn256CeremonyParameters::TAU_POWERS_G1_LENGTH
+    );
 
     let file = OpenOptions::new()
         .read(true)
@ -34,24 +36,32 @@ fn main() {
         .create_new(true)
         .open(challenge_filename)
         .expect("unable to create challenge file");

     let expected_challenge_length = match COMPRESS_NEW_CHALLENGE {
         UseCompression::Yes => {
-            Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE - Bn256CeremonyParameters::PUBLIC_KEY_SIZE
-        },
-        UseCompression::No => {
-            Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE
+            Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE
+                - Bn256CeremonyParameters::PUBLIC_KEY_SIZE
         }
+        UseCompression::No => Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE,
     };

-    file.set_len(expected_challenge_length as u64).expect("unable to allocate large enough file");
+    file.set_len(expected_challenge_length as u64)
+        .expect("unable to allocate large enough file");

-    let mut writable_map = unsafe { MmapOptions::new().map_mut(&file).expect("unable to create a memory map") };
+    let mut writable_map = unsafe {
+        MmapOptions::new()
+            .map_mut(&file)
+            .expect("unable to create a memory map")
+    };

     // Write a blank BLAKE2b hash:
     let hash = blank_hash();
-    (&mut writable_map[0..]).write(hash.as_slice()).expect("unable to write a default hash to mmap");
-    writable_map.flush().expect("unable to write blank hash to challenge file");
+    (&mut writable_map[0..])
+        .write_all(hash.as_slice())
+        .expect("unable to write a default hash to mmap");
+    writable_map
+        .flush()
+        .expect("unable to write blank hash to challenge file");

     println!("Blank hash for an empty challenge:");
     for line in hash.as_slice().chunks(16) {
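The `write` → `write_all` switch throughout this commit is the fix for clippy's `unused_io_amount` lint: `Write::write` may accept only part of the buffer and returns the count, which the old code discarded, while `write_all` loops until every byte lands. A self-contained sketch of the difference:

    use std::io::Write;

    fn main() -> std::io::Result<()> {
        let mut sink: Vec<u8> = Vec::new();
        let hash = [0u8; 64]; // stand-in for the blank BLAKE2b hash

        // `write` reports how much was accepted; ignoring the count can
        // silently truncate the output on a short write.
        let accepted = sink.write(&hash)?;
        assert!(accepted <= hash.len());

        // `write_all` retries until the whole buffer is written or errors out.
        sink.write_all(&hash)?;
        Ok(())
    }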
@ -65,12 +75,21 @@ fn main() {
         println!();
     }

-    BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::generate_initial(&mut writable_map, COMPRESS_NEW_CHALLENGE).expect("generation of initial accumulator is successful");
-    writable_map.flush().expect("unable to flush memmap to disk");
+    BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::generate_initial(
+        &mut writable_map,
+        COMPRESS_NEW_CHALLENGE,
+    )
+    .expect("generation of initial accumulator is successful");
+    writable_map
+        .flush()
+        .expect("unable to flush memmap to disk");

     // Get the hash of the contribution, so the user can compare later
-    let output_readonly = writable_map.make_read_only().expect("must make a map readonly");
-    let contribution_hash = BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&output_readonly);
+    let output_readonly = writable_map
+        .make_read_only()
+        .expect("must make a map readonly");
+    let contribution_hash =
+        BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&output_readonly);

     println!("Empty contribution is formed with a hash:");
@ -1,27 +1,23 @@
-extern crate powersoftau;
-extern crate rand;
-extern crate blake2;
-extern crate byteorder;
-extern crate bellman_ce;
-
-use bellman_ce::pairing::{CurveAffine, CurveProjective};
 use bellman_ce::pairing::bn256::Bn256;
 use bellman_ce::pairing::bn256::{G1, G2};
-use powersoftau::bn256::{Bn256CeremonyParameters};
+use bellman_ce::pairing::{CurveAffine, CurveProjective};
 use powersoftau::batched_accumulator::*;
+use powersoftau::bn256::Bn256CeremonyParameters;
 use powersoftau::*;

 use crate::parameters::*;

-use bellman_ce::multicore::Worker;
 use bellman_ce::domain::{EvaluationDomain, Point};
+use bellman_ce::multicore::Worker;

 use std::fs::OpenOptions;
 use std::io::{BufWriter, Write};

 use memmap::*;

-const fn num_bits<T>() -> usize { std::mem::size_of::<T>() * 8 }
+const fn num_bits<T>() -> usize {
+    std::mem::size_of::<T>() * 8
+}

 fn log_2(x: u64) -> u32 {
     assert!(x > 0);
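For reference, the reformatted `num_bits` helper exists to support the `log_2` routine whose body this hunk leaves unchanged; a runnable sketch of the pair, with the `log_2` body inferred from the assert shown rather than quoted from the diff:

    const fn num_bits<T>() -> usize {
        std::mem::size_of::<T>() * 8
    }

    /// Floor of log2; panics on zero, where log2 is undefined.
    fn log_2(x: u64) -> u32 {
        assert!(x > 0);
        num_bits::<u64>() as u32 - x.leading_zeros() - 1
    }

    fn main() {
        assert_eq!(log_2(1), 0);
        assert_eq!(log_2(1 << 20), 20); // 2^20 powers -> max degree 20
    }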
@ -38,51 +34,63 @@ fn main() {

     // Try to load response file from disk.
     let reader = OpenOptions::new()
         .read(true)
         .open(response_filename)
         .expect("unable open response file in this directory");
-    let response_readable_map = unsafe { MmapOptions::new().map(&reader).expect("unable to create a memory map for input") };
+    let response_readable_map = unsafe {
+        MmapOptions::new()
+            .map(&reader)
+            .expect("unable to create a memory map for input")
+    };

     let current_accumulator = BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::deserialize(
         &response_readable_map,
         CheckForCorrectness::Yes,
         UseCompression::Yes,
-    ).expect("unable to read uncompressed accumulator");
+    )
+    .expect("unable to read uncompressed accumulator");

     let worker = &Worker::new();

     // Create the parameters for various 2^m circuit depths.
     let max_degree = log_2(current_accumulator.tau_powers_g2.len() as u64);
-    for m in 0..max_degree+1 {
+    for m in 0..=max_degree {
         let paramname = format!("phase1radix2m{}", m);
         println!("Creating {}", paramname);

         let degree = 1 << m;

         let mut g1_coeffs = EvaluationDomain::from_coeffs(
-            current_accumulator.tau_powers_g1[0..degree].iter()
+            current_accumulator.tau_powers_g1[0..degree]
+                .iter()
                 .map(|e| Point(e.into_projective()))
-                .collect()
-        ).unwrap();
+                .collect(),
+        )
+        .unwrap();

         let mut g2_coeffs = EvaluationDomain::from_coeffs(
-            current_accumulator.tau_powers_g2[0..degree].iter()
+            current_accumulator.tau_powers_g2[0..degree]
+                .iter()
                 .map(|e| Point(e.into_projective()))
-                .collect()
-        ).unwrap();
+                .collect(),
+        )
+        .unwrap();

         let mut g1_alpha_coeffs = EvaluationDomain::from_coeffs(
-            current_accumulator.alpha_tau_powers_g1[0..degree].iter()
+            current_accumulator.alpha_tau_powers_g1[0..degree]
+                .iter()
                 .map(|e| Point(e.into_projective()))
-                .collect()
-        ).unwrap();
+                .collect(),
+        )
+        .unwrap();

         let mut g1_beta_coeffs = EvaluationDomain::from_coeffs(
-            current_accumulator.beta_tau_powers_g1[0..degree].iter()
+            current_accumulator.beta_tau_powers_g1[0..degree]
+                .iter()
                 .map(|e| Point(e.into_projective()))
-                .collect()
-        ).unwrap();
+                .collect(),
+        )
+        .unwrap();

         // This converts all of the elements into Lagrange coefficients
         // for later construction of interpolation polynomials
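`for m in 0..max_degree+1` → `for m in 0..=max_degree` is clippy's `range_plus_one` suggestion; the two ranges visit exactly the same values, the inclusive form just says so directly. A quick check:

    fn main() {
        let max_degree = 5u32;
        let exclusive: Vec<u32> = (0..max_degree + 1).collect();
        let inclusive: Vec<u32> = (0..=max_degree).collect();
        assert_eq!(exclusive, inclusive); // both are 0, 1, 2, 3, 4, 5
    }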
@ -103,21 +111,13 @@ fn main() {

         // Remove the Point() wrappers

-        let mut g1_coeffs = g1_coeffs.into_iter()
-            .map(|e| e.0)
-            .collect::<Vec<_>>();
+        let mut g1_coeffs = g1_coeffs.into_iter().map(|e| e.0).collect::<Vec<_>>();

-        let mut g2_coeffs = g2_coeffs.into_iter()
-            .map(|e| e.0)
-            .collect::<Vec<_>>();
+        let mut g2_coeffs = g2_coeffs.into_iter().map(|e| e.0).collect::<Vec<_>>();

-        let mut g1_alpha_coeffs = g1_alpha_coeffs.into_iter()
-            .map(|e| e.0)
-            .collect::<Vec<_>>();
+        let mut g1_alpha_coeffs = g1_alpha_coeffs.into_iter().map(|e| e.0).collect::<Vec<_>>();

-        let mut g1_beta_coeffs = g1_beta_coeffs.into_iter()
-            .map(|e| e.0)
-            .collect::<Vec<_>>();
+        let mut g1_beta_coeffs = g1_beta_coeffs.into_iter().map(|e| e.0).collect::<Vec<_>>();

         // Batch normalize
         G1::batch_normalization(&mut g1_coeffs);
@ -130,7 +130,7 @@ fn main() {
         // x^(i + m) - x^i for i in 0..=(m-2)
         // for radix2 evaluation domains
         let mut h = Vec::with_capacity(degree - 1);
-        for i in 0..(degree-1) {
+        for i in 0..(degree - 1) {
             let mut tmp = current_accumulator.tau_powers_g1[i + degree].into_projective();
             let mut tmp2 = current_accumulator.tau_powers_g1[i].into_projective();
             tmp2.negate();
@ -144,39 +144,41 @@ fn main() {

         // Create the parameter file
         let writer = OpenOptions::new()
             .read(false)
             .write(true)
             .create_new(true)
             .open(paramname)
             .expect("unable to create parameter file in this directory");

         let mut writer = BufWriter::new(writer);

         // Write alpha (in g1)
         // Needed by verifier for e(alpha, beta)
         // Needed by prover for A and C elements of proof
-        writer.write_all(
-            current_accumulator.alpha_tau_powers_g1[0]
-                .into_uncompressed()
-                .as_ref()
-        ).unwrap();
+        writer
+            .write_all(
+                current_accumulator.alpha_tau_powers_g1[0]
+                    .into_uncompressed()
+                    .as_ref(),
+            )
+            .unwrap();

         // Write beta (in g1)
         // Needed by prover for C element of proof
-        writer.write_all(
-            current_accumulator.beta_tau_powers_g1[0]
-                .into_uncompressed()
-                .as_ref()
-        ).unwrap();
+        writer
+            .write_all(
+                current_accumulator.beta_tau_powers_g1[0]
+                    .into_uncompressed()
+                    .as_ref(),
+            )
+            .unwrap();

         // Write beta (in g2)
         // Needed by verifier for e(alpha, beta)
         // Needed by prover for B element of proof
-        writer.write_all(
-            current_accumulator.beta_g2
-                .into_uncompressed()
-                .as_ref()
-        ).unwrap();
+        writer
+            .write_all(current_accumulator.beta_g2.into_uncompressed().as_ref())
+            .unwrap();

         // Lagrange coefficients in G1 (for constructing
         // LC/IC queries and precomputing polynomials for A)
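rustfmt only reshapes these writes; the logic — dump each group element in uncompressed form through a buffered writer — is untouched. The same shape with plain bytes standing in for the curve points (the buffer and byte length here are illustrative):

    use std::io::{BufWriter, Write};

    fn main() -> std::io::Result<()> {
        // BufWriter batches the many small point writes that follow.
        let mut writer = BufWriter::new(Vec::<u8>::new());

        let uncompressed_point = [0u8; 64]; // stand-in for into_uncompressed() bytes
        writer.write_all(uncompressed_point.as_ref())?;

        writer.flush()?;
        Ok(())
    }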
@ -184,10 +186,9 @@ fn main() {
             // Was normalized earlier in parallel
             let coeff = coeff.into_affine();

-            writer.write_all(
-                coeff.into_uncompressed()
-                    .as_ref()
-            ).unwrap();
+            writer
+                .write_all(coeff.into_uncompressed().as_ref())
+                .unwrap();
         }

         // Lagrange coefficients in G2 (for precomputing
@ -196,10 +197,9 @@ fn main() {
             // Was normalized earlier in parallel
             let coeff = coeff.into_affine();

-            writer.write_all(
-                coeff.into_uncompressed()
-                    .as_ref()
-            ).unwrap();
+            writer
+                .write_all(coeff.into_uncompressed().as_ref())
+                .unwrap();
         }

         // Lagrange coefficients in G1 with alpha (for
@ -208,10 +208,9 @@ fn main() {
             // Was normalized earlier in parallel
             let coeff = coeff.into_affine();

-            writer.write_all(
-                coeff.into_uncompressed()
-                    .as_ref()
-            ).unwrap();
+            writer
+                .write_all(coeff.into_uncompressed().as_ref())
+                .unwrap();
         }

         // Lagrange coefficients in G1 with beta (for
@ -220,10 +219,9 @@ fn main() {
             // Was normalized earlier in parallel
             let coeff = coeff.into_affine();

-            writer.write_all(
-                coeff.into_uncompressed()
-                    .as_ref()
-            ).unwrap();
+            writer
+                .write_all(coeff.into_uncompressed().as_ref())
+                .unwrap();
         }

         // Bases for H polynomial computation
@ -231,10 +229,9 @@ fn main() {
             // Was normalized earlier in parallel
             let coeff = coeff.into_affine();

-            writer.write_all(
-                coeff.into_uncompressed()
-                    .as_ref()
-            ).unwrap();
+            writer
+                .write_all(coeff.into_uncompressed().as_ref())
+                .unwrap();
         }
     }
 }
@ -1,27 +1,18 @@
-extern crate powersoftau;
-extern crate rand;
-extern crate blake2;
-extern crate byteorder;
-extern crate bellman_ce;
-
 use bellman_ce::pairing::bn256::Bn256;
-use powersoftau::bn256::Bn256CeremonyParameters;
-use powersoftau::batched_accumulator::*;
-use powersoftau::parameters::UseCompression;
-use powersoftau::utils::reduced_hash;
-use powersoftau::*;
-
-use crate::parameters::*;
+use powersoftau::{
+    batched_accumulator::BatchedAccumulator,
+    bn256::Bn256CeremonyParameters,
+    parameters::{CheckForCorrectness, PowersOfTauParameters, UseCompression},
+    utils::reduced_hash,
+};

 use std::fs::OpenOptions;
 use std::io::Write;

-use memmap::*;
+use memmap::MmapOptions;

 #[derive(Clone)]
-pub struct Bn256ReducedCeremonyParameters {
-
-}
+pub struct Bn256ReducedCeremonyParameters {}

 impl PowersOfTauParameters for Bn256ReducedCeremonyParameters {
     const REQUIRED_POWER: usize = 10;
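This file also collapses its `powersoftau` imports into one nested `use` tree, which is purely syntactic grouping. The same transformation applied to std paths, as a self-contained illustration:

    // Flat:
    //     use std::fs::OpenOptions;
    //     use std::io::Write;
    // Nested, with identical meaning:
    use std::{fs::OpenOptions, io::Write};

    fn main() -> std::io::Result<()> {
        let mut file = OpenOptions::new()
            .write(true)
            .create(true)
            .open("demo.tmp")?;
        file.write_all(b"nested use trees resolve the same names")?;
        Ok(())
    }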
@ -33,7 +24,9 @@ impl PowersOfTauParameters for Bn256ReducedCeremonyParameters {
     const G2_COMPRESSED_BYTE_SIZE: usize = 64;
 }

-const fn num_bits<T>() -> usize { std::mem::size_of::<T>() * 8 }
+const fn num_bits<T>() -> usize {
+    std::mem::size_of::<T>() * 8
+}

 pub fn log_2(x: u64) -> u32 {
     assert!(x > 0);
@ -43,40 +36,66 @@ pub fn log_2(x: u64) -> u32 {
 fn main() {
     // Try to load `./challenge` from disk.
     let reader = OpenOptions::new()
         .read(true)
         .open("challenge")
         .expect("unable open `./challenge` in this directory");
-    let challenge_readable_map = unsafe { MmapOptions::new().map(&reader).expect("unable to create a memory map for input") };
+    let challenge_readable_map = unsafe {
+        MmapOptions::new()
+            .map(&reader)
+            .expect("unable to create a memory map for input")
+    };

     let current_accumulator = BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::deserialize(
         &challenge_readable_map,
         CheckForCorrectness::Yes,
         UseCompression::No,
-    ).expect("unable to read compressed accumulator");
+    )
+    .expect("unable to read compressed accumulator");

-    let mut reduced_accumulator = BatchedAccumulator::<Bn256, Bn256ReducedCeremonyParameters>::empty();
-    reduced_accumulator.tau_powers_g1 = current_accumulator.tau_powers_g1[..Bn256ReducedCeremonyParameters::TAU_POWERS_G1_LENGTH].to_vec();
-    reduced_accumulator.tau_powers_g2 = current_accumulator.tau_powers_g2[..Bn256ReducedCeremonyParameters::TAU_POWERS_LENGTH].to_vec();
-    reduced_accumulator.alpha_tau_powers_g1 = current_accumulator.alpha_tau_powers_g1[..Bn256ReducedCeremonyParameters::TAU_POWERS_LENGTH].to_vec();
-    reduced_accumulator.beta_tau_powers_g1 = current_accumulator.beta_tau_powers_g1[..Bn256ReducedCeremonyParameters::TAU_POWERS_LENGTH].to_vec();
+    let mut reduced_accumulator =
+        BatchedAccumulator::<Bn256, Bn256ReducedCeremonyParameters>::empty();
+    reduced_accumulator.tau_powers_g1 = current_accumulator.tau_powers_g1
+        [..Bn256ReducedCeremonyParameters::TAU_POWERS_G1_LENGTH]
+        .to_vec();
+    reduced_accumulator.tau_powers_g2 = current_accumulator.tau_powers_g2
+        [..Bn256ReducedCeremonyParameters::TAU_POWERS_LENGTH]
+        .to_vec();
+    reduced_accumulator.alpha_tau_powers_g1 = current_accumulator.alpha_tau_powers_g1
+        [..Bn256ReducedCeremonyParameters::TAU_POWERS_LENGTH]
+        .to_vec();
+    reduced_accumulator.beta_tau_powers_g1 = current_accumulator.beta_tau_powers_g1
+        [..Bn256ReducedCeremonyParameters::TAU_POWERS_LENGTH]
+        .to_vec();
     reduced_accumulator.beta_g2 = current_accumulator.beta_g2;

     let writer = OpenOptions::new()
         .read(true)
         .write(true)
         .create_new(true)
-        .open("reduced_challenge").expect("unable to create `./reduced_challenge` in this directory");
+        .open("reduced_challenge")
+        .expect("unable to create `./reduced_challenge` in this directory");

     // Recomputation strips the public key and uses hashing to link with the previous contribution after decompression
-    writer.set_len(Bn256ReducedCeremonyParameters::ACCUMULATOR_BYTE_SIZE as u64).expect("must make output file large enough");
+    writer
+        .set_len(Bn256ReducedCeremonyParameters::ACCUMULATOR_BYTE_SIZE as u64)
+        .expect("must make output file large enough");

-    let mut writable_map = unsafe { MmapOptions::new().map_mut(&writer).expect("unable to create a memory map for output") };
+    let mut writable_map = unsafe {
+        MmapOptions::new()
+            .map_mut(&writer)
+            .expect("unable to create a memory map for output")
+    };

-    let hash = reduced_hash(Bn256CeremonyParameters::REQUIRED_POWER as u8, Bn256ReducedCeremonyParameters::REQUIRED_POWER as u8);
-    (&mut writable_map[0..]).write(hash.as_slice()).expect("unable to write a default hash to mmap");
-    writable_map.flush().expect("unable to write reduced hash to `./reduced_challenge`");
+    let hash = reduced_hash(
+        Bn256CeremonyParameters::REQUIRED_POWER as u8,
+        Bn256ReducedCeremonyParameters::REQUIRED_POWER as u8,
+    );
+    (&mut writable_map[0..])
+        .write_all(hash.as_slice())
+        .expect("unable to write a default hash to mmap");
+    writable_map
+        .flush()
+        .expect("unable to write reduced hash to `./reduced_challenge`");

     println!("Reduced hash for a reduced challenge:");
     for line in hash.as_slice().chunks(16) {
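The reduction itself is nothing more than slicing: each vector of powers is cut to the smaller parameter set's length and copied out with `to_vec()`. A toy version of that move (the lengths are illustrative; the real elements are curve points):

    fn main() {
        // Stand-in for tau_powers_g1 from the full ceremony.
        let full: Vec<u64> = (0..(1u64 << 12)).collect();
        const REDUCED_LEN: usize = 1 << 10;

        // Prefix view first, then an owned copy, mirroring
        // current_accumulator.tau_powers_g1[..LEN].to_vec().
        let reduced: Vec<u64> = full[..REDUCED_LEN].to_vec();
        assert_eq!(reduced.len(), REDUCED_LEN);
    }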
@ -87,14 +106,21 @@ fn main() {
             }
             print!(" ");
         }
-        println!("");
+        println!();
     }

-    reduced_accumulator.serialize(&mut writable_map, UseCompression::No).unwrap();
+    reduced_accumulator
+        .serialize(&mut writable_map, UseCompression::No)
+        .unwrap();

     // Get the hash of the contribution, so the user can compare later
-    let output_readonly = writable_map.make_read_only().expect("must make a map readonly");
-    let contribution_hash = BatchedAccumulator::<Bn256, Bn256ReducedCeremonyParameters>::calculate_hash(&output_readonly);
+    let output_readonly = writable_map
+        .make_read_only()
+        .expect("must make a map readonly");
+    let contribution_hash =
+        BatchedAccumulator::<Bn256, Bn256ReducedCeremonyParameters>::calculate_hash(
+            &output_readonly,
+        );

     println!("Reduced contribution is formed with a hash:");
@ -106,7 +132,7 @@ fn main() {
             }
             print!(" ");
         }
-        println!("");
+        println!();
     }

     println!("Wrote a reduced accumulator to `./challenge`");
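`println!("")` and `println!()` both emit a single newline; clippy's `println_empty_string` lint prefers the argument-free spelling because the empty string adds nothing:

    fn main() {
        println!(""); // flagged by clippy::println_empty_string
        println!();   // identical output, idiomatic form
    }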
@ -1,31 +1,27 @@
-extern crate powersoftau;
-extern crate rand;
-extern crate blake2;
-extern crate byteorder;
-extern crate bellman_ce;
-
-use bellman_ce::pairing::{CurveAffine, CurveProjective};
 use bellman_ce::pairing::bn256::Bn256;
 use bellman_ce::pairing::bn256::{G1, G2};
-use powersoftau::bn256::{Bn256CeremonyParameters};
-use powersoftau::batched_accumulator::*;
+use bellman_ce::pairing::{CurveAffine, CurveProjective};
 use powersoftau::accumulator::HashWriter;
+use powersoftau::batched_accumulator::*;
+use powersoftau::bn256::Bn256CeremonyParameters;
 use powersoftau::*;

-use crate::utils::*;
-use crate::parameters::*;
 use crate::keypair::*;
+use crate::parameters::*;
+use crate::utils::*;

-use bellman_ce::multicore::Worker;
 use bellman_ce::domain::{EvaluationDomain, Point};
+use bellman_ce::multicore::Worker;

+use std::fs::{remove_file, OpenOptions};
+use std::io::{self, BufWriter, Read, Write};
 use std::path::Path;
-use std::fs::{OpenOptions, remove_file};
-use std::io::{self, Read, BufWriter, Write};

 use memmap::*;

-const fn num_bits<T>() -> usize { std::mem::size_of::<T>() * 8 }
+const fn num_bits<T>() -> usize {
+    std::mem::size_of::<T>() * 8
+}

 fn log_2(x: u64) -> u32 {
     assert!(x > 0);
@ -36,11 +32,10 @@ fn log_2(x: u64) -> u32 {
 // given the current state of the accumulator and the last
 // response file hash.
 fn get_challenge_file_hash(
-    acc: &mut BatchedAccumulator::<Bn256, Bn256CeremonyParameters>,
+    acc: &mut BatchedAccumulator<Bn256, Bn256CeremonyParameters>,
     last_response_file_hash: &[u8; 64],
     is_initial: bool,
-) -> [u8; 64]
-{
+) -> [u8; 64] {
     let sink = io::sink();
     let mut sink = HashWriter::new(sink);
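The signature cleanup drops a turbofish: in type position the generic arguments attach directly (`BatchedAccumulator<Bn256, ...>`), and `::<...>` is only needed in expression position. A sketch with a stand-in generic type, not the ceremony's accumulator:

    struct Accumulator<T> {
        powers: Vec<T>,
    }

    // Type position: no turbofish needed (the old code wrote
    // `acc: &mut Accumulator::<u64>` style).
    fn absorb(acc: &mut Accumulator<u64>) {
        acc.powers.push(42);
    }

    fn main() {
        // Expression position: the turbofish disambiguates the constructor.
        let mut acc = Accumulator::<u64> { powers: Vec::new() };
        absorb(&mut acc);
        assert_eq!(acc.powers, vec![42]);
    }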
@ -57,19 +52,31 @@ fn get_challenge_file_hash(
             .open(file_name)
             .expect("unable to create temporary tmp_challenge_file_hash");

-        writer.set_len(Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE as u64).expect("must make output file large enough");
-        let mut writable_map = unsafe { MmapOptions::new().map_mut(&writer).expect("unable to create a memory map for output") };
+        writer
+            .set_len(Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE as u64)
+            .expect("must make output file large enough");
+        let mut writable_map = unsafe {
+            MmapOptions::new()
+                .map_mut(&writer)
+                .expect("unable to create a memory map for output")
+        };

-        (&mut writable_map[0..]).write(&last_response_file_hash[..]).expect("unable to write a default hash to mmap");
-        writable_map.flush().expect("unable to write blank hash to challenge file");
+        (&mut writable_map[0..])
+            .write_all(&last_response_file_hash[..])
+            .expect("unable to write a default hash to mmap");
+        writable_map
+            .flush()
+            .expect("unable to write blank hash to challenge file");

         if is_initial {
-            BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::generate_initial(&mut writable_map, UseCompression::No).expect("generation of initial accumulator is successful");
-        } else {
-            acc.serialize(
+            BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::generate_initial(
                 &mut writable_map,
-                UseCompression::No
-            ).unwrap();
+                UseCompression::No,
+            )
+            .expect("generation of initial accumulator is successful");
+        } else {
+            acc.serialize(&mut writable_map, UseCompression::No)
+                .unwrap();
         }

         writable_map.flush().expect("must flush the memory map");
@ -77,13 +84,13 @@ fn get_challenge_file_hash(

     let mut challenge_reader = OpenOptions::new()
         .read(true)
-        .open(file_name).expect("unable to open temporary tmp_challenge_file_hash");
+        .open(file_name)
+        .expect("unable to open temporary tmp_challenge_file_hash");

     let mut contents = vec![];
     challenge_reader.read_to_end(&mut contents).unwrap();

-    sink.write_all(&contents)
-        .unwrap();
+    sink.write_all(&contents).unwrap();

     let mut tmp = [0; 64];
     tmp.copy_from_slice(sink.into_hash().as_slice());
@ -95,11 +102,10 @@ fn get_challenge_file_hash(
 // accumulator, the player's public key, and the challenge
 // file's hash.
 fn get_response_file_hash(
-    acc: &mut BatchedAccumulator::<Bn256, Bn256CeremonyParameters>,
-    pubkey: &PublicKey::<Bn256>,
-    last_challenge_file_hash: &[u8; 64]
-) -> [u8; 64]
-{
+    acc: &mut BatchedAccumulator<Bn256, Bn256CeremonyParameters>,
+    pubkey: &PublicKey<Bn256>,
+    last_challenge_file_hash: &[u8; 64],
+) -> [u8; 64] {
     let sink = io::sink();
     let mut sink = HashWriter::new(sink);
@ -115,31 +121,40 @@ fn get_response_file_hash(
             .open(file_name)
             .expect("unable to create temporary tmp_response_file_hash");

-        writer.set_len(Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE as u64).expect("must make output file large enough");
-        let mut writable_map = unsafe { MmapOptions::new().map_mut(&writer).expect("unable to create a memory map for output") };
+        writer
+            .set_len(Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE as u64)
+            .expect("must make output file large enough");
+        let mut writable_map = unsafe {
+            MmapOptions::new()
+                .map_mut(&writer)
+                .expect("unable to create a memory map for output")
+        };

-        (&mut writable_map[0..]).write(&last_challenge_file_hash[..]).expect("unable to write a default hash to mmap");
-        writable_map.flush().expect("unable to write blank hash to challenge file");
+        (&mut writable_map[0..])
+            .write_all(&last_challenge_file_hash[..])
+            .expect("unable to write a default hash to mmap");
+        writable_map
+            .flush()
+            .expect("unable to write blank hash to challenge file");

-        acc.serialize(
-            &mut writable_map,
-            UseCompression::Yes
-        ).unwrap();
+        acc.serialize(&mut writable_map, UseCompression::Yes)
+            .unwrap();

-        pubkey.write::<Bn256CeremonyParameters>(&mut writable_map, UseCompression::Yes).expect("unable to write public key");
+        pubkey
+            .write::<Bn256CeremonyParameters>(&mut writable_map, UseCompression::Yes)
+            .expect("unable to write public key");
         writable_map.flush().expect("must flush the memory map");
     }

     let mut challenge_reader = OpenOptions::new()
         .read(true)
-        .open(file_name).expect("unable to open temporary tmp_response_file_hash");
+        .open(file_name)
+        .expect("unable to open temporary tmp_response_file_hash");

     let mut contents = vec![];
     challenge_reader.read_to_end(&mut contents).unwrap();

-    sink.write_all(&contents)
-        .unwrap();
+    sink.write_all(&contents).unwrap();

     let mut tmp = [0; 64];
     tmp.copy_from_slice(sink.into_hash().as_slice());
@ -162,11 +177,22 @@ fn new_accumulator_for_verify() -> BatchedAccumulator<Bn256, Bn256CeremonyParame
             .expect("unable to create `./tmp_initial_challenge`");

         let expected_challenge_length = Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE;
-        file.set_len(expected_challenge_length as u64).expect("unable to allocate large enough file");
+        file.set_len(expected_challenge_length as u64)
+            .expect("unable to allocate large enough file");

-        let mut writable_map = unsafe { MmapOptions::new().map_mut(&file).expect("unable to create a memory map") };
-        BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::generate_initial(&mut writable_map, UseCompression::No).expect("generation of initial accumulator is successful");
-        writable_map.flush().expect("unable to flush memmap to disk");
+        let mut writable_map = unsafe {
+            MmapOptions::new()
+                .map_mut(&file)
+                .expect("unable to create a memory map")
+        };
+        BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::generate_initial(
+            &mut writable_map,
+            UseCompression::No,
+        )
+        .expect("generation of initial accumulator is successful");
+        writable_map
+            .flush()
+            .expect("unable to flush memmap to disk");
     }

     let reader = OpenOptions::new()
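The multi-line `unsafe { ... }` block is only formatting, but it is worth noting why the block exists at all: `map_mut` is unsafe because the mapping's soundness depends on the underlying file not being resized or modified elsewhere while mapped. A minimal sketch with the memmap crate from the lockfile (the file name is illustrative):

    use memmap::MmapOptions;
    use std::fs::OpenOptions;
    use std::io::Write;

    fn main() -> std::io::Result<()> {
        let file = OpenOptions::new()
            .read(true)
            .write(true)
            .create(true)
            .open("scratch.bin")?;
        file.set_len(64)?; // the mapping needs the file at its final size first

        // Safety: we created the file and nothing else resizes it while mapped.
        let mut map = unsafe { MmapOptions::new().map_mut(&file)? };
        (&mut map[0..]).write_all(&[0u8; 64])?;
        map.flush()?;
        Ok(())
    }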
@ -174,14 +200,14 @@ fn new_accumulator_for_verify() -> BatchedAccumulator<Bn256, Bn256CeremonyParame
         .open(file_name)
         .expect("unable open transcript file in this directory");

-    let readable_map = unsafe { MmapOptions::new().map(&reader).expect("unable to create a memory map for input") };
-    let initial_accumulator = BatchedAccumulator::deserialize(
-        &readable_map,
-        CheckForCorrectness::Yes,
-        UseCompression::No,
-    ).expect("unable to read uncompressed accumulator");
-
-    initial_accumulator
+    let readable_map = unsafe {
+        MmapOptions::new()
+            .map(&reader)
+            .expect("unable to create a memory map for input")
+    };
+
+    BatchedAccumulator::deserialize(&readable_map, CheckForCorrectness::Yes, UseCompression::No)
+        .expect("unable to read uncompressed accumulator")
 }

 fn main() {
@ -198,7 +224,11 @@ fn main() {
         .open(transcript_filename)
         .expect("unable open transcript file in this directory");

-    let transcript_readable_map = unsafe { MmapOptions::new().map(&reader).expect("unable to create a memory map for input") };
+    let transcript_readable_map = unsafe {
+        MmapOptions::new()
+            .map(&reader)
+            .expect("unable to create a memory map for input")
+    };

     // Initialize the accumulator
     let mut current_accumulator = new_accumulator_for_verify();
@ -218,7 +248,12 @@ fn main() {
             remove_file(file_name).unwrap();
         }

-        let memory_slice = transcript_readable_map.get(i*Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE..(i+1)*Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE).expect("must read point data from file");
+        let memory_slice = transcript_readable_map
+            .get(
+                i * Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE
+                    ..(i + 1) * Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE,
+            )
+            .expect("must read point data from file");
         let writer = OpenOptions::new()
             .read(true)
             .write(true)
@ -226,19 +261,26 @@ fn main() {
             .open(file_name)
            .expect("unable to create temporary tmp_response");

-        writer.set_len(Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE as u64).expect("must make output file large enough");
-        let mut writable_map = unsafe { MmapOptions::new().map_mut(&writer).expect("unable to create a memory map for output") };
+        writer
+            .set_len(Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE as u64)
+            .expect("must make output file large enough");
+        let mut writable_map = unsafe {
+            MmapOptions::new()
+                .map_mut(&writer)
+                .expect("unable to create a memory map for output")
+        };

-        (&mut writable_map[0..]).write(&memory_slice[..]).expect("unable to write a default hash to mmap");
+        (&mut writable_map[0..])
+            .write_all(&memory_slice[..])
+            .expect("unable to write a default hash to mmap");
         writable_map.flush().expect("must flush the memory map");

-        let response_readable_map = writable_map.make_read_only().expect("must make a map readonly");
+        let response_readable_map = writable_map
+            .make_read_only()
+            .expect("must make a map readonly");

-        let last_challenge_file_hash = get_challenge_file_hash(
-            &mut current_accumulator,
-            &last_response_file_hash,
-            i == 0,
-        );
+        let last_challenge_file_hash =
+            get_challenge_file_hash(&mut current_accumulator, &last_response_file_hash, i == 0);

         // Deserialize the accumulator provided by the player in
         // their response file. It's stored in the transcript in
||||||
@ -249,16 +291,21 @@ fn main() {
|
|||||||
&response_readable_map,
|
&response_readable_map,
|
||||||
CheckForCorrectness::Yes,
|
CheckForCorrectness::Yes,
|
||||||
UseCompression::Yes,
|
UseCompression::Yes,
|
||||||
).expect("unable to read uncompressed accumulator");
|
)
|
||||||
|
.expect("unable to read uncompressed accumulator");
|
||||||
|
|
||||||
let response_file_pubkey = PublicKey::<Bn256>::read::<Bn256CeremonyParameters>(&response_readable_map, UseCompression::Yes).unwrap();
|
let response_file_pubkey = PublicKey::<Bn256>::read::<Bn256CeremonyParameters>(
|
||||||
|
&response_readable_map,
|
||||||
|
UseCompression::Yes,
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
// Compute the hash of the response file. (we had it in uncompressed
|
// Compute the hash of the response file. (we had it in uncompressed
|
||||||
// form in the transcript, but the response file is compressed to save
|
// form in the transcript, but the response file is compressed to save
|
||||||
// participants bandwidth.)
|
// participants bandwidth.)
|
||||||
last_response_file_hash = get_response_file_hash(
|
last_response_file_hash = get_response_file_hash(
|
||||||
&mut response_file_accumulator,
|
&mut response_file_accumulator,
|
||||||
&response_file_pubkey,
|
&response_file_pubkey,
|
||||||
&last_challenge_file_hash
|
&last_challenge_file_hash,
|
||||||
);
|
);
|
||||||
|
|
||||||
// Verify the transformation from the previous accumulator to the new
|
// Verify the transformation from the previous accumulator to the new
|
||||||
@ -268,9 +315,8 @@ fn main() {
             &current_accumulator,
             &response_file_accumulator,
             &response_file_pubkey,
-            &last_challenge_file_hash
-        )
-        {
+            &last_challenge_file_hash,
+        ) {
             println!(" ... FAILED");
             panic!("INVALID RESPONSE FILE!");
         } else {
@ -286,35 +332,43 @@ fn main() {

     // Create the parameters for various 2^m circuit depths.
     let max_degree = log_2(current_accumulator.tau_powers_g2.len() as u64);
-    for m in 0..max_degree+1 {
+    for m in 0..=max_degree {
         let paramname = format!("phase1radix2m{}", m);
         println!("Creating {}", paramname);

         let degree = 1 << m;

         let mut g1_coeffs = EvaluationDomain::from_coeffs(
-            current_accumulator.tau_powers_g1[0..degree].iter()
+            current_accumulator.tau_powers_g1[0..degree]
+                .iter()
                 .map(|e| Point(e.into_projective()))
-                .collect()
-        ).unwrap();
+                .collect(),
+        )
+        .unwrap();

         let mut g2_coeffs = EvaluationDomain::from_coeffs(
-            current_accumulator.tau_powers_g2[0..degree].iter()
+            current_accumulator.tau_powers_g2[0..degree]
+                .iter()
                 .map(|e| Point(e.into_projective()))
-                .collect()
-        ).unwrap();
+                .collect(),
+        )
+        .unwrap();

         let mut g1_alpha_coeffs = EvaluationDomain::from_coeffs(
-            current_accumulator.alpha_tau_powers_g1[0..degree].iter()
+            current_accumulator.alpha_tau_powers_g1[0..degree]
+                .iter()
                 .map(|e| Point(e.into_projective()))
-                .collect()
-        ).unwrap();
+                .collect(),
+        )
+        .unwrap();

         let mut g1_beta_coeffs = EvaluationDomain::from_coeffs(
-            current_accumulator.beta_tau_powers_g1[0..degree].iter()
+            current_accumulator.beta_tau_powers_g1[0..degree]
+                .iter()
                 .map(|e| Point(e.into_projective()))
-                .collect()
-        ).unwrap();
+                .collect(),
+        )
+        .unwrap();

         // This converts all of the elements into Lagrange coefficients
         // for later construction of interpolation polynomials
@ -335,21 +389,13 @@ fn main() {

         // Remove the Point() wrappers

-        let mut g1_coeffs = g1_coeffs.into_iter()
-            .map(|e| e.0)
-            .collect::<Vec<_>>();
+        let mut g1_coeffs = g1_coeffs.into_iter().map(|e| e.0).collect::<Vec<_>>();

-        let mut g2_coeffs = g2_coeffs.into_iter()
-            .map(|e| e.0)
-            .collect::<Vec<_>>();
+        let mut g2_coeffs = g2_coeffs.into_iter().map(|e| e.0).collect::<Vec<_>>();

-        let mut g1_alpha_coeffs = g1_alpha_coeffs.into_iter()
-            .map(|e| e.0)
-            .collect::<Vec<_>>();
+        let mut g1_alpha_coeffs = g1_alpha_coeffs.into_iter().map(|e| e.0).collect::<Vec<_>>();

-        let mut g1_beta_coeffs = g1_beta_coeffs.into_iter()
-            .map(|e| e.0)
-            .collect::<Vec<_>>();
+        let mut g1_beta_coeffs = g1_beta_coeffs.into_iter().map(|e| e.0).collect::<Vec<_>>();

         // Batch normalize
         G1::batch_normalization(&mut g1_coeffs);
@ -362,7 +408,7 @@ fn main() {
         // x^(i + m) - x^i for i in 0..=(m-2)
         // for radix2 evaluation domains
         let mut h = Vec::with_capacity(degree - 1);
-        for i in 0..(degree-1) {
+        for i in 0..(degree - 1) {
             let mut tmp = current_accumulator.tau_powers_g1[i + degree].into_projective();
             let mut tmp2 = current_accumulator.tau_powers_g1[i].into_projective();
             tmp2.negate();
@ -387,28 +433,30 @@ fn main() {
         // Write alpha (in g1)
         // Needed by verifier for e(alpha, beta)
        // Needed by prover for A and C elements of proof
-        writer.write_all(
-            current_accumulator.alpha_tau_powers_g1[0]
-                .into_uncompressed()
-                .as_ref()
-        ).unwrap();
+        writer
+            .write_all(
+                current_accumulator.alpha_tau_powers_g1[0]
+                    .into_uncompressed()
+                    .as_ref(),
+            )
+            .unwrap();

         // Write beta (in g1)
         // Needed by prover for C element of proof
-        writer.write_all(
-            current_accumulator.beta_tau_powers_g1[0]
-                .into_uncompressed()
-                .as_ref()
-        ).unwrap();
+        writer
+            .write_all(
+                current_accumulator.beta_tau_powers_g1[0]
+                    .into_uncompressed()
+                    .as_ref(),
+            )
+            .unwrap();

         // Write beta (in g2)
         // Needed by verifier for e(alpha, beta)
         // Needed by prover for B element of proof
-        writer.write_all(
-            current_accumulator.beta_g2
-                .into_uncompressed()
-                .as_ref()
-        ).unwrap();
+        writer
+            .write_all(current_accumulator.beta_g2.into_uncompressed().as_ref())
+            .unwrap();

         // Lagrange coefficients in G1 (for constructing
         // LC/IC queries and precomputing polynomials for A)
@ -416,10 +464,9 @@ fn main() {
             // Was normalized earlier in parallel
             let coeff = coeff.into_affine();

-            writer.write_all(
-                coeff.into_uncompressed()
-                    .as_ref()
-            ).unwrap();
+            writer
+                .write_all(coeff.into_uncompressed().as_ref())
+                .unwrap();
         }

         // Lagrange coefficients in G2 (for precomputing
@ -428,10 +475,9 @@ fn main() {
             // Was normalized earlier in parallel
             let coeff = coeff.into_affine();

-            writer.write_all(
-                coeff.into_uncompressed()
-                    .as_ref()
-            ).unwrap();
+            writer
+                .write_all(coeff.into_uncompressed().as_ref())
+                .unwrap();
         }

         // Lagrange coefficients in G1 with alpha (for
@ -440,10 +486,9 @@ fn main() {
             // Was normalized earlier in parallel
             let coeff = coeff.into_affine();

-            writer.write_all(
-                coeff.into_uncompressed()
-                    .as_ref()
-            ).unwrap();
+            writer
+                .write_all(coeff.into_uncompressed().as_ref())
+                .unwrap();
         }

         // Lagrange coefficients in G1 with beta (for
@ -452,10 +497,9 @@ fn main() {
             // Was normalized earlier in parallel
             let coeff = coeff.into_affine();

-            writer.write_all(
-                coeff.into_uncompressed()
-                    .as_ref()
-            ).unwrap();
+            writer
+                .write_all(coeff.into_uncompressed().as_ref())
+                .unwrap();
         }

         // Bases for H polynomial computation
@ -463,10 +507,9 @@ fn main() {
             // Was normalized earlier in parallel
             let coeff = coeff.into_affine();

-            writer.write_all(
-                coeff.into_uncompressed()
-                    .as_ref()
-            ).unwrap();
+            writer
+                .write_all(coeff.into_uncompressed().as_ref())
+                .unwrap();
         }
     }
 }
@ -1,18 +1,11 @@
-extern crate powersoftau;
-extern crate bellman_ce;
-extern crate memmap;
-extern crate rand;
-extern crate blake2;
-extern crate byteorder;
-
-use powersoftau::bn256::{Bn256CeremonyParameters};
-use powersoftau::batched_accumulator::{BatchedAccumulator};
-use powersoftau::keypair::{PublicKey};
-use powersoftau::parameters::{UseCompression, CheckForCorrectness};
-
-use std::fs::OpenOptions;
+use powersoftau::batched_accumulator::BatchedAccumulator;
+use powersoftau::bn256::Bn256CeremonyParameters;
+use powersoftau::keypair::PublicKey;
+use powersoftau::parameters::{CheckForCorrectness, UseCompression};
+
 use bellman_ce::pairing::bn256::Bn256;
 use memmap::*;
+use std::fs::OpenOptions;

 use std::io::{Read, Write};
@ -32,8 +25,11 @@ fn main() {
     let response_filename = &args[2];
     let new_challenge_filename = &args[3];

-    println!("Will verify and decompress a contribution to accumulator for 2^{} powers of tau", Bn256CeremonyParameters::REQUIRED_POWER);
+    println!(
+        "Will verify and decompress a contribution to accumulator for 2^{} powers of tau",
+        Bn256CeremonyParameters::REQUIRED_POWER
+    );

     // Try to load challenge file from disk.
     let challenge_reader = OpenOptions::new()
         .read(true)
@ -41,21 +37,30 @@ fn main() {
         .expect("unable open challenge file in this directory");

     {
-        let metadata = challenge_reader.metadata().expect("unable to get filesystem metadata for challenge file");
+        let metadata = challenge_reader
+            .metadata()
+            .expect("unable to get filesystem metadata for challenge file");
         let expected_challenge_length = match PREVIOUS_CHALLENGE_IS_COMPRESSED {
             UseCompression::Yes => {
-                Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE - Bn256CeremonyParameters::PUBLIC_KEY_SIZE
-            },
-            UseCompression::No => {
-                Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE
+                Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE
+                    - Bn256CeremonyParameters::PUBLIC_KEY_SIZE
             }
+            UseCompression::No => Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE,
         };
         if metadata.len() != (expected_challenge_length as u64) {
-            panic!("The size of challenge file should be {}, but it's {}, so something isn't right.", expected_challenge_length, metadata.len());
+            panic!(
+                "The size of challenge file should be {}, but it's {}, so something isn't right.",
+                expected_challenge_length,
+                metadata.len()
+            );
         }
     }

-    let challenge_readable_map = unsafe { MmapOptions::new().map(&challenge_reader).expect("unable to create a memory map for input") };
+    let challenge_readable_map = unsafe {
+        MmapOptions::new()
+            .map(&challenge_reader)
+            .expect("unable to create a memory map for input")
+    };

     // Try to load response file from disk.
     let response_reader = OpenOptions::new()
@ -64,27 +69,39 @@ fn main() {
         .expect("unable open response file in this directory");

     {
-        let metadata = response_reader.metadata().expect("unable to get filesystem metadata for response file");
+        let metadata = response_reader
+            .metadata()
+            .expect("unable to get filesystem metadata for response file");
         let expected_response_length = match CONTRIBUTION_IS_COMPRESSED {
-            UseCompression::Yes => {
-                Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE
-            },
+            UseCompression::Yes => Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE,
             UseCompression::No => {
-                Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE + Bn256CeremonyParameters::PUBLIC_KEY_SIZE
+                Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE
+                    + Bn256CeremonyParameters::PUBLIC_KEY_SIZE
             }
         };
         if metadata.len() != (expected_response_length as u64) {
-            panic!("The size of response file should be {}, but it's {}, so something isn't right.", expected_response_length, metadata.len());
+            panic!(
+                "The size of response file should be {}, but it's {}, so something isn't right.",
+                expected_response_length,
+                metadata.len()
+            );
         }
     }

-    let response_readable_map = unsafe { MmapOptions::new().map(&response_reader).expect("unable to create a memory map for input") };
+    let response_readable_map = unsafe {
+        MmapOptions::new()
+            .map(&response_reader)
+            .expect("unable to create a memory map for input")
+    };

     println!("Calculating previous challenge hash...");

     // Check that contribution is correct

-    let current_accumulator_hash = BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&challenge_readable_map);
+    let current_accumulator_hash =
+        BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(
+            &challenge_readable_map,
+        );

     println!("Hash of the `challenge` file for verification:");
     for line in current_accumulator_hash.as_slice().chunks(16) {
@ -101,8 +118,12 @@ fn main() {
     // Check the hash chain - a new response must be based on the previous challenge!
     {
         let mut response_challenge_hash = [0; 64];
-        let memory_slice = response_readable_map.get(0..64).expect("must read point data from file");
-        memory_slice.clone().read_exact(&mut response_challenge_hash).expect("couldn't read hash of challenge file from response file");
+        let mut memory_slice = response_readable_map
+            .get(0..64)
+            .expect("must read point data from file");
+        memory_slice
+            .read_exact(&mut response_challenge_hash)
+            .expect("couldn't read hash of challenge file from response file");

         println!("`response` was based on the hash:");
         for line in response_challenge_hash.chunks(16) {
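Besides the formatting, this hunk drops a `.clone()`: `&[u8]` itself implements `Read`, and `read_exact` advances the slice it reads from, so binding the slice `mut` replaces cloning it just to obtain a disposable reader. A sketch with stand-in data:

    use std::io::Read;

    fn main() -> std::io::Result<()> {
        let response = [0xabu8; 128]; // stand-in for the mapped response file
        let mut hash = [0u8; 64];

        // &[u8] implements Read; read_exact consumes the front of the slice,
        // which is why the binding must be `mut` (the old code cloned instead).
        let mut memory_slice = &response[0..64];
        memory_slice.read_exact(&mut hash)?;
        assert_eq!(hash, [0xab; 64]);
        Ok(())
    }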
@ -121,7 +142,9 @@ fn main() {
     }
 }

-let response_hash = BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&response_readable_map);
+let response_hash = BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(
+    &response_readable_map,
+);

 println!("Hash of the response file for verification:");
 for line in response_hash.as_slice().chunks(16) {
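The hash-chain block above is what links transcript rounds together: the first 64 bytes of every response are the BLAKE2b hash of the challenge it was computed from. A sketch of the invariant being checked (the helper name `chain_is_intact` is hypothetical; it assumes the recomputed digest is at least 64 bytes long):

    // Hypothetical helper: the chain is intact iff the digest embedded in the
    // response equals the recomputed hash of the previous challenge.
    fn chain_is_intact(response_challenge_hash: &[u8; 64], current_accumulator_hash: &[u8]) -> bool {
        response_challenge_hash[..] == current_accumulator_hash[..64]
    }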
@ -136,18 +159,22 @@ fn main() {
 }

 // get the contributor's public key
-let public_key = PublicKey::<Bn256>::read::<Bn256CeremonyParameters>(&response_readable_map, CONTRIBUTION_IS_COMPRESSED)
-    .expect("wasn't able to deserialize the response file's public key");
+let public_key = PublicKey::<Bn256>::read::<Bn256CeremonyParameters>(
+    &response_readable_map,
+    CONTRIBUTION_IS_COMPRESSED,
+)
+.expect("wasn't able to deserialize the response file's public key");

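`PublicKey::read` (reformatted later in this diff) deserializes uncompressed points, validates them, and rejects any point at infinity, so a malformed response fails here rather than during the pairing checks. A hedged usage sketch with explicit error handling in place of the `expect`:

    // Sketch: surface the deserialization failure instead of panicking opaquely.
    // DeserializationError implements Display, so {} formatting works.
    let public_key = match PublicKey::<Bn256>::read::<Bn256CeremonyParameters>(
        &response_readable_map,
        CONTRIBUTION_IS_COMPRESSED,
    ) {
        Ok(pk) => pk,
        Err(e) => panic!("invalid public key in response: {}", e),
    };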
 // check that it follows the protocol

-println!("Verifying a contribution to contain proper powers and correspond to the public key...");
+println!(
+    "Verifying a contribution to contain proper powers and correspond to the public key..."
+);

 let valid = BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::verify_transformation(
     &challenge_readable_map,
     &response_readable_map,
     &public_key,
     current_accumulator_hash.as_slice(),
     PREVIOUS_CHALLENGE_IS_COMPRESSED,
     CONTRIBUTION_IS_COMPRESSED,
@ -163,7 +190,9 @@ fn main() {
 }

 if COMPRESS_NEW_CHALLENGE == UseCompression::Yes {
-    println!("Don't need to recompress the contribution, please copy response file as new challenge");
+    println!(
+        "Don't need to recompress the contribution, please copy response file as new challenge"
+    );
 } else {
     println!("Verification succeeded! Writing to new challenge file...");

@ -175,29 +204,44 @@ fn main() {
     .open(new_challenge_filename)
     .expect("unable to create new challenge file in this directory");

 // Recomputation strips the public key and uses hashing to link with the previous contribution after decompression
-writer.set_len(Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE as u64).expect("must make output file large enough");
+writer
+    .set_len(Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE as u64)
+    .expect("must make output file large enough");

-let mut writable_map = unsafe { MmapOptions::new().map_mut(&writer).expect("unable to create a memory map for output") };
+let mut writable_map = unsafe {
+    MmapOptions::new()
+        .map_mut(&writer)
+        .expect("unable to create a memory map for output")
+};

 {
-    (&mut writable_map[0..]).write(response_hash.as_slice()).expect("unable to write a default hash to mmap");
+    (&mut writable_map[0..])
+        .write_all(response_hash.as_slice())
+        .expect("unable to write a default hash to mmap");

-    writable_map.flush().expect("unable to write hash to new challenge file");
+    writable_map
+        .flush()
+        .expect("unable to write hash to new challenge file");
 }

 BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::decompress(
     &response_readable_map,
     &mut writable_map,
-    CheckForCorrectness::No).expect("must decompress a response for a new challenge");
+    CheckForCorrectness::No,
+)
+.expect("must decompress a response for a new challenge");

 writable_map.flush().expect("must flush the memory map");

-let new_challenge_readable_map = writable_map.make_read_only().expect("must make a map readonly");
+let new_challenge_readable_map = writable_map
+    .make_read_only()
+    .expect("must make a map readonly");

-let recompressed_hash = BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&new_challenge_readable_map);
+let recompressed_hash =
+    BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(
+        &new_challenge_readable_map,
+    );

 println!("Here's the BLAKE2b hash of the decompressed participant's response as new_challenge file:");
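The write path above reads as one pipeline: size the file first (a memory map cannot grow), map it mutably, seed the first 64 bytes with the response hash so the next round can extend the chain, then decompress the accumulator into place behind it. A condensed sketch under the same assumptions, with the names as they appear in the diff:

    // Condensed sketch of the new-challenge write path shown above.
    writer
        .set_len(Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE as u64)
        .expect("must make output file large enough"); // fix the size up front
    let mut writable_map = unsafe {
        MmapOptions::new().map_mut(&writer).expect("mmap for output")
    };
    (&mut writable_map[0..])
        .write_all(response_hash.as_slice()) // bytes 0..64: the hash-chain link
        .expect("unable to write a default hash to mmap");
    BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::decompress(
        &response_readable_map,
        &mut writable_map,
        CheckForCorrectness::No, // the points were already verified earlier
    )
    .expect("must decompress a response for a new challenge");
    writable_map.flush().expect("must flush the memory map");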
@ -1,33 +1,7 @@
-extern crate rand;
-extern crate crossbeam;
-extern crate num_cpus;
-extern crate blake2;
-extern crate generic_array;
-extern crate typenum;
-extern crate byteorder;
-extern crate bellman_ce;
-
-use self::bellman_ce::pairing::ff::{Field, PrimeField};
-use self::byteorder::{ReadBytesExt, BigEndian};
-use self::rand::{SeedableRng, Rng, Rand};
-use self::rand::chacha::ChaChaRng;
-use self::bellman_ce::pairing::bn256::{Bn256};
-use self::bellman_ce::pairing::*;
-use std::io::{self, Read, Write};
-use std::sync::{Arc, Mutex};
-use self::generic_array::GenericArray;
-use self::typenum::consts::U64;
-use self::blake2::{Blake2b, Digest};
-use std::fmt;
-
-use crate::parameters::*;
-use crate::keypair::*;
-use crate::utils::*;
+use crate::parameters::PowersOfTauParameters;

 #[derive(Clone)]
-pub struct Bn256CeremonyParameters {
-
-}
+pub struct Bn256CeremonyParameters {}

 impl PowersOfTauParameters for Bn256CeremonyParameters {
     #[cfg(not(feature = "smalltest"))]
@ -45,81 +19,91 @@ impl PowersOfTauParameters for Bn256CeremonyParameters {
     const G2_COMPRESSED_BYTE_SIZE: usize = 64;
 }

-#[test]
-fn test_pubkey_serialization() {
-    use self::rand::thread_rng;
-
-    let rng = &mut thread_rng();
-    let digest = (0..64).map(|_| rng.gen()).collect::<Vec<_>>();
-    let (pk, _) = keypair::<_, Bn256>(rng, &digest);
-    let mut v = vec![];
-    pk.serialize(&mut v).unwrap();
-    assert_eq!(v.len(), Bn256CeremonyParameters::PUBLIC_KEY_SIZE);
-    let deserialized = PublicKey::<Bn256>::deserialize(&mut &v[..]).unwrap();
-    assert!(pk == deserialized);
-}
-
-#[test]
-fn test_power_pairs() {
-    use self::rand::thread_rng;
-    use self::bellman_ce::pairing::bn256::{Fr, G1Affine, G2Affine};
-
-    let rng = &mut thread_rng();
-
-    let mut v = vec![];
-    let x = Fr::rand(rng);
-    let mut acc = Fr::one();
-    for _ in 0..100 {
-        v.push(G1Affine::one().mul(acc).into_affine());
-        acc.mul_assign(&x);
-    }
-
-    let gx = G2Affine::one().mul(x).into_affine();
-
-    assert!(same_ratio(power_pairs(&v), (G2Affine::one(), gx)));
-
-    v[1] = v[1].mul(Fr::rand(rng)).into_affine();
-
-    assert!(!same_ratio(power_pairs(&v), (G2Affine::one(), gx)));
-}
-
-#[test]
-fn test_same_ratio() {
-    use self::rand::thread_rng;
-    use self::bellman_ce::pairing::bn256::{Fr, G1Affine, G2Affine};
-
-    let rng = &mut thread_rng();
-
-    let s = Fr::rand(rng);
-    let g1 = G1Affine::one();
-    let g2 = G2Affine::one();
-    let g1_s = g1.mul(s).into_affine();
-    let g2_s = g2.mul(s).into_affine();
-
-    assert!(same_ratio((g1, g1_s), (g2, g2_s)));
-    assert!(!same_ratio((g1_s, g1), (g2, g2_s)));
-}
-
-#[test]
-fn test_accumulator_serialization() {
-    use crate::accumulator::*;
-
-    use self::rand::thread_rng;
-    use self::bellman_ce::pairing::bn256::{Bn256, Fr, G1Affine, G2Affine};
-    use self::PowersOfTauParameters;
-
-    let rng = &mut thread_rng();
-    let mut digest = (0..64).map(|_| rng.gen()).collect::<Vec<_>>();
-    let params = Bn256CeremonyParameters{};
-    let mut acc = Accumulator::<Bn256, _>::new(params.clone());
-    let before = acc.clone();
-    let (pk, sk) = keypair::<_, Bn256>(rng, &digest);
-    acc.transform(&sk);
-    assert!(verify_transform(&before, &acc, &pk, &digest));
-    digest[0] = !digest[0];
-    assert!(!verify_transform(&before, &acc, &pk, &digest));
-    let mut v = Vec::with_capacity(Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE - 64);
-    acc.serialize(&mut v, UseCompression::No).unwrap();
-    assert_eq!(v.len(), Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE - 64);
-    let deserialized = Accumulator::deserialize(&mut &v[..], UseCompression::No, CheckForCorrectness::No, params).unwrap();
-    assert!(acc == deserialized);
-}
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::accumulator::*;
+    use crate::{
+        keypair::{keypair, PublicKey},
+        parameters::{CheckForCorrectness, UseCompression},
+        utils::{power_pairs, same_ratio},
+    };
+    use bellman_ce::pairing::{
+        bn256::{Bn256, Fr, G1Affine, G2Affine},
+        ff::Field,
+        CurveAffine, CurveProjective,
+    };
+    use rand::{thread_rng, Rand, Rng};
+
+    #[test]
+    fn test_pubkey_serialization() {
+        let rng = &mut thread_rng();
+        let digest = (0..64).map(|_| rng.gen()).collect::<Vec<_>>();
+        let (pk, _) = keypair::<_, Bn256>(rng, &digest);
+        let mut v = vec![];
+        pk.serialize(&mut v).unwrap();
+        assert_eq!(v.len(), Bn256CeremonyParameters::PUBLIC_KEY_SIZE);
+        let deserialized = PublicKey::<Bn256>::deserialize(&mut &v[..]).unwrap();
+        assert!(pk == deserialized);
+    }
+
+    #[test]
+    fn test_power_pairs() {
+        let rng = &mut thread_rng();
+
+        let mut v = vec![];
+        let x = Fr::rand(rng);
+        let mut acc = Fr::one();
+        for _ in 0..100 {
+            v.push(G1Affine::one().mul(acc).into_affine());
+            acc.mul_assign(&x);
+        }
+
+        let gx = G2Affine::one().mul(x).into_affine();
+
+        assert!(same_ratio(power_pairs(&v), (G2Affine::one(), gx)));
+
+        v[1] = v[1].mul(Fr::rand(rng)).into_affine();
+
+        assert!(!same_ratio(power_pairs(&v), (G2Affine::one(), gx)));
+    }
+
+    #[test]
+    fn test_same_ratio() {
+        let rng = &mut thread_rng();
+
+        let s = Fr::rand(rng);
+        let g1 = G1Affine::one();
+        let g2 = G2Affine::one();
+        let g1_s = g1.mul(s).into_affine();
+        let g2_s = g2.mul(s).into_affine();
+
+        assert!(same_ratio((g1, g1_s), (g2, g2_s)));
+        assert!(!same_ratio((g1_s, g1), (g2, g2_s)));
+    }
+
+    #[test]
+    fn test_accumulator_serialization() {
+        let rng = &mut thread_rng();
+        let mut digest = (0..64).map(|_| rng.gen()).collect::<Vec<_>>();
+        let params = Bn256CeremonyParameters {};
+        let mut acc = Accumulator::<Bn256, _>::new(params.clone());
+        let before = acc.clone();
+        let (pk, sk) = keypair::<_, Bn256>(rng, &digest);
+        acc.transform(&sk);
+        assert!(verify_transform(&before, &acc, &pk, &digest));
+        digest[0] = !digest[0];
+        assert!(!verify_transform(&before, &acc, &pk, &digest));
+        let mut v = Vec::with_capacity(Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE - 64);
+        acc.serialize(&mut v, UseCompression::No).unwrap();
+        assert_eq!(v.len(), Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE - 64);
+        let deserialized = Accumulator::deserialize(
+            &mut &v[..],
+            UseCompression::No,
+            CheckForCorrectness::No,
+            params,
+        )
+        .unwrap();
+        assert!(acc == deserialized);
+    }
+}
@ -1,31 +1,16 @@
-extern crate rand;
-extern crate crossbeam;
-extern crate num_cpus;
-extern crate blake2;
-extern crate generic_array;
-extern crate typenum;
-extern crate byteorder;
-extern crate bellman_ce;
-extern crate memmap;
-extern crate itertools;
-
-use itertools::Itertools;
+use bellman_ce::pairing::{CurveAffine, CurveProjective, EncodedPoint, Engine};
+use blake2::{Blake2b, Digest};
 use memmap::{Mmap, MmapMut};
-use self::bellman_ce::pairing::ff::{Field, PrimeField};
-use self::byteorder::{ReadBytesExt, BigEndian};
-use self::rand::{SeedableRng, Rng, Rand};
-use self::rand::chacha::ChaChaRng;
-use self::bellman_ce::pairing::bn256::{Bn256};
-use self::bellman_ce::pairing::*;
-use std::io::{self, Read, Write};
-use std::sync::{Arc, Mutex};
-use self::generic_array::GenericArray;
-use self::typenum::consts::U64;
-use self::blake2::{Blake2b, Digest};
-use std::fmt;
-
-use super::utils::*;
-use super::parameters::*;
+use rand::{Rand, Rng};
+
+use std::io::{self, Read, Write};
+
+use typenum::consts::U64;
+
+use super::parameters::{DeserializationError, PowersOfTauParameters, UseCompression};
+use super::utils::{hash_to_g2, write_point};

 /// Contains terms of the form (s<sub>1</sub>, s<sub>1</sub><sup>x</sup>, H(s<sub>1</sub><sup>x</sup>)<sub>2</sub>, H(s<sub>1</sub><sup>x</sup>)<sub>2</sub><sup>x</sup>)
 /// for all x in τ, α and β, and some s chosen randomly by its creator. The function H "hashes into" the group G2. No points in the public key may be the identity.
@ -41,20 +26,20 @@ pub struct PublicKey<E: Engine> {
     pub beta_g1: (E::G1Affine, E::G1Affine),
     pub tau_g2: E::G2Affine,
     pub alpha_g2: E::G2Affine,
-    pub beta_g2: E::G2Affine
+    pub beta_g2: E::G2Affine,
 }

 impl<E: Engine> PartialEq for PublicKey<E> {
     fn eq(&self, other: &PublicKey<E>) -> bool {
-        self.tau_g1.0 == other.tau_g1.0 &&
-        self.tau_g1.1 == other.tau_g1.1 &&
-        self.alpha_g1.0 == other.alpha_g1.0 &&
-        self.alpha_g1.1 == other.alpha_g1.1 &&
-        self.beta_g1.0 == other.beta_g1.0 &&
-        self.beta_g1.1 == other.beta_g1.1 &&
-        self.tau_g2 == other.tau_g2 &&
-        self.alpha_g2 == other.alpha_g2 &&
-        self.beta_g2 == other.beta_g2
+        self.tau_g1.0 == other.tau_g1.0
+            && self.tau_g1.1 == other.tau_g1.1
+            && self.alpha_g1.0 == other.alpha_g1.0
+            && self.alpha_g1.1 == other.alpha_g1.1
+            && self.beta_g1.0 == other.beta_g1.0
+            && self.beta_g1.1 == other.beta_g1.1
+            && self.tau_g2 == other.tau_g2
+            && self.alpha_g2 == other.alpha_g2
+            && self.beta_g2 == other.beta_g2
     }
 }

@ -62,12 +47,11 @@ impl<E: Engine> PartialEq for PublicKey<E> {
 pub struct PrivateKey<E: Engine> {
     pub tau: E::Fr,
     pub alpha: E::Fr,
-    pub beta: E::Fr
+    pub beta: E::Fr,
 }

 /// Constructs a keypair given an RNG and a 64-byte transcript `digest`.
-pub fn keypair<R: Rng, E: Engine>(rng: &mut R, digest: &[u8]) -> (PublicKey<E>, PrivateKey<E>)
-{
+pub fn keypair<R: Rng, E: Engine>(rng: &mut R, digest: &[u8]) -> (PublicKey<E>, PrivateKey<E>) {
     assert_eq!(digest.len(), 64);

     // tau is a contribution to the "powers of tau", in a set of points of the form "tau^i * G"
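Since `keypair` asserts a 64-byte digest, callers always feed it a BLAKE2b transcript hash; the bn256 test suite earlier in this diff shows the minimal call:

    // Usage sketch (mirrors test_pubkey_serialization): bind a fresh keypair
    // to the current 64-byte transcript digest.
    let digest = (0..64).map(|_| rng.gen()).collect::<Vec<_>>();
    let (pk, sk) = keypair::<_, Bn256>(rng, &digest);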
@ -114,18 +98,13 @@ pub fn keypair<R: Rng, E: Engine>(rng: &mut R, digest: &[u8]) -> (PublicKey<E>,
         alpha_g2: pk_alpha.1,
         beta_g2: pk_beta.1,
     },
-    PrivateKey {
-        tau: tau,
-        alpha: alpha,
-        beta: beta
-    }
+    PrivateKey { tau, alpha, beta },
 )
 }

 impl<E: Engine> PublicKey<E> {
     /// Serialize the public key. Points are always in uncompressed form.
-    pub fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()>
-    {
+    pub fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
         write_point(writer, &self.tau_g1.0, UseCompression::No)?;
         write_point(writer, &self.tau_g1.1, UseCompression::No)?;

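`serialize` writes the six G1 points and three G2 points of the key uncompressed, so the output length is fixed by the parameter set; the bn256 test pins this down:

    // Usage sketch: the serialized key always occupies PUBLIC_KEY_SIZE bytes.
    let mut v = vec![];
    pk.serialize(&mut v).unwrap();
    assert_eq!(v.len(), Bn256CeremonyParameters::PUBLIC_KEY_SIZE);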
@ -145,9 +124,10 @@ impl<E: Engine> PublicKey<E> {
     /// Deserialize the public key. Points are always in uncompressed form, and
     /// always checked, since there aren't very many of them. Does not allow any
     /// points at infinity.
-    pub fn deserialize<R: Read>(reader: &mut R) -> Result<PublicKey<E>, DeserializationError>
-    {
-        fn read_uncompressed<EE: Engine, C: CurveAffine<Engine = EE, Scalar = EE::Fr>, R: Read>(reader: &mut R) -> Result<C, DeserializationError> {
+    pub fn deserialize<R: Read>(reader: &mut R) -> Result<PublicKey<E>, DeserializationError> {
+        fn read_uncompressed<EE: Engine, C: CurveAffine<Engine = EE, Scalar = EE::Fr>, R: Read>(
+            reader: &mut R,
+        ) -> Result<C, DeserializationError> {
             let mut repr = C::Uncompressed::empty();
             reader.read_exact(repr.as_mut())?;
             let v = repr.into_affine()?;
@ -176,60 +156,55 @@ impl<E: Engine> PublicKey<E> {
             tau_g1: (tau_g1_s, tau_g1_s_tau),
             alpha_g1: (alpha_g1_s, alpha_g1_s_alpha),
             beta_g1: (beta_g1_s, beta_g1_s_beta),
-            tau_g2: tau_g2,
-            alpha_g2: alpha_g2,
-            beta_g2: beta_g2
+            tau_g2,
+            alpha_g2,
+            beta_g2,
         })
     }
 }

 impl<E: Engine> PublicKey<E> {
     /// This function is intended to write the key to the memory map and calculates
     /// a position for writing into the file itself based on information whether
     /// contribution was output in compressed on uncompressed form
     pub fn write<P>(
         &self,
         output_map: &mut MmapMut,
-        accumulator_was_compressed: UseCompression
-    )
-    -> io::Result<()>
-    where P: PowersOfTauParameters
+        accumulator_was_compressed: UseCompression,
+    ) -> io::Result<()>
+    where
+        P: PowersOfTauParameters,
     {
         let mut position = match accumulator_was_compressed {
-            UseCompression::Yes => {
-                P::CONTRIBUTION_BYTE_SIZE - P::PUBLIC_KEY_SIZE
-            },
-            UseCompression::No => {
-                P::ACCUMULATOR_BYTE_SIZE
-            }
+            UseCompression::Yes => P::CONTRIBUTION_BYTE_SIZE - P::PUBLIC_KEY_SIZE,
+            UseCompression::No => P::ACCUMULATOR_BYTE_SIZE,
         };

-        (&mut output_map[position..]).write(&self.tau_g1.0.into_uncompressed().as_ref())?;
+        (&mut output_map[position..]).write_all(&self.tau_g1.0.into_uncompressed().as_ref())?;
         position += P::G1_UNCOMPRESSED_BYTE_SIZE;

-        (&mut output_map[position..]).write(&self.tau_g1.1.into_uncompressed().as_ref())?;
+        (&mut output_map[position..]).write_all(&self.tau_g1.1.into_uncompressed().as_ref())?;
         position += P::G1_UNCOMPRESSED_BYTE_SIZE;

-        (&mut output_map[position..]).write(&self.alpha_g1.0.into_uncompressed().as_ref())?;
+        (&mut output_map[position..]).write_all(&self.alpha_g1.0.into_uncompressed().as_ref())?;
         position += P::G1_UNCOMPRESSED_BYTE_SIZE;

-        (&mut output_map[position..]).write(&self.alpha_g1.1.into_uncompressed().as_ref())?;
+        (&mut output_map[position..]).write_all(&self.alpha_g1.1.into_uncompressed().as_ref())?;
         position += P::G1_UNCOMPRESSED_BYTE_SIZE;

-        (&mut output_map[position..]).write(&self.beta_g1.0.into_uncompressed().as_ref())?;
+        (&mut output_map[position..]).write_all(&self.beta_g1.0.into_uncompressed().as_ref())?;
         position += P::G1_UNCOMPRESSED_BYTE_SIZE;

-        (&mut output_map[position..]).write(&self.beta_g1.1.into_uncompressed().as_ref())?;
+        (&mut output_map[position..]).write_all(&self.beta_g1.1.into_uncompressed().as_ref())?;
         position += P::G1_UNCOMPRESSED_BYTE_SIZE;

-        (&mut output_map[position..]).write(&self.tau_g2.into_uncompressed().as_ref())?;
+        (&mut output_map[position..]).write_all(&self.tau_g2.into_uncompressed().as_ref())?;
         position += P::G2_UNCOMPRESSED_BYTE_SIZE;

-        (&mut output_map[position..]).write(&self.alpha_g2.into_uncompressed().as_ref())?;
+        (&mut output_map[position..]).write_all(&self.alpha_g2.into_uncompressed().as_ref())?;
         position += P::G2_UNCOMPRESSED_BYTE_SIZE;

-        (&mut output_map[position..]).write(&self.beta_g2.into_uncompressed().as_ref())?;
+        (&mut output_map[position..]).write_all(&self.beta_g2.into_uncompressed().as_ref())?;

         output_map.flush()?;

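`write` and `read` must agree on where the key lives inside a contribution file, and that offset depends only on how the accumulator part was stored. A sketch of the shared arithmetic (the helper name `pubkey_offset` is hypothetical; the match bodies are taken verbatim from this diff):

    // Hypothetical helper: byte offset of the public key within a contribution.
    fn pubkey_offset<P: PowersOfTauParameters>(accumulator_was_compressed: UseCompression) -> usize {
        match accumulator_was_compressed {
            // Compressed: the key occupies the tail of the contribution blob.
            UseCompression::Yes => P::CONTRIBUTION_BYTE_SIZE - P::PUBLIC_KEY_SIZE,
            // Uncompressed: the key starts right after the raw accumulator.
            UseCompression::No => P::ACCUMULATOR_BYTE_SIZE,
        }
    }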
@ -241,15 +216,21 @@ impl<E: Engine> PublicKey<E> {
     /// points at infinity.
     pub fn read<P>(
         input_map: &Mmap,
-        accumulator_was_compressed: UseCompression
+        accumulator_was_compressed: UseCompression,
     ) -> Result<Self, DeserializationError>
-    where P: PowersOfTauParameters
+    where
+        P: PowersOfTauParameters,
     {
-        fn read_uncompressed<EE: Engine, C: CurveAffine<Engine = EE, Scalar = EE::Fr>>(input_map: &Mmap, position: usize) -> Result<C, DeserializationError> {
+        fn read_uncompressed<EE: Engine, C: CurveAffine<Engine = EE, Scalar = EE::Fr>>(
+            input_map: &Mmap,
+            position: usize,
+        ) -> Result<C, DeserializationError> {
             let mut repr = C::Uncompressed::empty();
             let element_size = C::Uncompressed::size();
-            let memory_slice = input_map.get(position..position+element_size).expect("must read point data from file");
-            memory_slice.clone().read_exact(repr.as_mut())?;
+            let mut memory_slice = input_map
+                .get(position..position + element_size)
+                .expect("must read point data from file");
+            memory_slice.read_exact(repr.as_mut())?;
             let v = repr.into_affine()?;

             if v.is_zero() {
@ -260,12 +241,8 @@ impl<E: Engine> PublicKey<E> {
         }

         let mut position = match accumulator_was_compressed {
-            UseCompression::Yes => {
-                P::CONTRIBUTION_BYTE_SIZE - P::PUBLIC_KEY_SIZE
-            },
-            UseCompression::No => {
-                P::ACCUMULATOR_BYTE_SIZE
-            }
+            UseCompression::Yes => P::CONTRIBUTION_BYTE_SIZE - P::PUBLIC_KEY_SIZE,
+            UseCompression::No => P::ACCUMULATOR_BYTE_SIZE,
         };

         let tau_g1_s = read_uncompressed::<E, _>(input_map, position)?;
@ -298,9 +275,9 @@ impl<E: Engine> PublicKey<E> {
             tau_g1: (tau_g1_s, tau_g1_s_tau),
             alpha_g1: (alpha_g1_s, alpha_g1_s_alpha),
             beta_g1: (beta_g1_s, beta_g1_s_beta),
-            tau_g2: tau_g2,
-            alpha_g2: alpha_g2,
-            beta_g2: beta_g2
+            tau_g2,
+            alpha_g2,
+            beta_g2,
         })
     }
 }
@ -1,8 +1,6 @@
-#![allow(unused_imports)]
-
-pub mod bn256;
 pub mod accumulator;
 pub mod batched_accumulator;
+pub mod bn256;
 pub mod keypair;
 pub mod parameters;
 pub mod utils;
@ -1,30 +1,10 @@
-extern crate rand;
-extern crate crossbeam;
-extern crate num_cpus;
-extern crate blake2;
-extern crate generic_array;
-extern crate typenum;
-extern crate byteorder;
-extern crate bellman_ce;
-
-use bellman_ce::pairing::ff::{Field, PrimeField};
-use byteorder::{ReadBytesExt, BigEndian};
-use rand::{SeedableRng, Rng, Rand};
-use rand::chacha::ChaChaRng;
-use bellman_ce::pairing::bn256::{Bn256};
-use bellman_ce::pairing::*;
-use std::io::{self, Read, Write};
-use std::sync::{Arc, Mutex};
-use generic_array::GenericArray;
-use typenum::consts::U64;
-use blake2::{Blake2b, Digest};
+use bellman_ce::pairing::GroupDecodingError;
+
 use std::fmt;
-
-use super::keypair::*;
+use std::io;

 pub trait PowersOfTauParameters: Clone {
     const REQUIRED_POWER: usize;

     const G1_UNCOMPRESSED_BYTE_SIZE: usize;
     const G2_UNCOMPRESSED_BYTE_SIZE: usize;
     const G1_COMPRESSED_BYTE_SIZE: usize;
@ -58,13 +38,11 @@ pub trait PowersOfTauParameters: Clone {
     const EMPIRICAL_BATCH_SIZE: usize = 1 << 21;
 }

-
-
 /// Determines if point compression should be used.
 #[derive(Copy, Clone, PartialEq)]
 pub enum UseCompression {
     Yes,
-    No
+    No,
 }

 /// Determines if points should be checked for correctness during deserialization.
@ -73,16 +51,15 @@ pub enum UseCompression {
 #[derive(Copy, Clone, PartialEq)]
 pub enum CheckForCorrectness {
     Yes,
-    No
+    No,
 }

-
 /// Errors that might occur during deserialization.
 #[derive(Debug)]
 pub enum DeserializationError {
     IoError(io::Error),
     DecodingError(GroupDecodingError),
-    PointAtInfinity
+    PointAtInfinity,
 }

 impl fmt::Display for DeserializationError {
@ -90,7 +67,7 @@ impl fmt::Display for DeserializationError {
         match *self {
             DeserializationError::IoError(ref e) => write!(f, "Disk IO error: {}", e),
             DeserializationError::DecodingError(ref e) => write!(f, "Decoding error: {}", e),
-            DeserializationError::PointAtInfinity => write!(f, "Point at infinity found")
+            DeserializationError::PointAtInfinity => write!(f, "Point at infinity found"),
         }
     }
 }
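The `?` operators in keypair.rs (`reader.read_exact(repr.as_mut())?` and `repr.into_affine()?` inside functions returning `Result<_, DeserializationError>`) imply `From` conversions for the two wrapped error types. A sketch of what those impls presumably look like (they sit outside the hunks shown in this diff):

    // Assumed conversions enabling `?` on io and curve-decoding results.
    impl From<io::Error> for DeserializationError {
        fn from(e: io::Error) -> DeserializationError {
            DeserializationError::IoError(e)
        }
    }

    impl From<GroupDecodingError> for DeserializationError {
        fn from(e: GroupDecodingError) -> DeserializationError {
            DeserializationError::DecodingError(e)
        }
    }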
@ -113,5 +90,5 @@ pub enum ElementType {
     TauG2,
     AlphaG1,
     BetaG1,
-    BetaG2
+    BetaG2,
 }
@ -1,126 +1,76 @@
-extern crate rand;
-extern crate crossbeam;
-extern crate num_cpus;
-extern crate blake2;
-extern crate generic_array;
-extern crate typenum;
-extern crate byteorder;
-extern crate bellman_ce;
-
 use bellman_ce::pairing::ff::{Field, PrimeField, PrimeFieldRepr};
-use byteorder::{ReadBytesExt, BigEndian};
-use rand::{SeedableRng, Rng, Rand};
-use rand::chacha::ChaChaRng;
-use bellman_ce::pairing::bn256::{Bn256};
 use bellman_ce::pairing::*;
-use std::io::{self, Read, Write};
-use std::sync::{Arc, Mutex};
-use generic_array::GenericArray;
-use typenum::consts::U64;
 use blake2::{Blake2b, Digest};
-use std::fmt;
+use byteorder::{BigEndian, ReadBytesExt};
+use generic_array::GenericArray;
+use rand::chacha::ChaChaRng;
+use rand::{Rand, Rng, SeedableRng};

-use super::parameters::*;
+use std::io::{self, Write};
+use std::sync::Arc;
+use typenum::consts::U64;
+
+use super::parameters::UseCompression;

 /// Hashes to G2 using the first 32 bytes of `digest`. Panics if `digest` is less
 /// than 32 bytes.
-pub fn hash_to_g2<E:Engine>(mut digest: &[u8]) -> E::G2
-{
+pub fn hash_to_g2<E: Engine>(mut digest: &[u8]) -> E::G2 {
     assert!(digest.len() >= 32);

     let mut seed = Vec::with_capacity(8);

     for _ in 0..8 {
-        seed.push(digest.read_u32::<BigEndian>().expect("assertion above guarantees this to work"));
+        seed.push(
+            digest
+                .read_u32::<BigEndian>()
+                .expect("assertion above guarantees this to work"),
+        );
     }

     ChaChaRng::from_seed(&seed).gen()
 }
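`hash_to_g2` consumes the first 32 bytes of the digest as eight big-endian u32 words, seeds ChaChaRng with them, and samples a G2 point, so equal 32-byte prefixes give equal points and only those bytes matter (exactly what the test below pins down). A usage sketch:

    // Usage sketch: determinism in the first 32 bytes of the digest.
    let p1 = hash_to_g2::<Bn256>(&digest[..]);
    let p2 = hash_to_g2::<Bn256>(&digest[..]);
    assert!(p1 == p2); // same 32-byte prefix, same G2 point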
-#[test]
-fn test_hash_to_g2() {
-    assert!(
-        hash_to_g2::<Bn256>(&[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33])
-        ==
-        hash_to_g2::<Bn256>(&[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,34])
-    );
-
-    assert!(
-        hash_to_g2::<Bn256>(&[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32])
-        !=
-        hash_to_g2::<Bn256>(&[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,33])
-    );
-}
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use bellman_ce::pairing::bn256::Bn256;
+
+    #[test]
+    fn test_hash_to_g2() {
+        assert!(
+            hash_to_g2::<Bn256>(&[
+                1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+                24, 25, 26, 27, 28, 29, 30, 31, 32, 33
+            ]) == hash_to_g2::<Bn256>(&[
+                1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+                24, 25, 26, 27, 28, 29, 30, 31, 32, 34
+            ])
+        );
+
+        assert!(
+            hash_to_g2::<Bn256>(&[
+                1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+                24, 25, 26, 27, 28, 29, 30, 31, 32
+            ]) != hash_to_g2::<Bn256>(&[
+                1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+                24, 25, 26, 27, 28, 29, 30, 31, 33
+            ])
+        );
+    }
+}
-/// Computes a random linear combination over v1/v2.
-///
-/// Checking that many pairs of elements are exponentiated by
-/// the same `x` can be achieved (with high probability) with
-/// the following technique:
-///
-/// Given v1 = [a, b, c] and v2 = [as, bs, cs], compute
-/// (a*r1 + b*r2 + c*r3, (as)*r1 + (bs)*r2 + (cs)*r3) for some
-/// random r1, r2, r3. Given (g, g^s)...
-///
-/// e(g, (as)*r1 + (bs)*r2 + (cs)*r3) = e(g^s, a*r1 + b*r2 + c*r3)
-///
-/// ... with high probability.
-// fn merge_pairs<E: Engine, G: CurveAffine<Engine = E, Scalar = E::Fr>>(v1: &[G], v2: &[G]) -> (G, G)
-// {
-//     use std::sync::{Arc, Mutex};
-//     use self::rand::{thread_rng};
-
-//     assert_eq!(v1.len(), v2.len());
-
-//     let chunk = (v1.len() / num_cpus::get()) + 1;
-
-//     let s = Arc::new(Mutex::new(G::Projective::zero()));
-//     let sx = Arc::new(Mutex::new(G::Projective::zero()));
-
-//     crossbeam::scope(|scope| {
-//         for (v1, v2) in v1.chunks(chunk).zip(v2.chunks(chunk)) {
-//             let s = s.clone();
-//             let sx = sx.clone();
-
-//             scope.spawn(move || {
-//                 // We do not need to be overly cautious of the RNG
-//                 // used for this check.
-//                 let rng = &mut thread_rng();
-
-//                 let mut wnaf = Wnaf::new();
-//                 let mut local_s = G::Projective::zero();
-//                 let mut local_sx = G::Projective::zero();
-
-//                 for (v1, v2) in v1.iter().zip(v2.iter()) {
-//                     let rho = G::Scalar::rand(rng);
-//                     let mut wnaf = wnaf.scalar(rho.into_repr());
-//                     let v1 = wnaf.base(v1.into_projective());
-//                     let v2 = wnaf.base(v2.into_projective());
-
-//                     local_s.add_assign(&v1);
-//                     local_sx.add_assign(&v2);
-//                 }
-
-//                 s.lock().unwrap().add_assign(&local_s);
-//                 sx.lock().unwrap().add_assign(&local_sx);
-//             });
-//         }
-//     });
-
-//     let s = s.lock().unwrap().into_affine();
-//     let sx = sx.lock().unwrap().into_affine();
-
-//     (s, sx)
-// }
-
-fn merge_pairs<E: Engine, G: CurveAffine<Engine = E, Scalar = E::Fr>>(v1: &[G], v2: &[G]) -> (G, G)
-{
-    use self::rand::{thread_rng};
+fn merge_pairs<E: Engine, G: CurveAffine<Engine = E, Scalar = E::Fr>>(
+    v1: &[G],
+    v2: &[G],
+) -> (G, G) {
+    use rand::thread_rng;

     assert_eq!(v1.len(), v2.len());
     let rng = &mut thread_rng();

-    let randomness: Vec<<G::Scalar as PrimeField>::Repr> = (0..v1.len()).map(|_| G::Scalar::rand(rng).into_repr()).collect();
+    let randomness: Vec<<G::Scalar as PrimeField>::Repr> = (0..v1.len())
+        .map(|_| G::Scalar::rand(rng).into_repr())
+        .collect();

     let s = dense_multiexp(&v1, &randomness[..]).into_affine();
     let sx = dense_multiexp(&v2, &randomness[..]).into_affine();
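The deleted doc comment remains the clearest statement of what the new `merge_pairs` computes: with fresh random scalars r_i, it batches v1 = [a_1, a_2, ...] and v2 = [a_1·x, a_2·x, ...] into the single pair

    s  = Σ_i r_i·a_i
    sx = Σ_i r_i·(a_i·x)

so that one pairing check, e(s, h^x) = e(sx, h), certifies with overwhelming probability over the r_i that every pair shared the same exponent x. The two `dense_multiexp` calls above are exactly those two sums.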
@ -130,9 +80,8 @@ fn merge_pairs<E: Engine, G: CurveAffine<Engine = E, Scalar = E::Fr>>(v1: &[G],
 /// Construct a single pair (s, s^x) for a vector of
 /// the form [1, x, x^2, x^3, ...].
-pub fn power_pairs<E: Engine, G: CurveAffine<Engine = E, Scalar = E::Fr>>(v: &[G]) -> (G, G)
-{
-    merge_pairs::<E, _>(&v[0..(v.len()-1)], &v[1..])
+pub fn power_pairs<E: Engine, G: CurveAffine<Engine = E, Scalar = E::Fr>>(v: &[G]) -> (G, G) {
+    merge_pairs::<E, _>(&v[0..(v.len() - 1)], &v[1..])
 }

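`power_pairs` overlaps the vector with itself shifted by one, so for an honest [g, g^x, g^{x^2}, ...] every merged pair has ratio x. The bn256 test earlier in this diff is the canonical usage:

    // Usage sketch: one pairing check validates a whole powers-of-tau vector,
    // given gx = G2Affine::one().mul(x).into_affine().
    let (s, sx) = power_pairs::<Bn256, _>(&v);
    assert!(same_ratio((s, sx), (G2Affine::one(), gx)));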
/// Compute BLAKE2b("")
|
/// Compute BLAKE2b("")
|
||||||
@ -146,26 +95,20 @@ pub fn reduced_hash(old_power: u8, new_power: u8) -> GenericArray<u8, U64> {
|
|||||||
hasher.result()
|
hasher.result()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
/// Checks if pairs have the same ratio.
|
/// Checks if pairs have the same ratio.
|
||||||
/// Under the hood uses pairing to check
|
/// Under the hood uses pairing to check
|
||||||
/// x1/x2 = y1/y2 => x1*y2 = x2*y1
|
/// x1/x2 = y1/y2 => x1*y2 = x2*y1
|
||||||
pub fn same_ratio<E: Engine, G1: CurveAffine<Engine = E, Scalar = E::Fr>>(
|
pub fn same_ratio<E: Engine, G1: CurveAffine<Engine = E, Scalar = E::Fr>>(
|
||||||
g1: (G1, G1),
|
g1: (G1, G1),
|
||||||
g2: (G1::Pair, G1::Pair)
|
g2: (G1::Pair, G1::Pair),
|
||||||
) -> bool
|
) -> bool {
|
||||||
{
|
|
||||||
g1.0.pairing_with(&g2.1) == g1.1.pairing_with(&g2.0)
|
g1.0.pairing_with(&g2.1) == g1.1.pairing_with(&g2.0)
|
||||||
}
|
}
|
||||||
|
|
||||||
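`same_ratio` turns the two-discrete-log comparison into a single pairing identity: x1/x2 = y1/y2 exactly when e(x1, y2) = e(x2, y1), which is what the one-line body computes via `pairing_with`. Usage, as in the bn256 tests:

    // Usage sketch: (g1, g1^s) and (g2, g2^s) share the ratio s...
    assert!(same_ratio((g1, g1_s), (g2, g2_s)));
    // ...and swapping one side breaks it.
    assert!(!same_ratio((g1_s, g1), (g2, g2_s)));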
-pub fn write_point<W, G>(
-    writer: &mut W,
-    p: &G,
-    compression: UseCompression
-) -> io::Result<()>
-    where W: Write,
-          G: CurveAffine
+pub fn write_point<W, G>(writer: &mut W, p: &G, compression: UseCompression) -> io::Result<()>
+where
+    W: Write,
+    G: CurveAffine,
 {
     match compression {
         UseCompression::Yes => writer.write_all(p.into_compressed().as_ref()),
@ -173,13 +116,12 @@ pub fn write_point<W, G>(
     }
 }

-pub fn compute_g2_s<E: Engine> (
+pub fn compute_g2_s<E: Engine>(
     digest: &[u8],
     g1_s: &E::G1Affine,
     g1_s_x: &E::G1Affine,
-    personalization: u8
-) -> E::G2Affine
-{
+    personalization: u8,
+) -> E::G2Affine {
     let mut h = Blake2b::default();
     h.input(&[personalization]);
     h.input(digest);
@ -193,10 +135,9 @@ pub fn compute_g2_s<E: Engine> (
 /// the number of bases is the same as the number of exponents.
 #[allow(dead_code)]
 pub fn dense_multiexp<G: CurveAffine>(
-    bases: & [G],
-    exponents: & [<G::Scalar as PrimeField>::Repr]
-) -> <G as CurveAffine>::Projective
-{
+    bases: &[G],
+    exponents: &[<G::Scalar as PrimeField>::Repr],
+) -> <G as CurveAffine>::Projective {
     if exponents.len() != bases.len() {
         panic!("invalid length")
     }
@ -210,14 +151,13 @@ pub fn dense_multiexp<G: CurveAffine>(
 }

 fn dense_multiexp_inner<G: CurveAffine>(
-    bases: & [G],
+    bases: &[G],
     exponents: &[<G::Scalar as PrimeField>::Repr],
     mut skip: u32,
     c: u32,
-    handle_trivial: bool
-) -> <G as CurveAffine>::Projective
-{
-    use std::sync::{Mutex};
+    handle_trivial: bool,
+) -> <G as CurveAffine>::Projective {
+    use std::sync::Mutex;
     // Perform this region of the multiexp. We use a different strategy - go over region in parallel,
     // then over another region, etc. No Arc required
     let chunk = (bases.len() / num_cpus::get()) + 1;
@ -228,7 +168,7 @@ fn dense_multiexp_inner<G: CurveAffine>(
     crossbeam::scope(|scope| {
         for (base, exp) in bases.chunks(chunk).zip(exponents.chunks(chunk)) {
             let this_region_rwlock = arc.clone();
             // let handle =
             scope.spawn(move || {
                 let mut buckets = vec![<G as CurveAffine>::Projective::zero(); (1 << c) - 1];
                 // Accumulate the result
@ -268,42 +208,32 @@ fn dense_multiexp_inner<G: CurveAffine>(
                     acc.add_assign(&running_sum);
                 }

-                let mut guard = match this_region_rwlock.lock() {
-                    Ok(guard) => guard,
-                    Err(_) => {
-                        panic!("poisoned!");
-                        // poisoned.into_inner()
-                    }
-                };
+                let mut guard = this_region_rwlock.lock().expect("poisoned");

                 (*guard).add_assign(&acc);
             });

         }
     });

     let this_region = Arc::try_unwrap(arc).unwrap();
-    let this_region = this_region.into_inner().unwrap();
-
-    this_region
+    this_region.into_inner().unwrap()
 };

 skip += c;

 if skip >= <G::Scalar as PrimeField>::NUM_BITS {
     // There isn't another region, and this will be the highest region
-    return this;
+    this
 } else {
     // next region is actually higher than this one, so double it enough times
-    let mut next_region = dense_multiexp_inner(
-        bases, exponents, skip, c, false);
+    let mut next_region = dense_multiexp_inner(bases, exponents, skip, c, false);
     for _ in 0..c {
         next_region.double();
     }

     next_region.add_assign(&this);

-    return next_region;
+    next_region
 }
 }
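`dense_multiexp_inner` is a windowed (bucket) multiexp: each recursion level handles one c-bit window of every exponent, and repeating `next_region.double()` c times realizes the weight 2^c of the next-higher window before the regions are summed, i.e. result = this + 2^c · next_region. As used in `merge_pairs` above:

    // Usage sketch (from merge_pairs): one multiexp per side of the pairing check.
    let s = dense_multiexp(&v1, &randomness[..]).into_affine();
    let sx = dense_multiexp(&v2, &randomness[..]).into_affine();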