,
- /// Keep parameters here as a marker
- marker: std::marker::PhantomData,
+ /// The parameters used for the setup of this accumulator
+ pub parameters: &'a CeremonyParams,
}
-impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
+impl<'a, E: Engine> BatchedAccumulator<'a, E> {
    /// Calculate the contribution hash from the resulting file. The original Powers of Tau implementation
    /// used a specially formed writer to write to the file and calculate the hash on the fly, but the memory-constrained
    /// implementation now writes without a particular order, so a plain recalculation at the end
@@ -61,10 +62,8 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
}
hasher.result()
}
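For orientation, here is a minimal usage sketch of `calculate_hash` (the filename and the `Bn256` engine mirror the bin targets later in this patch; this is an illustration, not patch code):

```rust
use std::fs::OpenOptions;

use bellman_ce::pairing::bn256::Bn256;
use memmap::MmapOptions;
use powersoftau::batched_accumulator::BatchedAccumulator;

fn main() {
    // Map the challenge file read-only; calculate_hash walks the raw bytes.
    let file = OpenOptions::new()
        .read(true)
        .open("challenge")
        .expect("unable to open challenge file");
    let readable_map = unsafe {
        MmapOptions::new()
            .map(&file)
            .expect("unable to map challenge file")
    };
    // Plain recalculation over the whole file, as the doc comment describes.
    let hash = BatchedAccumulator::<Bn256>::calculate_hash(&readable_map);
    println!("challenge hash: {:02x?}", &hash[..]);
}
```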
-}
-impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
- pub fn empty() -> Self {
+ pub fn empty(parameters: &'a CeremonyParams) -> Self {
Self {
tau_powers_g1: vec![],
tau_powers_g2: vec![],
@@ -72,32 +71,30 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
beta_tau_powers_g1: vec![],
beta_g2: E::G2Affine::zero(),
hash: blank_hash(),
-            marker: std::marker::PhantomData::<E> {},
+ parameters,
}
}
-}
-impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
- fn g1_size(compression: UseCompression) -> usize {
+ fn g1_size(&self, compression: UseCompression) -> usize {
match compression {
- UseCompression::Yes => P::G1_COMPRESSED_BYTE_SIZE,
- UseCompression::No => P::G1_UNCOMPRESSED_BYTE_SIZE,
+ UseCompression::Yes => self.parameters.curve.g1_compressed,
+ UseCompression::No => self.parameters.curve.g1,
}
}
- fn g2_size(compression: UseCompression) -> usize {
+ fn g2_size(&self, compression: UseCompression) -> usize {
match compression {
- UseCompression::Yes => P::G2_COMPRESSED_BYTE_SIZE,
- UseCompression::No => P::G2_UNCOMPRESSED_BYTE_SIZE,
+ UseCompression::Yes => self.parameters.curve.g2_compressed,
+ UseCompression::No => self.parameters.curve.g2,
}
}
- fn get_size(element_type: ElementType, compression: UseCompression) -> usize {
+ fn get_size(&self, element_type: ElementType, compression: UseCompression) -> usize {
match element_type {
ElementType::AlphaG1 | ElementType::BetaG1 | ElementType::TauG1 => {
- Self::g1_size(compression)
+ self.g1_size(compression)
}
- ElementType::BetaG2 | ElementType::TauG2 => Self::g2_size(compression),
+ ElementType::BetaG2 | ElementType::TauG2 => self.g2_size(compression),
}
}
@@ -111,24 +108,25 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
    /// The public key is appended to the end of the file, but it's irrelevant to the accumulator itself
fn calculate_mmap_position(
+ &self,
index: usize,
element_type: ElementType,
compression: UseCompression,
) -> usize {
- let g1_size = Self::g1_size(compression);
- let g2_size = Self::g2_size(compression);
- let required_tau_g1_power = P::TAU_POWERS_G1_LENGTH;
- let required_power = P::TAU_POWERS_LENGTH;
+ let g1_size = self.g1_size(compression);
+ let g2_size = self.g2_size(compression);
+ let required_tau_g1_power = self.parameters.powers_g1_length;
+ let required_power = self.parameters.powers_length;
+ let parameters = &self.parameters;
let position = match element_type {
ElementType::TauG1 => {
let mut position = 0;
position += g1_size * index;
assert!(
- index < P::TAU_POWERS_G1_LENGTH,
+ index < parameters.powers_g1_length,
format!(
"Index of TauG1 element written must not exceed {}, while it's {}",
- P::TAU_POWERS_G1_LENGTH,
- index
+ parameters.powers_g1_length, index
)
);
@@ -138,11 +136,10 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
let mut position = 0;
position += g1_size * required_tau_g1_power;
assert!(
- index < P::TAU_POWERS_LENGTH,
+ index < required_power,
format!(
"Index of TauG2 element written must not exceed {}, while it's {}",
- P::TAU_POWERS_LENGTH,
- index
+ required_power, index
)
);
position += g2_size * index;
@@ -154,11 +151,10 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
position += g1_size * required_tau_g1_power;
position += g2_size * required_power;
assert!(
- index < P::TAU_POWERS_LENGTH,
+ index < required_power,
format!(
"Index of AlphaG1 element written must not exceed {}, while it's {}",
- P::TAU_POWERS_LENGTH,
- index
+ required_power, index
)
);
position += g1_size * index;
@@ -171,11 +167,10 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
position += g2_size * required_power;
position += g1_size * required_power;
assert!(
- index < P::TAU_POWERS_LENGTH,
+ index < required_power,
format!(
"Index of BetaG1 element written must not exceed {}, while it's {}",
- P::TAU_POWERS_LENGTH,
- index
+ required_power, index
)
);
position += g1_size * index;
@@ -193,14 +188,14 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
}
};
- position + P::HASH_SIZE
+ position + self.parameters.hash_size
}
}
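To make the offset arithmetic above easier to audit, the same layout can be written out as region start offsets. This is a standalone sketch (the function and its names are ours, not patch code): the hash comes first, then all TauG1 powers, then TauG2, AlphaG1, BetaG1, and finally a single BetaG2 point.

```rust
/// Start offsets of each region in the accumulator file, mirroring
/// `calculate_mmap_position`. `g1`/`g2` are element byte sizes, `n_g1` is
/// powers_g1_length, `n` is powers_length.
fn region_offsets(hash_size: usize, g1: usize, g2: usize, n_g1: usize, n: usize) -> [usize; 5] {
    let tau_g1 = hash_size;          // the blank/previous hash comes first
    let tau_g2 = tau_g1 + g1 * n_g1; // after powers_g1_length TauG1 elements
    let alpha_g1 = tau_g2 + g2 * n;  // after powers_length TauG2 elements
    let beta_g1 = alpha_g1 + g1 * n; // after powers_length AlphaG1 elements
    let beta_g2 = beta_g1 + g1 * n;  // after powers_length BetaG1 elements
    [tau_g1, tau_g2, alpha_g1, beta_g1, beta_g2]
}
```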
-/// Verifies a transformation of the `Accumulator` with the `PublicKey`, given a 64-byte transcript `digest`.
-pub fn verify_transform<E: Engine, P: PowersOfTauParameters>(
-    before: &BatchedAccumulator<E, P>,
-    after: &BatchedAccumulator<E, P>,
+/// Verifies a transformation of the `BatchedAccumulator` with the `PublicKey`, given a 64-byte transcript `digest`.
+pub fn verify_transform<E: Engine>(
+    before: &BatchedAccumulator<E>,
+    after: &BatchedAccumulator<E>,
    key: &PublicKey<E>,
digest: &[u8],
) -> bool {
@@ -290,9 +285,9 @@ pub fn verify_transform<E: Engine, P: PowersOfTauParameters>(
true
}
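The call shape of the rewritten function, as a hedged sketch (the wrapper and its names are ours):

```rust
/// `before`/`after` are consecutive accumulator states, `pubkey` is the
/// participant's public key, and `digest` is the 64-byte transcript digest.
fn check_contribution<'a, E: Engine>(
    before: &BatchedAccumulator<'a, E>,
    after: &BatchedAccumulator<'a, E>,
    pubkey: &PublicKey<E>,
    digest: &[u8],
) -> bool {
    verify_transform(before, after, pubkey, digest)
}
```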
-impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
+impl<'a, E: Engine> BatchedAccumulator<'a, E> {
/// Verifies a transformation of the `Accumulator` with the `PublicKey`, given a 64-byte transcript `digest`.
- #[allow(clippy::too_many_arguments)]
+ #[allow(clippy::too_many_arguments, clippy::cognitive_complexity)]
pub fn verify_transformation(
input_map: &Mmap,
output_map: &Mmap,
@@ -302,6 +297,7 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
output_is_compressed: UseCompression,
check_input_for_correctness: CheckForCorrectness,
check_output_for_correctness: CheckForCorrectness,
+ parameters: &'a CeremonyParams,
) -> bool {
use itertools::MinMaxResult::MinMax;
assert_eq!(digest.len(), 64);
@@ -314,22 +310,22 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
// g1^s / g1^(s*x) = g2^s / g2^(s*x)
if !same_ratio(key.tau_g1, (tau_g2_s, key.tau_g2)) {
- println!("Invalid ratio key.tau_g1, (tau_g2_s, key.tau_g2)");
+ error!("Invalid ratio key.tau_g1, (tau_g2_s, key.tau_g2)");
return false;
}
if !same_ratio(key.alpha_g1, (alpha_g2_s, key.alpha_g2)) {
- println!("Invalid ratio key.alpha_g1, (alpha_g2_s, key.alpha_g2)");
+ error!("Invalid ratio key.alpha_g1, (alpha_g2_s, key.alpha_g2)");
return false;
}
if !same_ratio(key.beta_g1, (beta_g2_s, key.beta_g2)) {
- println!("Invalid ratio key.beta_g1, (beta_g2_s, key.beta_g2)");
+ error!("Invalid ratio key.beta_g1, (beta_g2_s, key.beta_g2)");
return false;
}
// Load accumulators AND perform computations
- let mut before = Self::empty();
- let mut after = Self::empty();
+ let mut before = Self::empty(parameters);
+ let mut after = Self::empty(parameters);
// these checks only touch a part of the accumulator, so read two elements
@@ -356,11 +352,11 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
// Check the correctness of the generators for tau powers
if after.tau_powers_g1[0] != E::G1Affine::one() {
- println!("tau_powers_g1[0] != 1");
+ error!("tau_powers_g1[0] != 1");
return false;
}
if after.tau_powers_g2[0] != E::G2Affine::one() {
- println!("tau_powers_g2[0] != 1");
+ error!("tau_powers_g2[0] != 1");
return false;
}
@@ -369,7 +365,7 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
(before.tau_powers_g1[1], after.tau_powers_g1[1]),
(tau_g2_s, key.tau_g2),
) {
- println!("Invalid ratio (before.tau_powers_g1[1], after.tau_powers_g1[1]), (tau_g2_s, key.tau_g2)");
+ error!("Invalid ratio (before.tau_powers_g1[1], after.tau_powers_g1[1]), (tau_g2_s, key.tau_g2)");
return false;
}
@@ -378,7 +374,7 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
(before.alpha_tau_powers_g1[0], after.alpha_tau_powers_g1[0]),
(alpha_g2_s, key.alpha_g2),
) {
- println!("Invalid ratio (before.alpha_tau_powers_g1[0], after.alpha_tau_powers_g1[0]), (alpha_g2_s, key.alpha_g2)");
+ error!("Invalid ratio (before.alpha_tau_powers_g1[0], after.alpha_tau_powers_g1[0]), (alpha_g2_s, key.alpha_g2)");
return false;
}
@@ -387,14 +383,14 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
(before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]),
(beta_g2_s, key.beta_g2),
) {
- println!("Invalid ratio (before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (beta_g2_s, key.beta_g2)");
+ error!("Invalid ratio (before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (beta_g2_s, key.beta_g2)");
return false;
}
if !same_ratio(
(before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]),
(before.beta_g2, after.beta_g2),
) {
- println!("Invalid ratio (before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (before.beta_g2, after.beta_g2)");
+ error!("Invalid ratio (before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (before.beta_g2, after.beta_g2)");
return false;
}
}
@@ -408,16 +404,11 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
    // one does not need to care about the overlap between chunks
let mut tau_powers_last_first_chunks = vec![E::G1Affine::zero(); 2];
- for chunk in &(0..P::TAU_POWERS_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE) {
+ let tau_powers_length = parameters.powers_length;
+ for chunk in &(0..tau_powers_length).chunks(parameters.batch_size) {
if let MinMax(start, end) = chunk.minmax() {
// extra 1 to ensure intersection between chunks and ensure we don't overflow
- let size = end - start
- + 1
- + if end == P::TAU_POWERS_LENGTH - 1 {
- 0
- } else {
- 1
- };
+ let size = end - start + 1 + if end == tau_powers_length - 1 { 0 } else { 1 };
before
.read_chunk(
start,
@@ -452,47 +443,46 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
power_pairs(&after.tau_powers_g1),
(tau_powers_g2_0, tau_powers_g2_1),
) {
- println!("Invalid ratio power_pairs(&after.tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)");
+ error!("Invalid ratio power_pairs(&after.tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)");
return false;
}
if !same_ratio(
power_pairs(&after.tau_powers_g2),
(tau_powers_g1_0, tau_powers_g1_1),
) {
- println!("Invalid ratio power_pairs(&after.tau_powers_g2), (tau_powers_g1_0, tau_powers_g1_1)");
+ error!("Invalid ratio power_pairs(&after.tau_powers_g2), (tau_powers_g1_0, tau_powers_g1_1)");
return false;
}
if !same_ratio(
power_pairs(&after.alpha_tau_powers_g1),
(tau_powers_g2_0, tau_powers_g2_1),
) {
- println!("Invalid ratio power_pairs(&after.alpha_tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)");
+ error!("Invalid ratio power_pairs(&after.alpha_tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)");
return false;
}
if !same_ratio(
power_pairs(&after.beta_tau_powers_g1),
(tau_powers_g2_0, tau_powers_g2_1),
) {
- println!("Invalid ratio power_pairs(&after.beta_tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)");
+ error!("Invalid ratio power_pairs(&after.beta_tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)");
return false;
}
- if end == P::TAU_POWERS_LENGTH - 1 {
+ if end == tau_powers_length - 1 {
tau_powers_last_first_chunks[0] = after.tau_powers_g1[size - 1];
}
- println!("Done processing {} powers of tau", end);
+ info!("Done processing {} powers of tau", end);
} else {
panic!("Chunk does not have a min and max");
}
}
- for chunk in
- &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE)
+ for chunk in &(tau_powers_length..parameters.powers_g1_length).chunks(parameters.batch_size)
{
if let MinMax(start, end) = chunk.minmax() {
// extra 1 to ensure intersection between chunks and ensure we don't overflow
let size = end - start
+ 1
- + if end == P::TAU_POWERS_G1_LENGTH - 1 {
+ + if end == parameters.powers_g1_length - 1 {
0
} else {
1
@@ -542,13 +532,13 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
power_pairs(&after.tau_powers_g1),
(tau_powers_g2_0, tau_powers_g2_1),
) {
- println!("Invalid ratio power_pairs(&after.tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1) in extra TauG1 contribution");
+ error!("Invalid ratio power_pairs(&after.tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1) in extra TauG1 contribution");
return false;
}
- if start == P::TAU_POWERS_LENGTH {
+ if start == parameters.powers_length {
tau_powers_last_first_chunks[1] = after.tau_powers_g1[0];
}
- println!("Done processing {} powers of tau", end);
+ info!("Done processing {} powers of tau", end);
} else {
panic!("Chunk does not have a min and max");
}
@@ -558,7 +548,7 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
power_pairs(&tau_powers_last_first_chunks),
(tau_powers_g2_0, tau_powers_g2_1),
) {
- println!("Invalid ratio power_pairs(&after.tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1) in TauG1 contribution intersection");
+ error!("Invalid ratio power_pairs(&after.tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1) in TauG1 contribution intersection");
}
true
}
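The batching rule above is subtle enough to deserve a standalone illustration (ours, not patch code): every chunk except the last reads one extra element, so the power-pair ratio checks share a point across chunk boundaries and nothing is left unverified at the seams.

```rust
// Chunk [start, end] reads `end - start + 1` elements, plus one extra
// unless it is the final chunk, so consecutive chunks overlap by one point.
fn chunk_read_size(start: usize, end: usize, total: usize) -> usize {
    end - start + 1 + if end == total - 1 { 0 } else { 1 }
}

fn main() {
    assert_eq!(chunk_read_size(0, 9, 20), 11);   // interior chunk: one extra element
    assert_eq!(chunk_read_size(10, 19, 20), 10); // final chunk: exact size
}
```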
@@ -567,12 +557,13 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
input_map: &Mmap,
output_map: &mut MmapMut,
check_input_for_correctness: CheckForCorrectness,
+ parameters: &'a CeremonyParams,
) -> io::Result<()> {
use itertools::MinMaxResult::MinMax;
- let mut accumulator = Self::empty();
+ let mut accumulator = Self::empty(parameters);
- for chunk in &(0..P::TAU_POWERS_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE) {
+ for chunk in &(0..parameters.powers_length).chunks(parameters.batch_size) {
if let MinMax(start, end) = chunk.minmax() {
let size = end - start + 1;
accumulator
@@ -596,7 +587,7 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
}
for chunk in
- &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE)
+ &(parameters.powers_length..parameters.powers_g1_length).chunks(parameters.batch_size)
{
if let MinMax(start, end) = chunk.minmax() {
let size = end - start + 1;
@@ -643,10 +634,11 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
input_map: &Mmap,
check_input_for_correctness: CheckForCorrectness,
compression: UseCompression,
-    ) -> io::Result<BatchedAccumulator<E, P>> {
+ parameters: &'a CeremonyParams,
+    ) -> io::Result<BatchedAccumulator<'a, E>> {
use itertools::MinMaxResult::MinMax;
- let mut accumulator = Self::empty();
+ let mut accumulator = Self::empty(parameters);
let mut tau_powers_g1 = vec![];
let mut tau_powers_g2 = vec![];
@@ -654,7 +646,7 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
let mut beta_tau_powers_g1 = vec![];
let mut beta_g2 = vec![];
- for chunk in &(0..P::TAU_POWERS_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE) {
+ for chunk in &(0..parameters.powers_length).chunks(parameters.batch_size) {
if let MinMax(start, end) = chunk.minmax() {
let size = end - start + 1;
accumulator
@@ -684,7 +676,7 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
}
for chunk in
- &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE)
+ &(parameters.powers_length..parameters.powers_g1_length).chunks(parameters.batch_size)
{
if let MinMax(start, end) = chunk.minmax() {
let size = end - start + 1;
@@ -734,7 +726,7 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
beta_tau_powers_g1,
beta_g2: beta_g2[0],
hash: blank_hash(),
-            marker: std::marker::PhantomData::<E> {},
+ parameters,
})
}
@@ -742,19 +734,20 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
&mut self,
output_map: &mut MmapMut,
compression: UseCompression,
+ parameters: &CeremonyParams,
) -> io::Result<()> {
use itertools::MinMaxResult::MinMax;
- for chunk in &(0..P::TAU_POWERS_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE) {
+ for chunk in &(0..parameters.powers_length).chunks(parameters.batch_size) {
if let MinMax(start, end) = chunk.minmax() {
-            let mut tmp_acc = BatchedAccumulator::<E, P> {
+            let mut tmp_acc = BatchedAccumulator::<E> {
tau_powers_g1: (&self.tau_powers_g1[start..=end]).to_vec(),
tau_powers_g2: (&self.tau_powers_g2[start..=end]).to_vec(),
alpha_tau_powers_g1: (&self.alpha_tau_powers_g1[start..=end]).to_vec(),
beta_tau_powers_g1: (&self.beta_tau_powers_g1[start..=end]).to_vec(),
beta_g2: self.beta_g2,
hash: self.hash,
-                marker: std::marker::PhantomData::<E> {},
+ parameters,
};
tmp_acc.write_chunk(start, compression, output_map)?;
} else {
@@ -763,17 +756,17 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
}
for chunk in
- &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE)
+ &(parameters.powers_length..parameters.powers_g1_length).chunks(parameters.batch_size)
{
if let MinMax(start, end) = chunk.minmax() {
-            let mut tmp_acc = BatchedAccumulator::<E, P> {
+            let mut tmp_acc = BatchedAccumulator::<E> {
tau_powers_g1: (&self.tau_powers_g1[start..=end]).to_vec(),
tau_powers_g2: vec![],
alpha_tau_powers_g1: vec![],
beta_tau_powers_g1: vec![],
beta_g2: self.beta_g2,
hash: self.hash,
-                marker: std::marker::PhantomData::<E> {},
+ parameters,
};
tmp_acc.write_chunk(start, compression, output_map)?;
} else {
@@ -783,9 +776,7 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
Ok(())
}
-}
-impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
pub fn read_chunk(
&mut self,
from: usize,
@@ -924,7 +915,7 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
let index = from + i;
match element_type {
ElementType::TauG1 => {
- if index >= P::TAU_POWERS_G1_LENGTH {
+ if index >= self.parameters.powers_g1_length {
return Ok(vec![]);
}
}
@@ -932,13 +923,13 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
| ElementType::BetaG1
| ElementType::BetaG2
| ElementType::TauG2 => {
- if index >= P::TAU_POWERS_LENGTH {
+ if index >= self.parameters.powers_length {
return Ok(vec![]);
}
}
};
- let position = Self::calculate_mmap_position(index, element_type, compression);
- let element_size = Self::get_size(element_type, compression);
+ let position = self.calculate_mmap_position(index, element_type, compression);
+ let element_size = self.get_size(element_type, compression);
let mut memory_slice = input_map
.get(position..position + element_size)
.expect("must read point data from file");
@@ -1021,9 +1012,7 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
None => Ok(res_affine),
}
}
-}
-impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
fn write_all(
&mut self,
chunk_start: usize,
@@ -1086,7 +1075,7 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
{
match element_type {
ElementType::TauG1 => {
- if index >= P::TAU_POWERS_G1_LENGTH {
+ if index >= self.parameters.powers_g1_length {
return Ok(());
}
}
@@ -1094,7 +1083,7 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
| ElementType::BetaG1
| ElementType::BetaG2
| ElementType::TauG2 => {
- if index >= P::TAU_POWERS_LENGTH {
+ if index >= self.parameters.powers_length {
return Ok(());
}
}
@@ -1102,12 +1091,12 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
match compression {
UseCompression::Yes => {
- let position = Self::calculate_mmap_position(index, element_type, compression);
+ let position = self.calculate_mmap_position(index, element_type, compression);
// let size = self.get_size(element_type, compression);
(&mut output_map[position..]).write_all(p.into_compressed().as_ref())?;
}
UseCompression::No => {
- let position = Self::calculate_mmap_position(index, element_type, compression);
+ let position = self.calculate_mmap_position(index, element_type, compression);
// let size = self.get_size(element_type, compression);
(&mut output_map[position..]).write_all(p.into_uncompressed().as_ref())?;
}
@@ -1124,7 +1113,7 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
output_map: &mut MmapMut,
) -> io::Result<()> {
self.write_all(chunk_start, compression, ElementType::TauG1, output_map)?;
- if chunk_start < P::TAU_POWERS_LENGTH {
+ if chunk_start < self.parameters.powers_length {
self.write_all(chunk_start, compression, ElementType::TauG2, output_map)?;
self.write_all(chunk_start, compression, ElementType::AlphaG1, output_map)?;
self.write_all(chunk_start, compression, ElementType::BetaG1, output_map)?;
@@ -1133,9 +1122,7 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
Ok(())
}
-}
-impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
/// Transforms the accumulator with a private key.
    /// Due to the large amount of data in the previous accumulator, even in compressed form,
/// this function can now work on compressed input. Output can be made in any form
@@ -1149,6 +1136,7 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
compress_the_output: UseCompression,
check_input_for_correctness: CheckForCorrectness,
key: &PrivateKey,
+ parameters: &'a CeremonyParams,
) -> io::Result<()> {
/// Exponentiate a large number of points, with an optional coefficient to be applied to the
/// exponent.
@@ -1205,11 +1193,11 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
}
}
- let mut accumulator = Self::empty();
+ let mut accumulator = Self::empty(parameters);
use itertools::MinMaxResult::MinMax;
- for chunk in &(0..P::TAU_POWERS_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE) {
+ for chunk in &(0..parameters.powers_length).chunks(parameters.batch_size) {
if let MinMax(start, end) = chunk.minmax() {
let size = end - start + 1;
accumulator
@@ -1258,14 +1246,14 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
"your contribution happened to produce a point at infinity, please re-run"
);
accumulator.write_chunk(start, compress_the_output, output_map)?;
- println!("Done processing {} powers of tau", end);
+ info!("Done processing {} powers of tau", end);
} else {
panic!("Chunk does not have a min and max");
}
}
for chunk in
- &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE)
+ &(parameters.powers_length..parameters.powers_g1_length).chunks(parameters.batch_size)
{
if let MinMax(start, end) = chunk.minmax() {
let size = end - start + 1;
@@ -1307,7 +1295,7 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
//assert!(!accumulator.beta_g2.is_zero(), "your contribution happened to produce a point at infinity, please re-run");
accumulator.write_chunk(start, compress_the_output, output_map)?;
- println!("Done processing {} powers of tau", end);
+ info!("Done processing {} powers of tau", end);
} else {
panic!("Chunk does not have a min and max");
}
@@ -1315,17 +1303,17 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
Ok(())
}
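For orientation, the bin targets further down call `transform` roughly as follows. This is a hedged sketch of the post-patch call shape (the wrapper is ours; the compression constants mirror `compute_constrained`):

```rust
fn contribute(
    readable_map: &Mmap,
    writable_map: &mut MmapMut,
    privkey: &PrivateKey<Bn256>,
    parameters: &CeremonyParams,
) -> std::io::Result<()> {
    // Uncompressed challenge in, compressed response out.
    BatchedAccumulator::<Bn256>::transform(
        readable_map,
        writable_map,
        UseCompression::No,
        UseCompression::Yes,
        CheckForCorrectness::No,
        privkey,
        parameters,
    )
}
```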
-}
-impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
    /// Generates the initial accumulator, with every element set to the group generator.
pub fn generate_initial(
output_map: &mut MmapMut,
compress_the_output: UseCompression,
+ parameters: &'a CeremonyParams,
) -> io::Result<()> {
use itertools::MinMaxResult::MinMax;
- for chunk in &(0..P::TAU_POWERS_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE) {
+ // Write the first Tau powers in chunks where every initial element is a G1 or G2 `one`
+ for chunk in &(0..parameters.powers_length).chunks(parameters.batch_size) {
if let MinMax(start, end) = chunk.minmax() {
let size = end - start + 1;
let mut accumulator = Self {
@@ -1335,18 +1323,19 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
beta_tau_powers_g1: vec![E::G1Affine::one(); size],
beta_g2: E::G2Affine::one(),
hash: blank_hash(),
-                marker: std::marker::PhantomData::<E> {},
+ parameters,
};
accumulator.write_chunk(start, compress_the_output, output_map)?;
- println!("Done processing {} powers of tau", end);
+ info!("Done processing {} powers of tau", end);
} else {
panic!("Chunk does not have a min and max");
}
}
+ // Write the next `G1 length` elements
for chunk in
- &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).chunks(P::EMPIRICAL_BATCH_SIZE)
+ &(parameters.powers_length..parameters.powers_g1_length).chunks(parameters.batch_size)
{
if let MinMax(start, end) = chunk.minmax() {
let size = end - start + 1;
@@ -1357,11 +1346,11 @@ impl<E: Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
beta_tau_powers_g1: vec![],
beta_g2: E::G2Affine::one(),
hash: blank_hash(),
-                marker: std::marker::PhantomData::<E> {},
+ parameters,
};
accumulator.write_chunk(start, compress_the_output, output_map)?;
- println!("Done processing {} powers of tau", end);
+ info!("Done processing {} powers of tau", end);
} else {
panic!("Chunk does not have a min and max");
}
diff --git a/powersoftau/src/bin/beacon_constrained.rs b/powersoftau/src/bin/beacon_constrained.rs
index b858c95..3fede95 100644
--- a/powersoftau/src/bin/beacon_constrained.rs
+++ b/powersoftau/src/bin/beacon_constrained.rs
@@ -1,17 +1,15 @@
-use powersoftau::bn256::Bn256CeremonyParameters;
-
-use powersoftau::batched_accumulator::BatchedAccumulator;
-use powersoftau::keypair::keypair;
-use powersoftau::parameters::{CheckForCorrectness, UseCompression};
+use powersoftau::{
+ batched_accumulator::BatchedAccumulator,
+ keypair::keypair,
+ parameters::{CeremonyParams, CheckForCorrectness, CurveKind, UseCompression},
+};
use bellman_ce::pairing::bn256::Bn256;
-use memmap::*;
+use memmap::MmapOptions;
use std::fs::OpenOptions;
use std::io::Write;
-use powersoftau::parameters::PowersOfTauParameters;
-
#[macro_use]
extern crate hex_literal;
@@ -22,20 +20,24 @@ const CHECK_INPUT_CORRECTNESS: CheckForCorrectness = CheckForCorrectness::No;
#[allow(clippy::modulo_one)]
fn main() {
    let args: Vec<String> = std::env::args().collect();
- if args.len() != 3 {
- println!("Usage: \n ");
+ if args.len() != 5 {
+ println!("Usage: \n ");
std::process::exit(exitcode::USAGE);
}
let challenge_filename = &args[1];
let response_filename = &args[2];
+ let circuit_power = args[3].parse().expect("could not parse circuit power");
+ let batch_size = args[4].parse().expect("could not parse batch size");
+
+ let parameters = CeremonyParams::new(CurveKind::Bn256, circuit_power, batch_size);
println!(
"Will contribute a random beacon to accumulator for 2^{} powers of tau",
- Bn256CeremonyParameters::REQUIRED_POWER
+ parameters.size,
);
println!(
"In total will generate up to {} powers",
- Bn256CeremonyParameters::TAU_POWERS_G1_LENGTH
+ parameters.powers_g1_length,
);
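For reference, the derived lengths printed here follow the same formulas as the trait constants this patch removes (a hedged restatement, not new behavior):

```rust
fn main() {
    let circuit_power: usize = 10; // example value
    let powers_length = 1usize << circuit_power;     // parameters.powers_length, old TAU_POWERS_LENGTH
    let powers_g1_length = (powers_length << 1) - 1; // parameters.powers_g1_length, old TAU_POWERS_G1_LENGTH
    assert_eq!(powers_g1_length, 2047);
}
```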
// Create an RNG based on the outcome of the random beacon
@@ -102,8 +104,8 @@ fn main() {
.metadata()
.expect("unable to get filesystem metadata for challenge file");
let expected_challenge_length = match INPUT_IS_COMPRESSED {
- UseCompression::Yes => Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE,
- UseCompression::No => Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE,
+ UseCompression::Yes => parameters.contribution_size,
+ UseCompression::No => parameters.accumulator_size,
};
if metadata.len() != (expected_challenge_length as u64) {
@@ -130,11 +132,8 @@ fn main() {
.expect("unable to create response file in this directory");
let required_output_length = match COMPRESS_THE_OUTPUT {
- UseCompression::Yes => Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE,
- UseCompression::No => {
- Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE
- + Bn256CeremonyParameters::PUBLIC_KEY_SIZE
- }
+ UseCompression::Yes => parameters.contribution_size,
+ UseCompression::No => parameters.accumulator_size + parameters.public_key_size,
};
writer
@@ -149,8 +148,7 @@ fn main() {
println!("Calculating previous contribution hash...");
- let current_accumulator_hash =
-        BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&readable_map);
+    let current_accumulator_hash = BatchedAccumulator::<Bn256>::calculate_hash(&readable_map);
{
println!("Contributing on top of the hash:");
@@ -181,28 +179,28 @@ fn main() {
println!("Computing and writing your contribution, this could take a while...");
// this computes a transformation and writes it
-    BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::transform(
+    BatchedAccumulator::<Bn256>::transform(
&readable_map,
&mut writable_map,
INPUT_IS_COMPRESSED,
COMPRESS_THE_OUTPUT,
CHECK_INPUT_CORRECTNESS,
&privkey,
+ ¶meters,
)
.expect("must transform with the key");
println!("Finishing writing your contribution to response file...");
// Write the public key
pubkey
-        .write::<Bn256CeremonyParameters>(&mut writable_map, COMPRESS_THE_OUTPUT)
+ .write(&mut writable_map, COMPRESS_THE_OUTPUT, ¶meters)
.expect("unable to write public key");
// Get the hash of the contribution, so the user can compare later
let output_readonly = writable_map
.make_read_only()
.expect("must make a map readonly");
- let contribution_hash =
-        BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&output_readonly);
+    let contribution_hash = BatchedAccumulator::<Bn256>::calculate_hash(&output_readonly);
print!(
"Done!\n\n\
diff --git a/powersoftau/src/bin/compute_constrained.rs b/powersoftau/src/bin/compute_constrained.rs
index 255f9f7..7016703 100644
--- a/powersoftau/src/bin/compute_constrained.rs
+++ b/powersoftau/src/bin/compute_constrained.rs
@@ -1,5 +1,4 @@
use powersoftau::batched_accumulator::BatchedAccumulator;
-use powersoftau::bn256::Bn256CeremonyParameters;
use powersoftau::keypair::keypair;
use powersoftau::parameters::{CheckForCorrectness, UseCompression};
@@ -9,7 +8,7 @@ use std::fs::OpenOptions;
use std::io::{Read, Write};
-use powersoftau::parameters::PowersOfTauParameters;
+use powersoftau::parameters::{CeremonyParams, CurveKind};
const INPUT_IS_COMPRESSED: UseCompression = UseCompression::No;
const COMPRESS_THE_OUTPUT: UseCompression = UseCompression::Yes;
@@ -17,20 +16,24 @@ const CHECK_INPUT_CORRECTNESS: CheckForCorrectness = CheckForCorrectness::No;
fn main() {
    let args: Vec<String> = std::env::args().collect();
- if args.len() != 3 {
- println!("Usage: \n ");
+ if args.len() != 5 {
+ println!("Usage: \n ");
std::process::exit(exitcode::USAGE);
}
let challenge_filename = &args[1];
let response_filename = &args[2];
+ let circuit_power = args[3].parse().expect("could not parse circuit power");
+ let batch_size = args[4].parse().expect("could not parse batch size");
+
+ let parameters = CeremonyParams::new(CurveKind::Bn256, circuit_power, batch_size);
println!(
"Will contribute to accumulator for 2^{} powers of tau",
- Bn256CeremonyParameters::REQUIRED_POWER
+ parameters.size
);
println!(
"In total will generate up to {} powers",
- Bn256CeremonyParameters::TAU_POWERS_G1_LENGTH
+ parameters.powers_g1_length
);
// Create an RNG based on a mixture of system randomness and user provided randomness
@@ -85,8 +88,8 @@ fn main() {
.metadata()
.expect("unable to get filesystem metadata for challenge file");
let expected_challenge_length = match INPUT_IS_COMPRESSED {
- UseCompression::Yes => Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE,
- UseCompression::No => Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE,
+ UseCompression::Yes => parameters.contribution_size,
+ UseCompression::No => parameters.accumulator_size,
};
if metadata.len() != (expected_challenge_length as u64) {
@@ -113,11 +116,8 @@ fn main() {
.expect("unable to create response file");
let required_output_length = match COMPRESS_THE_OUTPUT {
- UseCompression::Yes => Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE,
- UseCompression::No => {
- Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE
- + Bn256CeremonyParameters::PUBLIC_KEY_SIZE
- }
+ UseCompression::Yes => parameters.contribution_size,
+ UseCompression::No => parameters.accumulator_size + parameters.public_key_size,
};
writer
@@ -136,8 +136,7 @@ fn main() {
UseCompression::No == INPUT_IS_COMPRESSED,
"Hashing the compressed file in not yet defined"
);
- let current_accumulator_hash =
-        BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&readable_map);
+    let current_accumulator_hash = BatchedAccumulator::<Bn256>::calculate_hash(&readable_map);
{
println!("`challenge` file contains decompressed points and has a hash:");
@@ -190,13 +189,14 @@ fn main() {
println!("Computing and writing your contribution, this could take a while...");
// this computes a transformation and writes it
-    BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::transform(
+    BatchedAccumulator::<Bn256>::transform(
&readable_map,
&mut writable_map,
INPUT_IS_COMPRESSED,
COMPRESS_THE_OUTPUT,
CHECK_INPUT_CORRECTNESS,
&privkey,
+ ¶meters,
)
.expect("must transform with the key");
@@ -204,7 +204,7 @@ fn main() {
// Write the public key
pubkey
-        .write::<Bn256CeremonyParameters>(&mut writable_map, COMPRESS_THE_OUTPUT)
+ .write(&mut writable_map, COMPRESS_THE_OUTPUT, ¶meters)
.expect("unable to write public key");
writable_map.flush().expect("must flush a memory map");
@@ -213,8 +213,7 @@ fn main() {
let output_readonly = writable_map
.make_read_only()
.expect("must make a map readonly");
- let contribution_hash =
-        BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&output_readonly);
+    let contribution_hash = BatchedAccumulator::<Bn256>::calculate_hash(&output_readonly);
print!(
"Done!\n\n\
diff --git a/powersoftau/src/bin/new.rs b/powersoftau/src/bin/new.rs
deleted file mode 100644
index e1f6d91..0000000
--- a/powersoftau/src/bin/new.rs
+++ /dev/null
@@ -1,41 +0,0 @@
-use powersoftau::accumulator::Accumulator;
-use powersoftau::bn256::Bn256CeremonyParameters;
-use powersoftau::parameters::UseCompression;
-use powersoftau::utils::blank_hash;
-
-use bellman_ce::pairing::bn256::Bn256;
-use std::fs::OpenOptions;
-use std::io::{BufWriter, Write};
-
-fn main() {
-    let args: Vec<String> = std::env::args().collect();
- if args.len() != 2 {
- println!("Usage: \n");
- std::process::exit(exitcode::USAGE);
- }
- let challenge_filename = &args[1];
-
- let file = OpenOptions::new()
- .read(false)
- .write(true)
- .create_new(true)
- .open(challenge_filename)
- .expect("unable to create challenge file");
-
- let mut writer = BufWriter::new(file);
-
- // Write a blank BLAKE2b hash:
- writer
- .write_all(&blank_hash().as_slice())
- .expect("unable to write blank hash to challenge file");
-
- let parameters = Bn256CeremonyParameters {};
-
-    let acc: Accumulator<Bn256, Bn256CeremonyParameters> = Accumulator::new(parameters);
- println!("Writing an empty accumulator to disk");
- acc.serialize(&mut writer, UseCompression::No)
- .expect("unable to write fresh accumulator to challenge file");
- writer.flush().expect("unable to flush accumulator to disk");
-
- println!("Wrote a fresh accumulator to challenge file");
-}
diff --git a/powersoftau/src/bin/new_constrained.rs b/powersoftau/src/bin/new_constrained.rs
index 181ce7b..c8ac223 100644
--- a/powersoftau/src/bin/new_constrained.rs
+++ b/powersoftau/src/bin/new_constrained.rs
@@ -1,5 +1,3 @@
-use powersoftau::bn256::Bn256CeremonyParameters;
-
use powersoftau::batched_accumulator::BatchedAccumulator;
use powersoftau::parameters::UseCompression;
use powersoftau::utils::blank_hash;
@@ -9,25 +7,29 @@ use memmap::*;
use std::fs::OpenOptions;
use std::io::Write;
-use powersoftau::parameters::PowersOfTauParameters;
+use powersoftau::parameters::{CeremonyParams, CurveKind};
const COMPRESS_NEW_CHALLENGE: UseCompression = UseCompression::No;
fn main() {
    let args: Vec<String> = std::env::args().collect();
- if args.len() != 2 {
- println!("Usage: \n");
+ if args.len() != 4 {
+ println!("Usage: \n ");
std::process::exit(exitcode::USAGE);
}
let challenge_filename = &args[1];
+ let circuit_power = args[2].parse().expect("could not parse circuit power");
+ let batch_size = args[3].parse().expect("could not parse batch size");
+
+ let parameters = CeremonyParams::new(CurveKind::Bn256, circuit_power, batch_size);
println!(
"Will generate an empty accumulator for 2^{} powers of tau",
- Bn256CeremonyParameters::REQUIRED_POWER
+ parameters.size
);
println!(
"In total will generate up to {} powers",
- Bn256CeremonyParameters::TAU_POWERS_G1_LENGTH
+ parameters.powers_g1_length
);
let file = OpenOptions::new()
@@ -38,11 +40,8 @@ fn main() {
.expect("unable to create challenge file");
let expected_challenge_length = match COMPRESS_NEW_CHALLENGE {
- UseCompression::Yes => {
- Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE
- - Bn256CeremonyParameters::PUBLIC_KEY_SIZE
- }
- UseCompression::No => Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE,
+ UseCompression::Yes => parameters.contribution_size - parameters.public_key_size,
+ UseCompression::No => parameters.accumulator_size,
};
file.set_len(expected_challenge_length as u64)
@@ -75,9 +74,10 @@ fn main() {
println!();
}
-    BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::generate_initial(
+    BatchedAccumulator::<Bn256>::generate_initial(
&mut writable_map,
COMPRESS_NEW_CHALLENGE,
+ ¶meters,
)
.expect("generation of initial accumulator is successful");
writable_map
@@ -88,8 +88,7 @@ fn main() {
let output_readonly = writable_map
.make_read_only()
.expect("must make a map readonly");
- let contribution_hash =
-        BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&output_readonly);
+    let contribution_hash = BatchedAccumulator::<Bn256>::calculate_hash(&output_readonly);
println!("Empty contribution is formed with a hash:");
diff --git a/powersoftau/src/bin/prepare_phase2.rs b/powersoftau/src/bin/prepare_phase2.rs
index 2acd491..fdc1f83 100644
--- a/powersoftau/src/bin/prepare_phase2.rs
+++ b/powersoftau/src/bin/prepare_phase2.rs
@@ -2,7 +2,7 @@ use bellman_ce::pairing::bn256::Bn256;
use bellman_ce::pairing::bn256::{G1, G2};
use bellman_ce::pairing::{CurveAffine, CurveProjective};
use powersoftau::batched_accumulator::*;
-use powersoftau::bn256::Bn256CeremonyParameters;
+use powersoftau::parameters::{CeremonyParams, CurveKind};
use powersoftau::*;
use crate::parameters::*;
@@ -25,6 +25,12 @@ fn log_2(x: u64) -> u32 {
}
fn main() {
+ let parameters = CeremonyParams::new(
+ CurveKind::Bn256,
+ 28, // turn this to 10 for the small test
+ 21, // turn this to 8 for the small test
+ );
+
    let args: Vec<String> = std::env::args().collect();
if args.len() != 2 {
println!("Usage: \n");
@@ -43,10 +49,11 @@ fn main() {
.expect("unable to create a memory map for input")
};
-    let current_accumulator = BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::deserialize(
+    let current_accumulator = BatchedAccumulator::<Bn256>::deserialize(
&response_readable_map,
CheckForCorrectness::Yes,
UseCompression::Yes,
+ ¶meters,
)
.expect("unable to read uncompressed accumulator");
@@ -182,7 +189,7 @@ fn main() {
// Lagrange coefficients in G1 (for constructing
// LC/IC queries and precomputing polynomials for A)
- for coeff in g1_coeffs {
+ for coeff in g1_coeffs.clone() {
// Was normalized earlier in parallel
let coeff = coeff.into_affine();
diff --git a/powersoftau/src/bin/reduce_powers.rs b/powersoftau/src/bin/reduce_powers.rs
index 86b3551..602d33b 100644
--- a/powersoftau/src/bin/reduce_powers.rs
+++ b/powersoftau/src/bin/reduce_powers.rs
@@ -1,8 +1,7 @@
use bellman_ce::pairing::bn256::Bn256;
use powersoftau::{
batched_accumulator::BatchedAccumulator,
- bn256::Bn256CeremonyParameters,
- parameters::{CheckForCorrectness, PowersOfTauParameters, UseCompression},
+ parameters::{CeremonyParams, CheckForCorrectness, CurveKind, UseCompression},
utils::reduced_hash,
};
@@ -11,19 +10,6 @@ use std::io::Write;
use memmap::MmapOptions;
-#[derive(Clone)]
-pub struct Bn256ReducedCeremonyParameters {}
-
-impl PowersOfTauParameters for Bn256ReducedCeremonyParameters {
- const REQUIRED_POWER: usize = 10;
-
- // This ceremony is based on the BN256 elliptic curve construction.
- const G1_UNCOMPRESSED_BYTE_SIZE: usize = 64;
- const G2_UNCOMPRESSED_BYTE_SIZE: usize = 128;
- const G1_COMPRESSED_BYTE_SIZE: usize = 32;
- const G2_COMPRESSED_BYTE_SIZE: usize = 64;
-}
-
const fn num_bits<T>() -> usize {
    std::mem::size_of::<T>() * 8
}
@@ -34,6 +20,12 @@ pub fn log_2(x: u64) -> u32 {
}
fn main() {
+ let parameters = CeremonyParams::new(
+ CurveKind::Bn256,
+ 10, // here we use 10 since it's the reduced ceremony
+ 21,
+ );
+
// Try to load `./challenge` from disk.
let reader = OpenOptions::new()
.read(true)
@@ -45,27 +37,23 @@ fn main() {
.expect("unable to create a memory map for input")
};
-    let current_accumulator = BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::deserialize(
+    let current_accumulator = BatchedAccumulator::<Bn256>::deserialize(
&challenge_readable_map,
CheckForCorrectness::Yes,
UseCompression::No,
+ ¶meters,
)
.expect("unable to read compressed accumulator");
- let mut reduced_accumulator =
-        BatchedAccumulator::<Bn256, Bn256ReducedCeremonyParameters>::empty();
- reduced_accumulator.tau_powers_g1 = current_accumulator.tau_powers_g1
- [..Bn256ReducedCeremonyParameters::TAU_POWERS_G1_LENGTH]
- .to_vec();
- reduced_accumulator.tau_powers_g2 = current_accumulator.tau_powers_g2
- [..Bn256ReducedCeremonyParameters::TAU_POWERS_LENGTH]
- .to_vec();
- reduced_accumulator.alpha_tau_powers_g1 = current_accumulator.alpha_tau_powers_g1
- [..Bn256ReducedCeremonyParameters::TAU_POWERS_LENGTH]
- .to_vec();
- reduced_accumulator.beta_tau_powers_g1 = current_accumulator.beta_tau_powers_g1
- [..Bn256ReducedCeremonyParameters::TAU_POWERS_LENGTH]
- .to_vec();
+ let mut reduced_accumulator = BatchedAccumulator::::empty(¶meters);
+ reduced_accumulator.tau_powers_g1 =
+ current_accumulator.tau_powers_g1[..parameters.powers_g1_length].to_vec();
+ reduced_accumulator.tau_powers_g2 =
+ current_accumulator.tau_powers_g2[..parameters.powers_length].to_vec();
+ reduced_accumulator.alpha_tau_powers_g1 =
+ current_accumulator.alpha_tau_powers_g1[..parameters.powers_length].to_vec();
+ reduced_accumulator.beta_tau_powers_g1 =
+ current_accumulator.beta_tau_powers_g1[..parameters.powers_length].to_vec();
reduced_accumulator.beta_g2 = current_accumulator.beta_g2;
let writer = OpenOptions::new()
@@ -77,7 +65,7 @@ fn main() {
    // Recomputation strips the public key and uses hashing to link with the previous contribution after decompression
writer
- .set_len(Bn256ReducedCeremonyParameters::ACCUMULATOR_BYTE_SIZE as u64)
+ .set_len(parameters.accumulator_size as u64)
.expect("must make output file large enough");
let mut writable_map = unsafe {
@@ -87,8 +75,8 @@ fn main() {
};
let hash = reduced_hash(
- Bn256CeremonyParameters::REQUIRED_POWER as u8,
- Bn256ReducedCeremonyParameters::REQUIRED_POWER as u8,
+        28, // the power of the full ceremony this challenge was reduced from
+ parameters.size as u8,
);
(&mut writable_map[0..])
.write_all(hash.as_slice())
@@ -110,17 +98,14 @@ fn main() {
}
reduced_accumulator
- .serialize(&mut writable_map, UseCompression::No)
+ .serialize(&mut writable_map, UseCompression::No, ¶meters)
.unwrap();
// Get the hash of the contribution, so the user can compare later
let output_readonly = writable_map
.make_read_only()
.expect("must make a map readonly");
- let contribution_hash =
-        BatchedAccumulator::<Bn256, Bn256ReducedCeremonyParameters>::calculate_hash(
- &output_readonly,
- );
+    let contribution_hash = BatchedAccumulator::<Bn256>::calculate_hash(&output_readonly);
println!("Reduced contribution is formed with a hash:");
diff --git a/powersoftau/src/bin/verify.rs b/powersoftau/src/bin/verify.rs
index 45f5126..a890a70 100644
--- a/powersoftau/src/bin/verify.rs
+++ b/powersoftau/src/bin/verify.rs
@@ -1,10 +1,9 @@
use bellman_ce::pairing::bn256::Bn256;
use bellman_ce::pairing::bn256::{G1, G2};
use bellman_ce::pairing::{CurveAffine, CurveProjective};
-use powersoftau::accumulator::HashWriter;
use powersoftau::batched_accumulator::*;
-use powersoftau::bn256::Bn256CeremonyParameters;
use powersoftau::*;
+use powersoftau::parameters::{CeremonyParams, CurveKind};
use crate::keypair::*;
use crate::parameters::*;
@@ -17,6 +16,10 @@ use std::fs::{remove_file, OpenOptions};
use std::io::{self, BufWriter, Read, Write};
use std::path::Path;
+use blake2::{Blake2b, Digest};
+use generic_array::GenericArray;
+use typenum::U64;
+
use memmap::*;
const fn num_bits<T>() -> usize {
@@ -28,13 +31,51 @@ fn log_2(x: u64) -> u32 {
    num_bits::<u64>() as u32 - x.leading_zeros() - 1
}
+/// Abstraction over a writer which hashes the data being written.
+pub struct HashWriter<W: Write> {
+ writer: W,
+ hasher: Blake2b,
+}
+
+impl<W: Write> HashWriter<W> {
+ /// Construct a new `HashWriter` given an existing `writer` by value.
+ pub fn new(writer: W) -> Self {
+ HashWriter {
+ writer,
+ hasher: Blake2b::default(),
+ }
+ }
+
+ /// Destroy this writer and return the hash of what was written.
+    pub fn into_hash(self) -> GenericArray<u8, U64> {
+ self.hasher.result()
+ }
+}
+
+impl<W: Write> Write for HashWriter<W> {
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ let bytes = self.writer.write(buf)?;
+
+ if bytes > 0 {
+ self.hasher.input(&buf[0..bytes]);
+ }
+
+ Ok(bytes)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.writer.flush()
+ }
+}
+
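A quick usage note, as a sketch assuming the `HashWriter` defined above: the verifier wraps `io::sink()` so it can hash exactly the bytes that would be written, without keeping them.

```rust
use std::io::{self, Write};

fn demo_hash_writer() -> io::Result<()> {
    // Hash a byte stream while discarding the bytes themselves.
    let mut sink = HashWriter::new(io::sink());
    sink.write_all(b"transcript bytes")?;
    let digest = sink.into_hash(); // 64-byte BLAKE2b digest
    println!("{:02x?}", &digest[..]);
    Ok(())
}
```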
// Computes the hash of the challenge file for the player,
// given the current state of the accumulator and the last
// response file hash.
fn get_challenge_file_hash(
-    acc: &mut BatchedAccumulator<Bn256, Bn256CeremonyParameters>,
+    acc: &mut BatchedAccumulator<Bn256>,
last_response_file_hash: &[u8; 64],
is_initial: bool,
+ parameters: &CeremonyParams,
) -> [u8; 64] {
let sink = io::sink();
let mut sink = HashWriter::new(sink);
@@ -53,7 +94,7 @@ fn get_challenge_file_hash(
.expect("unable to create temporary tmp_challenge_file_hash");
writer
- .set_len(Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE as u64)
+ .set_len(parameters.accumulator_size as u64)
.expect("must make output file large enough");
let mut writable_map = unsafe {
MmapOptions::new()
@@ -69,13 +110,14 @@ fn get_challenge_file_hash(
.expect("unable to write blank hash to challenge file");
if is_initial {
-        BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::generate_initial(
+        BatchedAccumulator::<Bn256>::generate_initial(
&mut writable_map,
UseCompression::No,
+ parameters,
)
.expect("generation of initial accumulator is successful");
} else {
- acc.serialize(&mut writable_map, UseCompression::No)
+ acc.serialize(&mut writable_map, UseCompression::No, parameters)
.unwrap();
}
@@ -102,9 +144,10 @@ fn get_challenge_file_hash(
// accumulator, the player's public key, and the challenge
// file's hash.
fn get_response_file_hash(
-    acc: &mut BatchedAccumulator<Bn256, Bn256CeremonyParameters>,
+    acc: &mut BatchedAccumulator<Bn256>,
    pubkey: &PublicKey<Bn256>,
last_challenge_file_hash: &[u8; 64],
+ parameters: &CeremonyParams,
) -> [u8; 64] {
let sink = io::sink();
let mut sink = HashWriter::new(sink);
@@ -122,7 +165,7 @@ fn get_response_file_hash(
.expect("unable to create temporary tmp_response_file_hash");
writer
- .set_len(Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE as u64)
+ .set_len(parameters.contribution_size as u64)
.expect("must make output file large enough");
let mut writable_map = unsafe {
MmapOptions::new()
@@ -137,11 +180,11 @@ fn get_response_file_hash(
.flush()
.expect("unable to write blank hash to challenge file");
- acc.serialize(&mut writable_map, UseCompression::Yes)
+ acc.serialize(&mut writable_map, UseCompression::Yes, parameters)
.unwrap();
pubkey
-        .write::<Bn256CeremonyParameters>(&mut writable_map, UseCompression::Yes)
+ .write(&mut writable_map, UseCompression::Yes, parameters)
.expect("unable to write public key");
writable_map.flush().expect("must flush the memory map");
}
@@ -162,7 +205,7 @@ fn get_response_file_hash(
tmp
}
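Taken together, the two helpers recompute the transcript hash chain. An illustrative summary (ours; `H` is BLAKE2b over the exact serialized file bytes):

```rust
// challenge_0 hash     = H( blank_hash          || uncompressed initial accumulator )
// response_i hash      = H( challenge_i hash    || compressed accumulator_{i+1} || pubkey_i )
// challenge_{i+1} hash = H( response_i hash     || uncompressed accumulator_{i+1} )
```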
-fn new_accumulator_for_verify() -> BatchedAccumulator<Bn256, Bn256CeremonyParameters> {
+fn new_accumulator_for_verify(parameters: &CeremonyParams) -> BatchedAccumulator<Bn256> {
let file_name = "tmp_initial_challenge";
{
if Path::new(file_name).exists() {
@@ -176,7 +219,7 @@ fn new_accumulator_for_verify() -> BatchedAccumulator<Bn256, Bn256CeremonyParameters> {
-    BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::generate_initial(
+    BatchedAccumulator::<Bn256>::generate_initial(
&mut writable_map,
UseCompression::No,
+ ¶meters,
)
.expect("generation of initial accumulator is successful");
writable_map
@@ -206,17 +250,26 @@ fn new_accumulator_for_verify() -> BatchedAccumulator<Bn256, Bn256CeremonyParameters> {
 fn main() {
     let args: Vec<String> = std::env::args().collect();
- if args.len() != 2 {
- println!("Usage: \n");
+ if args.len() != 4 {
+ println!("Usage: \n ");
std::process::exit(exitcode::USAGE);
}
let transcript_filename = &args[1];
+ let circuit_power = args[2].parse().expect("could not parse circuit power");
+ let batch_size = args[3].parse().expect("could not parse batch size");
+
+ let parameters = CeremonyParams::new(CurveKind::Bn256, circuit_power, batch_size);
// Try to load transcript file from disk.
let reader = OpenOptions::new()
@@ -231,7 +284,7 @@ fn main() {
};
// Initialize the accumulator
- let mut current_accumulator = new_accumulator_for_verify();
+ let mut current_accumulator = new_accumulator_for_verify(¶meters);
// The "last response file hash" is just a blank BLAKE2b hash
// at the beginning of the hash chain.
@@ -249,10 +302,7 @@ fn main() {
}
let memory_slice = transcript_readable_map
- .get(
- i * Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE
- ..(i + 1) * Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE,
- )
+ .get(i * parameters.contribution_size..(i + 1) * parameters.contribution_size)
.expect("must read point data from file");
let writer = OpenOptions::new()
.read(true)
@@ -262,7 +312,7 @@ fn main() {
.expect("unable to create temporary tmp_response");
writer
- .set_len(Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE as u64)
+ .set_len(parameters.contribution_size as u64)
.expect("must make output file large enough");
let mut writable_map = unsafe {
MmapOptions::new()
@@ -279,8 +329,12 @@ fn main() {
.make_read_only()
.expect("must make a map readonly");
- let last_challenge_file_hash =
- get_challenge_file_hash(&mut current_accumulator, &last_response_file_hash, i == 0);
+ let last_challenge_file_hash = get_challenge_file_hash(
+ &mut current_accumulator,
+ &last_response_file_hash,
+ i == 0,
+ ¶meters,
+ );
// Deserialize the accumulator provided by the player in
// their response file. It's stored in the transcript in
@@ -291,14 +345,13 @@ fn main() {
&response_readable_map,
CheckForCorrectness::Yes,
UseCompression::Yes,
+ ¶meters,
)
.expect("unable to read uncompressed accumulator");
-        let response_file_pubkey = PublicKey::<Bn256>::read::<Bn256CeremonyParameters>(
- &response_readable_map,
- UseCompression::Yes,
- )
- .unwrap();
+ let response_file_pubkey =
+ PublicKey::::read(&response_readable_map, UseCompression::Yes, ¶meters)
+ .unwrap();
// Compute the hash of the response file. (we had it in uncompressed
// form in the transcript, but the response file is compressed to save
// participants bandwidth.)
@@ -306,6 +359,7 @@ fn main() {
&mut response_file_accumulator,
&response_file_pubkey,
&last_challenge_file_hash,
+ ¶meters,
);
// Verify the transformation from the previous accumulator to the new
diff --git a/powersoftau/src/bin/verify_transform_constrained.rs b/powersoftau/src/bin/verify_transform_constrained.rs
index c05c334..a1b5879 100644
--- a/powersoftau/src/bin/verify_transform_constrained.rs
+++ b/powersoftau/src/bin/verify_transform_constrained.rs
@@ -1,5 +1,4 @@
use powersoftau::batched_accumulator::BatchedAccumulator;
-use powersoftau::bn256::Bn256CeremonyParameters;
use powersoftau::keypair::PublicKey;
use powersoftau::parameters::{CheckForCorrectness, UseCompression};
@@ -9,7 +8,7 @@ use std::fs::OpenOptions;
use std::io::{Read, Write};
-use powersoftau::parameters::PowersOfTauParameters;
+use powersoftau::parameters::{CeremonyParams, CurveKind};
const PREVIOUS_CHALLENGE_IS_COMPRESSED: UseCompression = UseCompression::No;
const CONTRIBUTION_IS_COMPRESSED: UseCompression = UseCompression::Yes;
@@ -17,17 +16,21 @@ const COMPRESS_NEW_CHALLENGE: UseCompression = UseCompression::No;
fn main() {
let args: Vec