Make setup abstract over the curve being used (#11)

* refactor(bin): make `calculate_hash` a utility function

* feat: make the engine part of the setup parameters

We add an extension trait to Engine to specify the group element sizes
per curve used. We implement that for each curve we want to support.

Added support for BLS12-381

This allows moving the type constraint from the constructors in BatchedAccumulator and KeyPair to the params, improving ergonomics, and prepares multi-curve support in the CLI

* feat(params): do not require hardcoding the group sizes

* feat(bin): add CLI params to reduce_powers and prepare_phase2
This commit is contained in:
Georgios Konstantopoulos 2020-02-13 11:53:48 +02:00 committed by GitHub
parent 614b4b899d
commit 5d82e40bb7
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
11 changed files with 137 additions and 146 deletions

@ -2,7 +2,6 @@
/// and then contributes to entropy in parts as well
use bellman_ce::pairing::ff::{Field, PrimeField};
use bellman_ce::pairing::*;
use blake2::{Blake2b, Digest};
use log::{error, info};
use generic_array::GenericArray;
@ -46,24 +45,11 @@ pub struct BatchedAccumulator<'a, E: Engine> {
/// Hash chain hash
pub hash: GenericArray<u8, U64>,
/// The parameters used for the setup of this accumulator
pub parameters: &'a CeremonyParams,
pub parameters: &'a CeremonyParams<E>,
}
impl<'a, E: Engine> BatchedAccumulator<'a, E> {
/// Calculate the contribution hash from the resulting file. Original powers of tau implementation
/// used a specially formed writer to write to the file and calculate a hash on the fly, but memory-constrained
/// implementation now writes without a particular order, so plain recalculation at the end
/// of the procedure is more efficient
pub fn calculate_hash(input_map: &Mmap) -> GenericArray<u8, U64> {
let chunk_size = 1 << 30; // read by 1GB from map
let mut hasher = Blake2b::default();
for chunk in input_map.chunks(chunk_size) {
hasher.input(&chunk);
}
hasher.result()
}
pub fn empty(parameters: &'a CeremonyParams) -> Self {
pub fn empty(parameters: &'a CeremonyParams<E>) -> Self {
Self {
tau_powers_g1: vec![],
tau_powers_g2: vec![],
@ -297,7 +283,7 @@ impl<'a, E: Engine> BatchedAccumulator<'a, E> {
output_is_compressed: UseCompression,
check_input_for_correctness: CheckForCorrectness,
check_output_for_correctness: CheckForCorrectness,
parameters: &'a CeremonyParams,
parameters: &'a CeremonyParams<E>,
) -> bool {
use itertools::MinMaxResult::MinMax;
assert_eq!(digest.len(), 64);
@ -557,7 +543,7 @@ impl<'a, E: Engine> BatchedAccumulator<'a, E> {
input_map: &Mmap,
output_map: &mut MmapMut,
check_input_for_correctness: CheckForCorrectness,
parameters: &'a CeremonyParams,
parameters: &'a CeremonyParams<E>,
) -> io::Result<()> {
use itertools::MinMaxResult::MinMax;
@ -634,7 +620,7 @@ impl<'a, E: Engine> BatchedAccumulator<'a, E> {
input_map: &Mmap,
check_input_for_correctness: CheckForCorrectness,
compression: UseCompression,
parameters: &'a CeremonyParams,
parameters: &'a CeremonyParams<E>,
) -> io::Result<BatchedAccumulator<'a, E>> {
use itertools::MinMaxResult::MinMax;
@ -734,7 +720,7 @@ impl<'a, E: Engine> BatchedAccumulator<'a, E> {
&mut self,
output_map: &mut MmapMut,
compression: UseCompression,
parameters: &CeremonyParams,
parameters: &CeremonyParams<E>,
) -> io::Result<()> {
use itertools::MinMaxResult::MinMax;
@ -1136,7 +1122,7 @@ impl<'a, E: Engine> BatchedAccumulator<'a, E> {
compress_the_output: UseCompression,
check_input_for_correctness: CheckForCorrectness,
key: &PrivateKey<E>,
parameters: &'a CeremonyParams,
parameters: &'a CeremonyParams<E>,
) -> io::Result<()> {
/// Exponentiate a large number of points, with an optional coefficient to be applied to the
/// exponent.
@ -1308,7 +1294,7 @@ impl<'a, E: Engine> BatchedAccumulator<'a, E> {
pub fn generate_initial(
output_map: &mut MmapMut,
compress_the_output: UseCompression,
parameters: &'a CeremonyParams,
parameters: &'a CeremonyParams<E>,
) -> io::Result<()> {
use itertools::MinMaxResult::MinMax;

@ -1,7 +1,8 @@
use powersoftau::{
batched_accumulator::BatchedAccumulator,
keypair::keypair,
parameters::{CeremonyParams, CheckForCorrectness, CurveKind, UseCompression},
parameters::{CeremonyParams, CheckForCorrectness, UseCompression},
utils::calculate_hash,
};
use bellman_ce::pairing::bn256::Bn256;
@ -29,7 +30,7 @@ fn main() {
let circuit_power = args[3].parse().expect("could not parse circuit power");
let batch_size = args[4].parse().expect("could not parse batch size");
let parameters = CeremonyParams::new(CurveKind::Bn256, circuit_power, batch_size);
let parameters = CeremonyParams::<Bn256>::new(circuit_power, batch_size);
println!(
"Will contribute a random beacon to accumulator for 2^{} powers of tau",
@ -148,7 +149,7 @@ fn main() {
println!("Calculating previous contribution hash...");
let current_accumulator_hash = BatchedAccumulator::<Bn256>::calculate_hash(&readable_map);
let current_accumulator_hash = calculate_hash(&readable_map);
{
println!("Contributing on top of the hash:");
@ -179,7 +180,7 @@ fn main() {
println!("Computing and writing your contribution, this could take a while...");
// this computes a transformation and writes it
BatchedAccumulator::<Bn256>::transform(
BatchedAccumulator::transform(
&readable_map,
&mut writable_map,
INPUT_IS_COMPRESSED,
@ -200,7 +201,7 @@ fn main() {
let output_readonly = writable_map
.make_read_only()
.expect("must make a map readonly");
let contribution_hash = BatchedAccumulator::<Bn256>::calculate_hash(&output_readonly);
let contribution_hash = calculate_hash(&output_readonly);
print!(
"Done!\n\n\

@ -1,6 +1,9 @@
use powersoftau::batched_accumulator::BatchedAccumulator;
use powersoftau::keypair::keypair;
use powersoftau::parameters::{CheckForCorrectness, UseCompression};
use powersoftau::{
batched_accumulator::BatchedAccumulator,
keypair::keypair,
parameters::{CeremonyParams, CheckForCorrectness, UseCompression},
utils::calculate_hash,
};
use bellman_ce::pairing::bn256::Bn256;
use memmap::*;
@ -8,8 +11,6 @@ use std::fs::OpenOptions;
use std::io::{Read, Write};
use powersoftau::parameters::{CeremonyParams, CurveKind};
const INPUT_IS_COMPRESSED: UseCompression = UseCompression::No;
const COMPRESS_THE_OUTPUT: UseCompression = UseCompression::Yes;
const CHECK_INPUT_CORRECTNESS: CheckForCorrectness = CheckForCorrectness::No;
@ -25,7 +26,7 @@ fn main() {
let circuit_power = args[3].parse().expect("could not parse circuit power");
let batch_size = args[4].parse().expect("could not parse batch size");
let parameters = CeremonyParams::new(CurveKind::Bn256, circuit_power, batch_size);
let parameters = CeremonyParams::<Bn256>::new(circuit_power, batch_size);
println!(
"Will contribute to accumulator for 2^{} powers of tau",
@ -136,7 +137,7 @@ fn main() {
UseCompression::No == INPUT_IS_COMPRESSED,
"Hashing the compressed file in not yet defined"
);
let current_accumulator_hash = BatchedAccumulator::<Bn256>::calculate_hash(&readable_map);
let current_accumulator_hash = calculate_hash(&readable_map);
{
println!("`challenge` file contains decompressed points and has a hash:");
@ -189,7 +190,7 @@ fn main() {
println!("Computing and writing your contribution, this could take a while...");
// this computes a transformation and writes it
BatchedAccumulator::<Bn256>::transform(
BatchedAccumulator::transform(
&readable_map,
&mut writable_map,
INPUT_IS_COMPRESSED,
@ -213,7 +214,7 @@ fn main() {
let output_readonly = writable_map
.make_read_only()
.expect("must make a map readonly");
let contribution_hash = BatchedAccumulator::<Bn256>::calculate_hash(&output_readonly);
let contribution_hash = calculate_hash(&output_readonly);
print!(
"Done!\n\n\

@ -1,13 +1,13 @@
use powersoftau::batched_accumulator::BatchedAccumulator;
use powersoftau::parameters::UseCompression;
use powersoftau::utils::blank_hash;
use powersoftau::utils::{blank_hash, calculate_hash};
use bellman_ce::pairing::bn256::Bn256;
use memmap::*;
use std::fs::OpenOptions;
use std::io::Write;
use powersoftau::parameters::{CeremonyParams, CurveKind};
use powersoftau::parameters::CeremonyParams;
const COMPRESS_NEW_CHALLENGE: UseCompression = UseCompression::No;
@ -21,7 +21,7 @@ fn main() {
let circuit_power = args[2].parse().expect("could not parse circuit power");
let batch_size = args[3].parse().expect("could not parse batch size");
let parameters = CeremonyParams::new(CurveKind::Bn256, circuit_power, batch_size);
let parameters = CeremonyParams::<Bn256>::new(circuit_power, batch_size);
println!(
"Will generate an empty accumulator for 2^{} powers of tau",
@ -74,11 +74,7 @@ fn main() {
println!();
}
BatchedAccumulator::<Bn256>::generate_initial(
&mut writable_map,
COMPRESS_NEW_CHALLENGE,
&parameters,
)
BatchedAccumulator::generate_initial(&mut writable_map, COMPRESS_NEW_CHALLENGE, &parameters)
.expect("generation of initial accumulator is successful");
writable_map
.flush()
@ -88,7 +84,7 @@ fn main() {
let output_readonly = writable_map
.make_read_only()
.expect("must make a map readonly");
let contribution_hash = BatchedAccumulator::<Bn256>::calculate_hash(&output_readonly);
let contribution_hash = calculate_hash(&output_readonly);
println!("Empty contribution is formed with a hash:");

@ -2,7 +2,7 @@ use bellman_ce::pairing::bn256::Bn256;
use bellman_ce::pairing::bn256::{G1, G2};
use bellman_ce::pairing::{CurveAffine, CurveProjective};
use powersoftau::batched_accumulator::*;
use powersoftau::parameters::{CeremonyParams, CurveKind};
use powersoftau::parameters::CeremonyParams;
use powersoftau::*;
use crate::parameters::*;
@ -25,18 +25,16 @@ fn log_2(x: u64) -> u32 {
}
fn main() {
let parameters = CeremonyParams::new(
CurveKind::Bn256,
28, // turn this to 10 for the small test
21, // turn this to 8 for the small test
);
let args: Vec<String> = std::env::args().collect();
if args.len() != 2 {
println!("Usage: \n<response_filename>");
if args.len() != 4 {
println!("Usage: \n<response_filename> <circuit_power> <batch_size>");
std::process::exit(exitcode::USAGE);
}
let response_filename = &args[1];
let circuit_power = args[2].parse().expect("could not parse circuit power");
let batch_size = args[3].parse().expect("could not parse batch size");
let parameters = CeremonyParams::<Bn256>::new(circuit_power, batch_size);
// Try to load response file from disk.
let reader = OpenOptions::new()
@ -49,7 +47,7 @@ fn main() {
.expect("unable to create a memory map for input")
};
let current_accumulator = BatchedAccumulator::<Bn256>::deserialize(
let current_accumulator = BatchedAccumulator::deserialize(
&response_readable_map,
CheckForCorrectness::Yes,
UseCompression::Yes,

@ -1,8 +1,8 @@
use bellman_ce::pairing::bn256::Bn256;
use powersoftau::{
batched_accumulator::BatchedAccumulator,
parameters::{CeremonyParams, CheckForCorrectness, CurveKind, UseCompression},
utils::reduced_hash,
parameters::{CeremonyParams, CheckForCorrectness, UseCompression},
utils::{calculate_hash, reduced_hash},
};
use std::fs::OpenOptions;
@ -20,24 +20,31 @@ pub fn log_2(x: u64) -> u32 {
}
fn main() {
let parameters = CeremonyParams::new(
CurveKind::Bn256,
10, // here we use 10 since it's the reduced ceremony
21,
);
let args: Vec<String> = std::env::args().collect();
if args.len() != 6 {
println!("Usage: \n<challenge_filename> <reduced_challenge_filename> <original_circuit_power> <reduced_circuit_power> <batch_size>");
std::process::exit(exitcode::USAGE);
}
let challenge_filename = &args[1];
let reduced_challenge_filename = &args[2];
let original_circuit_power = args[3].parse().expect("could not parse original circuit power");
let reduced_circuit_power = args[4].parse().expect("could not parse reduced circuit power");
let batch_size = args[5].parse().expect("could not parse batch size");
// Try to load `./challenge` from disk.
let parameters = CeremonyParams::<Bn256>::new(reduced_circuit_power, batch_size);
// Try to load the challenge from disk.
let reader = OpenOptions::new()
.read(true)
.open("challenge")
.expect("unable open `./challenge` in this directory");
.open(challenge_filename)
.expect("unable to open challenge in this directory");
let challenge_readable_map = unsafe {
MmapOptions::new()
.map(&reader)
.expect("unable to create a memory map for input")
};
let current_accumulator = BatchedAccumulator::<Bn256>::deserialize(
let current_accumulator = BatchedAccumulator::deserialize(
&challenge_readable_map,
CheckForCorrectness::Yes,
UseCompression::No,
@ -45,7 +52,7 @@ fn main() {
)
.expect("unable to read compressed accumulator");
let mut reduced_accumulator = BatchedAccumulator::<Bn256>::empty(&parameters);
let mut reduced_accumulator = BatchedAccumulator::empty(&parameters);
reduced_accumulator.tau_powers_g1 =
current_accumulator.tau_powers_g1[..parameters.powers_g1_length].to_vec();
reduced_accumulator.tau_powers_g2 =
@ -60,8 +67,8 @@ fn main() {
.read(true)
.write(true)
.create_new(true)
.open("reduced_challenge")
.expect("unable to create `./reduced_challenge` in this directory");
.open(reduced_challenge_filename)
.expect("unable to create the reduced challenge in this directory");
// Recomputation strips the public key and uses hashing to link with the previous contribution after decompression
writer
@ -75,7 +82,7 @@ fn main() {
};
let hash = reduced_hash(
28, // this is the full size of the hash
original_circuit_power,
parameters.size as u8,
);
(&mut writable_map[0..])
@ -83,7 +90,7 @@ fn main() {
.expect("unable to write a default hash to mmap");
writable_map
.flush()
.expect("unable to write reduced hash to `./reduced_challenge`");
.expect("unable to write reduced hash to the reduced_challenge");
println!("Reduced hash for a reduced challenge:");
for line in hash.as_slice().chunks(16) {
@ -105,7 +112,7 @@ fn main() {
let output_readonly = writable_map
.make_read_only()
.expect("must make a map readonly");
let contribution_hash = BatchedAccumulator::<Bn256>::calculate_hash(&output_readonly);
let contribution_hash = calculate_hash(&output_readonly);
println!("Reduced contribution is formed with a hash:");

@ -2,8 +2,8 @@ use bellman_ce::pairing::bn256::Bn256;
use bellman_ce::pairing::bn256::{G1, G2};
use bellman_ce::pairing::{CurveAffine, CurveProjective};
use powersoftau::batched_accumulator::*;
use powersoftau::parameters::CeremonyParams;
use powersoftau::*;
use powersoftau::parameters::{CeremonyParams, CurveKind};
use crate::keypair::*;
use crate::parameters::*;
@ -71,14 +71,14 @@ impl<W: Write> Write for HashWriter<W> {
// Computes the hash of the challenge file for the player,
// given the current state of the accumulator and the last
// response file hash.
fn get_challenge_file_hash(
acc: &mut BatchedAccumulator<Bn256>,
fn get_challenge_file_hash<E: Engine>(
acc: &mut BatchedAccumulator<E>,
last_response_file_hash: &[u8; 64],
is_initial: bool,
parameters: &CeremonyParams,
) -> [u8; 64] {
let sink = io::sink();
let mut sink = HashWriter::new(sink);
let parameters = acc.parameters;
let file_name = "tmp_challenge_file_hash";
@ -110,11 +110,7 @@ fn get_challenge_file_hash(
.expect("unable to write blank hash to challenge file");
if is_initial {
BatchedAccumulator::<Bn256>::generate_initial(
&mut writable_map,
UseCompression::No,
parameters,
)
BatchedAccumulator::generate_initial(&mut writable_map, UseCompression::No, parameters)
.expect("generation of initial accumulator is successful");
} else {
acc.serialize(&mut writable_map, UseCompression::No, parameters)
@ -140,17 +136,19 @@ fn get_challenge_file_hash(
tmp
}
use bellman_ce::pairing::Engine;
// Computes the hash of the response file, given the new
// accumulator, the player's public key, and the challenge
// file's hash.
fn get_response_file_hash(
acc: &mut BatchedAccumulator<Bn256>,
pubkey: &PublicKey<Bn256>,
fn get_response_file_hash<E: Engine>(
acc: &mut BatchedAccumulator<E>,
pubkey: &PublicKey<E>,
last_challenge_file_hash: &[u8; 64],
parameters: &CeremonyParams,
) -> [u8; 64] {
let sink = io::sink();
let mut sink = HashWriter::new(sink);
let parameters = acc.parameters;
let file_name = "tmp_response_file_hash";
if Path::new(file_name).exists() {
@ -205,7 +203,7 @@ fn get_response_file_hash(
tmp
}
fn new_accumulator_for_verify(parameters: &CeremonyParams) -> BatchedAccumulator<Bn256> {
fn new_accumulator_for_verify(parameters: &CeremonyParams<Bn256>) -> BatchedAccumulator<Bn256> {
let file_name = "tmp_initial_challenge";
{
if Path::new(file_name).exists() {
@ -228,11 +226,7 @@ fn new_accumulator_for_verify(parameters: &CeremonyParams) -> BatchedAccumulator
.map_mut(&file)
.expect("unable to create a memory map")
};
BatchedAccumulator::<Bn256>::generate_initial(
&mut writable_map,
UseCompression::No,
&parameters,
)
BatchedAccumulator::generate_initial(&mut writable_map, UseCompression::No, &parameters)
.expect("generation of initial accumulator is successful");
writable_map
.flush()
@ -269,7 +263,7 @@ fn main() {
let circuit_power = args[2].parse().expect("could not parse circuit power");
let batch_size = args[3].parse().expect("could not parse batch size");
let parameters = CeremonyParams::new(CurveKind::Bn256, circuit_power, batch_size);
let parameters = CeremonyParams::<Bn256>::new(circuit_power, batch_size);
// Try to load transcript file from disk.
let reader = OpenOptions::new()
@ -329,12 +323,8 @@ fn main() {
.make_read_only()
.expect("must make a map readonly");
let last_challenge_file_hash = get_challenge_file_hash(
&mut current_accumulator,
&last_response_file_hash,
i == 0,
&parameters,
);
let last_challenge_file_hash =
get_challenge_file_hash(&mut current_accumulator, &last_response_file_hash, i == 0);
// Deserialize the accumulator provided by the player in
// their response file. It's stored in the transcript in
@ -350,8 +340,7 @@ fn main() {
.expect("unable to read uncompressed accumulator");
let response_file_pubkey =
PublicKey::<Bn256>::read(&response_readable_map, UseCompression::Yes, &parameters)
.unwrap();
PublicKey::read(&response_readable_map, UseCompression::Yes, &parameters).unwrap();
// Compute the hash of the response file. (we had it in uncompressed
// form in the transcript, but the response file is compressed to save
// participants bandwidth.)
@ -359,7 +348,6 @@ fn main() {
&mut response_file_accumulator,
&response_file_pubkey,
&last_challenge_file_hash,
&parameters,
);
// Verify the transformation from the previous accumulator to the new

@ -1,6 +1,9 @@
use powersoftau::batched_accumulator::BatchedAccumulator;
use powersoftau::keypair::PublicKey;
use powersoftau::parameters::{CheckForCorrectness, UseCompression};
use powersoftau::{
batched_accumulator::BatchedAccumulator,
keypair::PublicKey,
parameters::{CeremonyParams, CheckForCorrectness, UseCompression},
utils::calculate_hash,
};
use bellman_ce::pairing::bn256::Bn256;
use memmap::*;
@ -8,8 +11,6 @@ use std::fs::OpenOptions;
use std::io::{Read, Write};
use powersoftau::parameters::{CeremonyParams, CurveKind};
const PREVIOUS_CHALLENGE_IS_COMPRESSED: UseCompression = UseCompression::No;
const CONTRIBUTION_IS_COMPRESSED: UseCompression = UseCompression::Yes;
const COMPRESS_NEW_CHALLENGE: UseCompression = UseCompression::No;
@ -26,7 +27,7 @@ fn main() {
let circuit_power = args[4].parse().expect("could not parse circuit power");
let batch_size = args[5].parse().expect("could not parse batch size");
let parameters = CeremonyParams::new(CurveKind::Bn256, circuit_power, batch_size);
let parameters = CeremonyParams::<Bn256>::new(circuit_power, batch_size);
println!(
"Will verify and decompress a contribution to accumulator for 2^{} powers of tau",
@ -95,8 +96,7 @@ fn main() {
// Check that contribution is correct
let current_accumulator_hash =
BatchedAccumulator::<Bn256>::calculate_hash(&challenge_readable_map);
let current_accumulator_hash = calculate_hash(&challenge_readable_map);
println!("Hash of the `challenge` file for verification:");
for line in current_accumulator_hash.as_slice().chunks(16) {
@ -137,7 +137,7 @@ fn main() {
}
}
let response_hash = BatchedAccumulator::<Bn256>::calculate_hash(&response_readable_map);
let response_hash = calculate_hash(&response_readable_map);
println!("Hash of the response file for verification:");
for line in response_hash.as_slice().chunks(16) {
@ -152,7 +152,7 @@ fn main() {
}
// get the contributor's public key
let public_key = PublicKey::<Bn256>::read(
let public_key = PublicKey::read(
&response_readable_map,
CONTRIBUTION_IS_COMPRESSED,
&parameters,
@ -165,7 +165,7 @@ fn main() {
"Verifying a contribution to contain proper powers and correspond to the public key..."
);
let valid = BatchedAccumulator::<Bn256>::verify_transformation(
let valid = BatchedAccumulator::verify_transformation(
&challenge_readable_map,
&response_readable_map,
&public_key,
@ -220,7 +220,7 @@ fn main() {
.expect("unable to write hash to new challenge file");
}
BatchedAccumulator::<Bn256>::decompress(
BatchedAccumulator::decompress(
&response_readable_map,
&mut writable_map,
CheckForCorrectness::No,
@ -234,8 +234,7 @@ fn main() {
.make_read_only()
.expect("must make a map readonly");
let recompressed_hash =
BatchedAccumulator::<Bn256>::calculate_hash(&new_challenge_readable_map);
let recompressed_hash = calculate_hash(&new_challenge_readable_map);
println!("Here's the BLAKE2b hash of the decompressed participant's response as new_challenge file:");

@ -171,7 +171,7 @@ impl<E: Engine> PublicKey<E> {
&self,
output_map: &mut MmapMut,
accumulator_was_compressed: UseCompression,
parameters: &CeremonyParams,
parameters: &CeremonyParams<E>,
) -> io::Result<()> {
let mut position = match accumulator_was_compressed {
UseCompression::Yes => parameters.contribution_size - parameters.public_key_size,
@ -218,7 +218,7 @@ impl<E: Engine> PublicKey<E> {
pub fn read(
input_map: &Mmap,
accumulator_was_compressed: UseCompression,
parameters: &CeremonyParams,
parameters: &CeremonyParams<E>,
) -> Result<Self, DeserializationError> {
fn read_uncompressed<EE: Engine, C: CurveAffine<Engine = EE, Scalar = EE::Fr>>(
input_map: &Mmap,
@ -291,12 +291,12 @@ mod tests {
mod bn256 {
use super::*;
use crate::parameters::{CurveKind, CurveParams};
use crate::parameters::CurveParams;
use bellman_ce::pairing::bn256::Bn256;
#[test]
fn test_pubkey_serialization() {
let curve = CurveParams::new(CurveKind::Bn256);
let curve = CurveParams::<Bn256>::new();
let public_key_size = 6 * curve.g1 + 3 * curve.g2;
// Generate a random public key

@ -1,43 +1,44 @@
use bellman_ce::pairing::GroupDecodingError;
use bellman_ce::pairing::{CurveAffine, EncodedPoint, Engine, GroupDecodingError};
use std::fmt;
use std::io;
use std::marker::PhantomData;
/// The sizes of the group elements of a curve
#[derive(Clone, PartialEq, Eq)]
pub struct CurveParams {
#[derive(Clone, PartialEq, Eq, Default)]
pub struct CurveParams<E> {
/// Size of a G1 Element
pub g1: usize,
/// Size of a G2 Element
pub g2: usize,
/// Size of a compressed G1 Element
pub g1_compressed: usize,
/// Size of a compressed G2 Element
pub g2_compressed: usize,
engine_type: PhantomData<E>,
}
/// The types of curves we support
#[derive(Clone, PartialEq, Eq)]
pub enum CurveKind {
Bn256,
}
impl CurveParams {
/// Creates a new curve based on the provided CurveKind
pub fn new(kind: CurveKind) -> Self {
let (g1, g2) = match kind {
CurveKind::Bn256 => (64, 128),
};
impl<E: Engine> CurveParams<E> {
pub fn new() -> CurveParams<E> {
let g1 = <<E as Engine>::G1Affine as CurveAffine>::Uncompressed::size();
let g2 = <<E as Engine>::G2Affine as CurveAffine>::Uncompressed::size();
let g1_compressed = <<E as Engine>::G1Affine as CurveAffine>::Compressed::size();
let g2_compressed = <<E as Engine>::G2Affine as CurveAffine>::Compressed::size();
CurveParams {
g1,
g2,
g1_compressed: g1 / 2,
g2_compressed: g2 / 2,
g1_compressed,
g2_compressed,
engine_type: PhantomData,
}
}
}
#[derive(Clone, PartialEq, Eq)]
/// The parameters used for the trusted setup ceremony
pub struct CeremonyParams {
pub struct CeremonyParams<E> {
/// The type of the curve being used (currently only supports BN256)
pub curve: CurveParams,
pub curve: CurveParams<E>,
/// The number of Powers of Tau G1 elements which will be accumulated
pub powers_g1_length: usize,
/// The number of Powers of Tau Alpha/Beta/G2 elements which will be accumulated
@ -58,18 +59,18 @@ pub struct CeremonyParams {
pub hash_size: usize,
}
impl CeremonyParams {
impl<E: Engine> CeremonyParams<E> {
/// Constructs a new ceremony parameters object from the type of provided curve
pub fn new(kind: CurveKind, size: usize, batch_size: usize) -> Self {
pub fn new(size: usize, batch_size: usize) -> Self {
// create the curve
let curve = CurveParams::new(kind);
let curve = CurveParams::<E>::new();
Self::new_with_curve(curve, size, batch_size)
}
/// Constructs a new ceremony parameters object from the directly provided curve with parameters
/// Consider using the `new` method if you want to use one of the pre-implemented curves
pub fn new_with_curve(curve: CurveParams, size: usize, batch_size: usize) -> Self {
// asume we're using a 64 byte long hash function such as Blake
pub fn new_with_curve(curve: CurveParams<E>, size: usize, batch_size: usize) -> Self {
// assume we're using a 64 byte long hash function such as Blake
let hash_size = 64;
// 2^{size}

@ -6,12 +6,26 @@ use generic_array::GenericArray;
use rand::chacha::ChaChaRng;
use rand::{Rand, Rng, SeedableRng};
use memmap::Mmap;
use std::io::{self, Write};
use std::sync::Arc;
use typenum::consts::U64;
use super::parameters::UseCompression;
/// Calculate the contribution hash from the resulting file. Original powers of tau implementation
/// used a specially formed writer to write to the file and calculate a hash on the fly, but memory-constrained
/// implementation now writes without a particular order, so plain recalculation at the end
/// of the procedure is more efficient
pub fn calculate_hash(input_map: &Mmap) -> GenericArray<u8, U64> {
let chunk_size = 1 << 30; // read by 1GB from map
let mut hasher = Blake2b::default();
for chunk in input_map.chunks(chunk_size) {
hasher.input(&chunk);
}
hasher.result()
}
/// Hashes to G2 using the first 32 bytes of `digest`. Panics if `digest` is less
/// than 32 bytes.
pub fn hash_to_g2<E: Engine>(mut digest: &[u8]) -> E::G2 {