Merge pull request #4 from tornadocash/master

Split lib.rs into modules, and a few minor improvements

Commit: 0183ad1e3b
.dockerignore (new file, 16 lines)

@@ -0,0 +1,16 @@
+Dockerfile
+.git
+.dockerignore
+**/node_modules
+**/target
+**/phase1radix2m*
+/phase2/pkg
+/phase2/*.params
+/phase2/*.json
+/phase2/*.bin
+/phase2/*.circom
+/phase2/verifier.sol
+/powersoftau/challenge*
+/powersoftau/response*
+/powersoftau/transcript
+/powersoftau/tmp_*
Dockerfile (new file, 16 lines)

@@ -0,0 +1,16 @@
+FROM rust:slim as builder
+RUN apt-get update && \
+    apt-get install -y pkg-config libssl-dev && \
+    rm -rf /var/lib/apt/lists/*
+WORKDIR /build
+COPY . .
+RUN mkdir bin
+RUN cd powersoftau && \
+    cargo build --release --bins && \
+    find ./target/release/ -maxdepth 1 -type f -perm /a+x -exec sh -c 'mv {} /build/bin/phase1_$(basename {})' \;
+RUN cd phase2 && \
+    cargo build --release --bins && \
+    find ./target/release/ -maxdepth 1 -type f -perm /a+x -exec sh -c 'mv {} /build/bin/phase2_$(basename {})' \;
+
+FROM debian:buster-slim
+COPY --from=builder /build/bin/* /usr/bin/
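The multi-stage build above compiles the binaries of both crates in a `rust:slim` builder and ships only the executables in a slim Debian image. A minimal usage sketch (the `phase2-bn254` tag is an arbitrary placeholder, not something the Dockerfile defines):

```sh
# from the repository root
docker build -t phase2-bn254 .

# the ceremony tools land in /usr/bin with phase1_/phase2_ prefixes
docker run --rm -it phase2-bn254 bash
```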
@@ -0,0 +1 @@
+# Trusted setup ceremony [![Docker Cloud Build Status](https://img.shields.io/docker/cloud/build/tornadocash/phase2-bn254.svg)](https://hub.docker.com/r/tornadocash/phase2-bn254/builds)
phase2/.gitignore (vendored)

@@ -3,3 +3,8 @@ phase1*
 **/*.rs.bk
 Cargo.lock
 node_modules
+phase1radix2m*
+/*.json
+/*.bin
+/*.params
+/verifier.sol
@@ -8,20 +8,35 @@ homepage = "https://github.com/ebfull/phase2"
 license = "MIT/Apache-2.0"
 repository = "https://github.com/ebfull/phase2"
 
+[lib]
+crate-type = ["cdylib", "lib"]
+
 [dependencies]
 rand = "0.4"
-bellman_ce = { path = "../bellman" }
 byteorder = "1"
 exitcode = "1.1.2"
-num_cpus = "1"
-crossbeam = "0.3"
 blake2-rfc = "0.2"
 blake2 = "0.6.1"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
-memmap = "0.7"
 num-bigint = "0.2.3"
 num-traits = "0.2.8"
 itertools = "0.8.1"
-rust-crypto = "0.2"
 hex = "0.4.0"
+cfg-if = "0.1.10"
+bellman_ce = { path = "../bellman", default-features = false } # active features depend on build type
+
+# needed for native only but don't break wasm if present
+num_cpus = "1"
+crossbeam = "0.3"
+rust-crypto = { version = "0.2", optional = true }
+
+# needed for wasm only
+wasm-bindgen = { version = "0.2.58", optional = true }
+js-sys = { version = "0.3.35", optional = true }
+web-sys = { version = "0.3.35", features = ["console"], optional = true }
+console_error_panic_hook = { version = "0.1.6", optional = true }
+
+[features]
+default = ["bellman_ce/multicore", "rust-crypto"]
+wasm = ["wasm-bindgen", "js-sys", "web-sys", "console_error_panic_hook", "bellman_ce/wasm"]
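With this feature split, the same crate builds for native and WebAssembly targets. The two invocations below follow directly from the defaults above and the README hunk that comes next:

```sh
# native build: default features (multicore bellman_ce + rust-crypto)
cargo build --release

# wasm build: drop the native defaults and enable the wasm feature set
wasm-pack build --release -- --no-default-features --features wasm
```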
@@ -2,6 +2,28 @@
 
 This library is still under development.
 
+## WebAssembly how-to
+
+Build the wasm package using `wasm-pack build --release -- --no-default-features --features wasm`.
+
+This will generate a `./pkg` directory with the wasm file and JS bindings. After that you
+can use the package in your browser application like so:
+
+```js
+async function main() {
+    const phase2 = await import("./pkg/phase2.js")
+    let data = await fetch('params')
+    data = await data.arrayBuffer()
+    data = new Uint8Array(data)
+    console.log('Source params', data)
+    const result = phase2.contribute(data)
+    console.log('Updated params', result)
+    // upload updated params
+}
+
+main().catch(console.error)
+```
+
 ## [Documentation](https://docs.rs/phase2/)
 
 ## Security Warnings
phase2/circuit.circom (new file, 15 lines)

@@ -0,0 +1,15 @@
+template Num2Bits(n) {
+    signal input in;
+    signal output out[n];
+    var lc1=0;
+
+    for (var i = 0; i<n; i++) {
+        out[i] <-- (in >> i) & 1;
+        out[i] * (out[i] - 1) === 0;
+        lc1 += out[i] * 2**i;
+    }
+
+    lc1 === in;
+}
+
+component main = Num2Bits(253);
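This test circuit decomposes `in` into 253 bits: `out[i] <-- (in >> i) & 1` assigns each bit, `out[i] * (out[i] - 1) === 0` constrains it to be boolean, and `lc1 === in` enforces that the bits recompose to the input. A plausible way to produce the `circuit.json` and witness consumed by the binaries in this PR, assuming circom 0.x-era tooling (the exact CLI flags are an assumption, not part of this repository):

```sh
circom circuit.circom -o circuit.json                                   # compile to the constraint-system JSON
snarkjs calculatewitness -c circuit.json -i input.json -w witness.json  # witness for input.json ({"in": 42})
```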
@@ -1,271 +0,0 @@
-extern crate bellman;
-extern crate pairing;
-extern crate rand;
-extern crate phase2;
-
-// For randomness (during paramgen and proof generation)
-use rand::{thread_rng, Rng};
-
-// For benchmarking
-use std::time::{Duration, Instant};
-
-// Bring in some tools for using pairing-friendly curves
-use pairing::{
-    Engine,
-    Field,
-};
-
-// We're going to use the BLS12-381 pairing-friendly elliptic curve.
-use pairing::bls12_381::{
-    Bls12
-};
-
-// We'll use these interfaces to construct our circuit.
-use bellman::{
-    Circuit,
-    ConstraintSystem,
-    SynthesisError
-};
-
-// We're going to use the Groth16 proving system.
-use bellman::groth16::{
-    Proof,
-    prepare_verifying_key,
-    create_random_proof,
-    verify_proof,
-};
-
-const MIMC_ROUNDS: usize = 322;
-
-/// This is an implementation of MiMC, specifically a
-/// variant named `LongsightF322p3` for BLS12-381.
-/// See http://eprint.iacr.org/2016/492 for more
-/// information about this construction.
-///
-/// ```
-/// function LongsightF322p3(xL ⦂ Fp, xR ⦂ Fp) {
-///     for i from 0 up to 321 {
-///         xL, xR := xR + (xL + Ci)^3, xL
-///     }
-///     return xL
-/// }
-/// ```
-fn mimc<E: Engine>(
-    mut xl: E::Fr,
-    mut xr: E::Fr,
-    constants: &[E::Fr]
-) -> E::Fr
-{
-    assert_eq!(constants.len(), MIMC_ROUNDS);
-
-    for i in 0..MIMC_ROUNDS {
-        let mut tmp1 = xl;
-        tmp1.add_assign(&constants[i]);
-        let mut tmp2 = tmp1;
-        tmp2.square();
-        tmp2.mul_assign(&tmp1);
-        tmp2.add_assign(&xr);
-        xr = xl;
-        xl = tmp2;
-    }
-
-    xl
-}
-
-/// This is our demo circuit for proving knowledge of the
-/// preimage of a MiMC hash invocation.
-struct MiMCDemo<'a, E: Engine> {
-    xl: Option<E::Fr>,
-    xr: Option<E::Fr>,
-    constants: &'a [E::Fr]
-}
-
-/// Our demo circuit implements this `Circuit` trait which
-/// is used during paramgen and proving in order to
-/// synthesize the constraint system.
-impl<'a, E: Engine> Circuit<E> for MiMCDemo<'a, E> {
-    fn synthesize<CS: ConstraintSystem<E>>(
-        self,
-        cs: &mut CS
-    ) -> Result<(), SynthesisError>
-    {
-        assert_eq!(self.constants.len(), MIMC_ROUNDS);
-
-        // Allocate the first component of the preimage.
-        let mut xl_value = self.xl;
-        let mut xl = cs.alloc(|| "preimage xl", || {
-            xl_value.ok_or(SynthesisError::AssignmentMissing)
-        })?;
-
-        // Allocate the second component of the preimage.
-        let mut xr_value = self.xr;
-        let mut xr = cs.alloc(|| "preimage xr", || {
-            xr_value.ok_or(SynthesisError::AssignmentMissing)
-        })?;
-
-        for i in 0..MIMC_ROUNDS {
-            // xL, xR := xR + (xL + Ci)^3, xL
-            let cs = &mut cs.namespace(|| format!("round {}", i));
-
-            // tmp = (xL + Ci)^2
-            let mut tmp_value = xl_value.map(|mut e| {
-                e.add_assign(&self.constants[i]);
-                e.square();
-                e
-            });
-            let mut tmp = cs.alloc(|| "tmp", || {
-                tmp_value.ok_or(SynthesisError::AssignmentMissing)
-            })?;
-
-            cs.enforce(
-                || "tmp = (xL + Ci)^2",
-                |lc| lc + xl + (self.constants[i], CS::one()),
-                |lc| lc + xl + (self.constants[i], CS::one()),
-                |lc| lc + tmp
-            );
-
-            // new_xL = xR + (xL + Ci)^3
-            // new_xL = xR + tmp * (xL + Ci)
-            // new_xL - xR = tmp * (xL + Ci)
-            let mut new_xl_value = xl_value.map(|mut e| {
-                e.add_assign(&self.constants[i]);
-                e.mul_assign(&tmp_value.unwrap());
-                e.add_assign(&xr_value.unwrap());
-                e
-            });
-
-            let mut new_xl = if i == (MIMC_ROUNDS-1) {
-                // This is the last round, xL is our image and so
-                // we allocate a public input.
-                cs.alloc_input(|| "image", || {
-                    new_xl_value.ok_or(SynthesisError::AssignmentMissing)
-                })?
-            } else {
-                cs.alloc(|| "new_xl", || {
-                    new_xl_value.ok_or(SynthesisError::AssignmentMissing)
-                })?
-            };
-
-            cs.enforce(
-                || "new_xL = xR + (xL + Ci)^3",
-                |lc| lc + tmp,
-                |lc| lc + xl + (self.constants[i], CS::one()),
-                |lc| lc + new_xl - xr
-            );
-
-            // xR = xL
-            xr = xl;
-            xr_value = xl_value;
-
-            // xL = new_xL
-            xl = new_xl;
-            xl_value = new_xl_value;
-        }
-
-        Ok(())
-    }
-}
-
-fn main() {
-    // This may not be cryptographically safe, use
-    // `OsRng` (for example) in production software.
-    let rng = &mut thread_rng();
-
-    // Generate the MiMC round constants
-    let constants = (0..MIMC_ROUNDS).map(|_| rng.gen()).collect::<Vec<_>>();
-
-    println!("Creating parameters...");
-
-    // Create parameters for our circuit
-    let mut params = {
-        let c = MiMCDemo::<Bls12> {
-            xl: None,
-            xr: None,
-            constants: &constants
-        };
-
-        phase2::MPCParameters::new(c).unwrap()
-    };
-
-    let old_params = params.clone();
-    params.contribute(rng);
-
-    let first_contrib = phase2::verify_contribution(&old_params, &params).expect("should verify");
-
-    let old_params = params.clone();
-    params.contribute(rng);
-
-    let second_contrib = phase2::verify_contribution(&old_params, &params).expect("should verify");
-
-    let verification_result = params.verify(MiMCDemo::<Bls12> {
-        xl: None,
-        xr: None,
-        constants: &constants
-    }).unwrap();
-
-    assert!(phase2::contains_contribution(&verification_result, &first_contrib));
-    assert!(phase2::contains_contribution(&verification_result, &second_contrib));
-
-    let params = params.get_params();
-
-    // Prepare the verification key (for proof verification)
-    let pvk = prepare_verifying_key(&params.vk);
-
-    println!("Creating proofs...");
-
-    // Let's benchmark stuff!
-    const SAMPLES: u32 = 50;
-    let mut total_proving = Duration::new(0, 0);
-    let mut total_verifying = Duration::new(0, 0);
-
-    // Just a place to put the proof data, so we can
-    // benchmark deserialization.
-    let mut proof_vec = vec![];
-
-    for _ in 0..SAMPLES {
-        // Generate a random preimage and compute the image
-        let xl = rng.gen();
-        let xr = rng.gen();
-        let image = mimc::<Bls12>(xl, xr, &constants);
-
-        proof_vec.truncate(0);
-
-        let start = Instant::now();
-        {
-            // Create an instance of our circuit (with the
-            // witness)
-            let c = MiMCDemo {
-                xl: Some(xl),
-                xr: Some(xr),
-                constants: &constants
-            };
-
-            // Create a groth16 proof with our parameters.
-            let proof = create_random_proof(c, params, rng).unwrap();
-
-            proof.write(&mut proof_vec).unwrap();
-        }
-
-        total_proving += start.elapsed();
-
-        let start = Instant::now();
-        let proof = Proof::read(&proof_vec[..]).unwrap();
-        // Check the proof
-        assert!(verify_proof(
-            &pvk,
-            &proof,
-            &[image]
-        ).unwrap());
-        total_verifying += start.elapsed();
-    }
-    let proving_avg = total_proving / SAMPLES;
-    let proving_avg = proving_avg.subsec_nanos() as f64 / 1_000_000_000f64
-                      + (proving_avg.as_secs() as f64);
-
-    let verifying_avg = total_verifying / SAMPLES;
-    let verifying_avg = verifying_avg.subsec_nanos() as f64 / 1_000_000_000f64
-                      + (verifying_avg.as_secs() as f64);
-
-    println!("Average proving time: {:?} seconds", proving_avg);
-    println!("Average verifying time: {:?} seconds", verifying_avg);
-}
phase2/input.json (new file, 1 line)

@@ -0,0 +1 @@
+{"in": 42}
@@ -1,6 +1,5 @@
 extern crate rand;
 extern crate phase2;
-extern crate memmap;
 extern crate num_bigint;
 extern crate num_traits;
 extern crate blake2;
@@ -15,6 +14,7 @@ use itertools::Itertools;
 use std::fs::File;
 use std::fs::OpenOptions;
 
+use phase2::parameters::MPCParameters;
 
 fn main() {
     let args: Vec<String> = std::env::args().collect();
@@ -64,7 +64,7 @@ fn main() {
     for b in cur_hash.iter() {
         print!("{:02x}", b);
     }
-    println!("");
+    println!();
 
     let mut digest = &cur_hash[..];
 
@@ -82,7 +82,7 @@ fn main() {
         .read(true)
         .open(in_params_filename)
         .expect("unable to open.");
-    let mut params = phase2::MPCParameters::read(reader, disallow_points_at_infinity, true).expect("unable to read params");
+    let mut params = MPCParameters::read(reader, disallow_points_at_infinity, true).expect("unable to read params");
 
     println!("Contributing to {}...", in_params_filename);
     let hash = params.contribute(&mut rng);
@@ -1,6 +1,5 @@
 extern crate rand;
 extern crate phase2;
-extern crate memmap;
 extern crate num_bigint;
 extern crate num_traits;
 extern crate blake2;
@@ -13,15 +12,17 @@ use itertools::Itertools;
 use std::fs::File;
 use std::fs::OpenOptions;
 
+use phase2::parameters::MPCParameters;
 
 fn main() {
     let args: Vec<String> = std::env::args().collect();
     if args.len() != 4 {
-        println!("Usage: \n<in_params.params> <in_str_entropy> <out_params.params>");
+        println!("Usage: \n<in_params.params> <out_params.params> <in_str_entropy>");
         std::process::exit(exitcode::USAGE);
     }
     let in_params_filename = &args[1];
-    let entropy = &args[2];
-    let out_params_filename = &args[3];
+    let out_params_filename = &args[2];
+    let entropy = &args[3];
 
     let disallow_points_at_infinity = false;
 
@@ -62,7 +63,7 @@ fn main() {
         .read(true)
         .open(in_params_filename)
         .expect("unable to open.");
-    let mut params = phase2::MPCParameters::read(reader, disallow_points_at_infinity, true).expect("unable to read params");
+    let mut params = MPCParameters::read(reader, disallow_points_at_infinity, true).expect("unable to read params");
 
     println!("Contributing to {}...", in_params_filename);
     let hash = params.contribute(&mut rng);
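The hunk above reorders the CLI so the output file precedes the entropy string. An indicative call under the new order (the `phase2_contribute` name is an assumption based on the Dockerfile's `phase2_` prefix; only the argument order comes from the usage string):

```sh
phase2_contribute old.params new.params "type some random text here"
```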
phase2/src/bin/copy_json.rs (new file, 26 lines)

@@ -0,0 +1,26 @@
+extern crate exitcode;
+extern crate serde_json;
+
+use std::fs;
+use serde_json::*;
+
+fn main() {
+    let args: Vec<String> = std::env::args().collect();
+    if args.len() != 4 {
+        println!("Usage: \n<reference_key.json> <in_key.json> <out_key.json>");
+        std::process::exit(exitcode::USAGE);
+    }
+    let ref_file = &args[1];
+    let in_file = &args[2];
+    let out_file = &args[3];
+
+    let in_json: Map<String, Value> = serde_json::from_str(&fs::read_to_string(in_file).unwrap()).unwrap();
+    let mut reference_json: Map<String, Value> = serde_json::from_str(&fs::read_to_string(ref_file).unwrap()).unwrap();
+
+    for (key, value) in &in_json {
+        reference_json[key] = value.clone();
+    }
+
+    fs::write(out_file, serde_json::to_string(&reference_json).unwrap().as_bytes()).unwrap();
+    println!("Done");
+}
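This tool overlays every key of `in_key.json` onto `reference_key.json` and writes the merged map. An indicative invocation (the binary name is assumed from the Dockerfile prefix convention; the argument order comes from the usage string):

```sh
phase2_copy_json reference_key.json in_key.json out_key.json
```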
@@ -1,21 +1,30 @@
 extern crate bellman_ce;
 extern crate rand;
 extern crate phase2;
-extern crate memmap;
-extern crate num_bigint;
-extern crate num_traits;
 extern crate exitcode;
 extern crate serde;
 extern crate serde_json;
+extern crate num_bigint;
+extern crate num_traits;
+extern crate itertools;
 
-use serde::{Deserialize, Serialize};
-use num_bigint::BigUint;
-use num_traits::Num;
+use std::fs;
 
 use std::fs::OpenOptions;
-use std::io::Write;
-use std::ops::DerefMut;
+use std::iter::repeat;
+use itertools::Itertools;
+use serde::{Deserialize, Serialize};
+use phase2::parameters::MPCParameters;
+use phase2::utils::{
+    p1_to_vec,
+    p2_to_vec,
+    pairing_to_vec,
+};
+use bellman_ce::pairing::{
+    Engine,
+    bn256::{
+        Bn256,
+    }
+};
 
 #[derive(Serialize, Deserialize)]
 struct ProvingKeyJson {
@@ -44,31 +53,7 @@ struct VerifyingKeyJson {
     pub vk_beta_2: Vec<Vec<String>>,
     pub vk_gamma_2: Vec<Vec<String>>,
     pub vk_delta_2: Vec<Vec<String>>,
-}
-
-// Bring in some tools for using pairing-friendly curves
-use bellman_ce::pairing::{
-    Engine,
-    CurveAffine,
-    ff::PrimeField,
-};
-
-// We're going to use the BLS12-381 pairing-friendly elliptic curve.
-use bellman_ce::pairing::bn256::{
-    Bn256,
-};
-
-use std::collections::BTreeMap;
-
-#[derive(Serialize, Deserialize)]
-struct CircuitJson {
-    pub constraints: Vec<Vec<BTreeMap<String, String>>>,
-    #[serde(rename = "nPubInputs")]
-    pub num_inputs: usize,
-    #[serde(rename = "nOutputs")]
-    pub num_outputs: usize,
-    #[serde(rename = "nVars")]
-    pub num_variables: usize,
+    pub vk_alfabeta_12: Vec<Vec<Vec<String>>>,
 }
 
 fn main() {
@@ -89,131 +74,35 @@ fn main() {
         .read(true)
         .open(params_filename)
         .expect("unable to open.");
-    let params = phase2::MPCParameters::read(reader, disallow_points_at_infinity, true).expect("unable to read params");
+    let params = MPCParameters::read(reader, disallow_points_at_infinity, true).expect("unable to read params");
     let params = params.get_params();
 
-    let mut proving_key = ProvingKeyJson {
-        a: vec![],
-        b1: vec![],
-        b2: vec![],
-        c: vec![],
-        vk_alfa_1: vec![],
-        vk_beta_1: vec![],
-        vk_delta_1: vec![],
-        vk_beta_2: vec![],
-        vk_delta_2: vec![],
-        h: vec![],
-    };
-    let repr_to_big = |r| {
-        BigUint::from_str_radix(&format!("{}", r)[2..], 16).unwrap().to_str_radix(10)
+    let proving_key = ProvingKeyJson {
+        a: params.a.iter().map(|e| p1_to_vec(e)).collect_vec(),
+        b1: params.b_g1.iter().map(|e| p1_to_vec(e)).collect_vec(),
+        b2: params.b_g2.iter().map(|e| p2_to_vec(e)).collect_vec(),
+        c: repeat(None).take(params.vk.ic.len()).chain(params.l.iter().map(|e| Some(p1_to_vec(e)))).collect_vec(),
+        vk_alfa_1: p1_to_vec(&params.vk.alpha_g1),
+        vk_beta_1: p1_to_vec(&params.vk.beta_g1),
+        vk_delta_1: p1_to_vec(&params.vk.delta_g1),
+        vk_beta_2: p2_to_vec(&params.vk.beta_g2),
+        vk_delta_2: p2_to_vec(&params.vk.delta_g2),
+        h: params.h.iter().map(|e| p1_to_vec(e)).collect_vec(),
     };
 
-    let p1_to_vec = |p : &<Bn256 as Engine>::G1Affine| {
-        let mut v = vec![];
-        //println!("test: {}", p.get_x().into_repr());
-        let x = repr_to_big(p.get_x().into_repr());
-        v.push(x);
-        let y = repr_to_big(p.get_y().into_repr());
-        v.push(y);
-        if p.is_zero() {
-            v.push("0".to_string());
-        } else {
-            v.push("1".to_string());
-        }
-        v
-    };
-    let p2_to_vec = |p : &<Bn256 as Engine>::G2Affine| {
-        let mut v = vec![];
-        let x = p.get_x();
-        let mut x_v = vec![];
-        x_v.push(repr_to_big(x.c0.into_repr()));
-        x_v.push(repr_to_big(x.c1.into_repr()));
-        v.push(x_v);
-
-        let y = p.get_y();
-        let mut y_v = vec![];
-        y_v.push(repr_to_big(y.c0.into_repr()));
-        y_v.push(repr_to_big(y.c1.into_repr()));
-        v.push(y_v);
-
-        if p.is_zero() {
-            v.push(["0".to_string(), "0".to_string()].to_vec());
-        } else {
-            v.push(["1".to_string(), "0".to_string()].to_vec());
-        }
-
-        v
-    };
-    let a = params.a.clone();
-    for e in a.iter() {
-        proving_key.a.push(p1_to_vec(e));
-    }
-    let b1 = params.b_g1.clone();
-    for e in b1.iter() {
-        proving_key.b1.push(p1_to_vec(e));
-    }
-    let b2 = params.b_g2.clone();
-    for e in b2.iter() {
-        proving_key.b2.push(p2_to_vec(e));
-    }
-    let c = params.l.clone();
-    for _ in 0..params.vk.ic.len() {
-        proving_key.c.push(None);
-    }
-    for e in c.iter() {
-        proving_key.c.push(Some(p1_to_vec(e)));
-    }
-
-    let vk_alfa_1 = params.vk.alpha_g1.clone();
-    proving_key.vk_alfa_1 = p1_to_vec(&vk_alfa_1);
-
-    let vk_beta_1 = params.vk.beta_g1.clone();
-    proving_key.vk_beta_1 = p1_to_vec(&vk_beta_1);
-
-    let vk_delta_1 = params.vk.delta_g1.clone();
-    proving_key.vk_delta_1 = p1_to_vec(&vk_delta_1);
-
-    let vk_beta_2 = params.vk.beta_g2.clone();
-    proving_key.vk_beta_2 = p2_to_vec(&vk_beta_2);
-
-    let vk_delta_2 = params.vk.delta_g2.clone();
-    proving_key.vk_delta_2 = p2_to_vec(&vk_delta_2);
-
-    let h = params.h.clone();
-    for e in h.iter() {
-        proving_key.h.push(p1_to_vec(e));
-    }
-
-    let mut verification_key = VerifyingKeyJson {
-        ic: vec![],
-        vk_alfa_1: vec![],
-        vk_beta_2: vec![],
-        vk_gamma_2: vec![],
-        vk_delta_2: vec![],
+    let verification_key = VerifyingKeyJson {
+        ic: params.vk.ic.iter().map(|e| p1_to_vec(e)).collect_vec(),
+        vk_alfa_1: p1_to_vec(&params.vk.alpha_g1),
+        vk_beta_2: p2_to_vec(&params.vk.beta_g2),
+        vk_gamma_2: p2_to_vec(&params.vk.gamma_g2),
+        vk_delta_2: p2_to_vec(&params.vk.delta_g2),
+        vk_alfabeta_12: pairing_to_vec(&Bn256::pairing(params.vk.alpha_g1, params.vk.beta_g2)),
     };
 
-    let ic = params.vk.ic.clone();
-    for e in ic.iter() {
-        verification_key.ic.push(p1_to_vec(e));
-    }
-
-    verification_key.vk_alfa_1 = p1_to_vec(&vk_alfa_1);
-    verification_key.vk_beta_2 = p2_to_vec(&vk_beta_2);
-    let vk_gamma_2 = params.vk.gamma_g2.clone();
-    verification_key.vk_gamma_2 = p2_to_vec(&vk_gamma_2);
-    verification_key.vk_delta_2 = p2_to_vec(&vk_delta_2);
-
-    let pk_file = OpenOptions::new().read(true).write(true).create_new(true).open(pk_filename).unwrap();
     let pk_json = serde_json::to_string(&proving_key).unwrap();
-    pk_file.set_len(pk_json.len() as u64).expect("unable to write pk file");
-    let mut mmap = unsafe { memmap::Mmap::map(&pk_file) }.unwrap().make_mut().unwrap();
-    mmap.deref_mut().write_all(pk_json.as_bytes()).unwrap();
-
-    let vk_file = OpenOptions::new().read(true).write(true).create_new(true).open(vk_filename).unwrap();
     let vk_json = serde_json::to_string(&verification_key).unwrap();
-    vk_file.set_len(vk_json.len() as u64).expect("unable to write vk file");
-    let mut mmap = unsafe { memmap::Mmap::map(&vk_file) }.unwrap().make_mut().unwrap();
-    mmap.deref_mut().write_all(vk_json.as_bytes()).unwrap();
+    fs::write(pk_filename, pk_json.as_bytes()).unwrap();
+    fs::write(vk_filename, vk_json.as_bytes()).unwrap();
 
     println!("Created {} and {}.", pk_filename, vk_filename);
 }
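Indicatively, this export step reads the MPC params and emits snarkjs-style proving/verifying key JSON (the binary name and exact argument order are assumptions; the hunk only shows a params input plus `pk_filename` and `vk_filename` outputs):

```sh
phase2_export_keys circuit.params pk.json vk.json
```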
phase2/src/bin/generate_verifier.rs (new file, 78 lines)

@@ -0,0 +1,78 @@
+#![allow(unused_imports)]
+
+extern crate phase2;
+extern crate bellman_ce;
+extern crate num_bigint;
+extern crate num_traits;
+extern crate exitcode;
+extern crate serde;
+
+use std::fmt;
+use std::fs;
+use std::fs::OpenOptions;
+use num_bigint::BigUint;
+use num_traits::Num;
+use phase2::utils::repr_to_big;
+use phase2::parameters::MPCParameters;
+use bellman_ce::pairing::{
+    Engine,
+    CurveAffine,
+    ff::PrimeField,
+    bn256::{
+        Bn256,
+    }
+};
+
+fn main() {
+    let args: Vec<String> = std::env::args().collect();
+    if args.len() != 3 {
+        println!("Usage: \n<params> <out_contract.sol>");
+        std::process::exit(exitcode::USAGE);
+    }
+    let params_filename = &args[1];
+    let verifier_filename = &args[2];
+
+    let should_filter_points_at_infinity = false;
+    let bytes = include_bytes!("../verifier_groth.sol");
+    let template = String::from_utf8_lossy(bytes);
+
+    let reader = OpenOptions::new()
+        .read(true)
+        .open(params_filename)
+        .expect("unable to open.");
+
+    let params = MPCParameters::read(reader, should_filter_points_at_infinity, true).expect("unable to read params");
+    let vk = &params.get_params().vk;
+
+    let p1_to_str = |p: &<Bn256 as Engine>::G1Affine| {
+        let x = repr_to_big(p.get_x().into_repr());
+        let y = repr_to_big(p.get_y().into_repr());
+        return format!("{}, {}", x, y)
+    };
+    let p2_to_str = |p: &<Bn256 as Engine>::G2Affine| {
+        let x = p.get_x();
+        let y = p.get_y();
+        let x_c0 = repr_to_big(x.c0.into_repr());
+        let x_c1 = repr_to_big(x.c1.into_repr());
+        let y_c0 = repr_to_big(y.c0.into_repr());
+        let y_c1 = repr_to_big(y.c1.into_repr());
+        format!("[{}, {}], [{}, {}]", x_c0, x_c1, y_c0, y_c1)
+    };
+
+    let template = template.replace("<%vk_alfa1%>", &*p1_to_str(&vk.alpha_g1));
+    let template = template.replace("<%vk_beta2%>", &*p2_to_str(&vk.beta_g2));
+    let template = template.replace("<%vk_gamma2%>", &*p2_to_str(&vk.gamma_g2));
+    let template = template.replace("<%vk_delta2%>", &*p2_to_str(&vk.delta_g2));
+
+    let template = template.replace("<%vk_ic_length%>", &*vk.ic.len().to_string());
+    let template = template.replace("<%vk_input_length%>", &*(vk.ic.len() - 1).to_string());
+
+    let mut vi = String::from("");
+    for i in 0..vk.ic.len() {
+        vi = format!("{}{}vk.IC[{}] = Pairing.G1Point({});\n", vi, if vi.len() == 0 { "" } else { " " }, i, &*p1_to_str(&vk.ic[i]));
+    }
+    let template = template.replace("<%vk_ic_pts%>", &*vi);
+
+    fs::write(verifier_filename, template.as_bytes()).unwrap();
+    println!("Created {}", verifier_filename);
+}
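This tool fills the `verifier_groth.sol` template with the verifying key and writes a Solidity verifier contract. An indicative call (the binary name is assumed; the usage string above fixes the argument order):

```sh
phase2_generate_verifier circuit.params verifier.sol
```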
@@ -1,445 +0,0 @@
-extern crate bellman_ce;
-extern crate rand;
-extern crate phase2;
-extern crate num_bigint;
-extern crate num_traits;
-
-#[macro_use]
-extern crate serde;
-extern crate serde_json;
-
-use num_bigint::BigUint;
-use num_traits::Num;
-use std::ops::DerefMut;
-use std::io::Write;
-
-use std::sync::Arc;
-use serde::{Deserialize, Serialize};
-// For randomness (during paramgen and proof generation)
-use rand::{thread_rng, Rng};
-
-// For benchmarking
-use std::time::{Duration, Instant};
-
-// Bring in some tools for using pairing-friendly curves
-use bellman_ce::pairing::{
-    Engine,
-    CurveAffine,
-    ff::{Field, PrimeField},
-};
-
-// We're going to use the BLS12-381 pairing-friendly elliptic curve.
-use bellman_ce::pairing::bn256::{
-    Bn256
-};
-
-// We'll use these interfaces to construct our circuit.
-use bellman_ce::{
-    Circuit,
-    ConstraintSystem,
-    SynthesisError
-};
-
-// We're going to use the Groth16 proving system.
-use bellman_ce::groth16::{
-    Proof,
-    prepare_verifying_key,
-    create_random_proof,
-    verify_proof,
-};
-
-use std::fs::File;
-use std::fs::{OpenOptions, remove_file};
-
-#[derive(Serialize, Deserialize)]
-struct ProvingKeyJson {
-    #[serde(rename = "A")]
-    pub a: Vec<Vec<String>>,
-    #[serde(rename = "B1")]
-    pub b1: Vec<Vec<String>>,
-    #[serde(rename = "B2")]
-    pub b2: Vec<Vec<Vec<String>>>,
-    #[serde(rename = "C")]
-    pub c: Vec<Option<Vec<String>>>,
-    pub vk_alfa_1: Vec<String>,
-    pub vk_beta_1: Vec<String>,
-    pub vk_delta_1: Vec<String>,
-    pub vk_beta_2: Vec<Vec<String>>,
-    pub vk_delta_2: Vec<Vec<String>>,
-    #[serde(rename = "hExps")]
-    pub h: Vec<Vec<String>>,
-}
-
-#[derive(Serialize, Deserialize)]
-struct VerifyingKeyJson {
-    #[serde(rename = "IC")]
-    pub ic: Vec<Vec<String>>,
-    pub vk_alfa_1: Vec<String>,
-    pub vk_beta_2: Vec<Vec<String>>,
-    pub vk_gamma_2: Vec<Vec<String>>,
-    pub vk_delta_2: Vec<Vec<String>>,
-}
-
-const MIMC_ROUNDS: usize = 322;
-
-/// This is an implementation of MiMC, specifically a
-/// variant named `LongsightF322p3` for BLS12-381.
-/// See http://eprint.iacr.org/2016/492 for more
-/// information about this construction.
-///
-/// ```
-/// function LongsightF322p3(xL ⦂ Fp, xR ⦂ Fp) {
-///     for i from 0 up to 321 {
-///         xL, xR := xR + (xL + Ci)^3, xL
-///     }
-///     return xL
-/// }
-/// ```
-fn mimc<E: Engine>(
-    mut xl: E::Fr,
-    mut xr: E::Fr,
-    constants: &[E::Fr]
-) -> E::Fr
-{
-    assert_eq!(constants.len(), MIMC_ROUNDS);
-
-    for i in 0..MIMC_ROUNDS {
-        let mut tmp1 = xl;
-        tmp1.add_assign(&constants[i]);
-        let mut tmp2 = tmp1;
-        tmp2.square();
-        tmp2.mul_assign(&tmp1);
-        tmp2.add_assign(&xr);
-        xr = xl;
-        xl = tmp2;
-    }
-
-    xl
-}
-
-/// This is our demo circuit for proving knowledge of the
-/// preimage of a MiMC hash invocation.
-struct MiMCDemo<'a, E: Engine> {
-    xl: Option<E::Fr>,
-    xr: Option<E::Fr>,
-    constants: &'a [E::Fr]
-}
-
-/// Our demo circuit implements this `Circuit` trait which
-/// is used during paramgen and proving in order to
-/// synthesize the constraint system.
-impl<'a, E: Engine> Circuit<E> for MiMCDemo<'a, E> {
-    fn synthesize<CS: ConstraintSystem<E>>(
-        self,
-        cs: &mut CS
-    ) -> Result<(), SynthesisError>
-    {
-        assert_eq!(self.constants.len(), MIMC_ROUNDS);
-
-        // Allocate the first component of the preimage.
-        let mut xl_value = self.xl;
-        let mut xl = cs.alloc(|| "preimage xl", || {
-            xl_value.ok_or(SynthesisError::AssignmentMissing)
-        })?;
-
-        // Allocate the second component of the preimage.
-        let mut xr_value = self.xr;
-        let mut xr = cs.alloc(|| "preimage xr", || {
-            xr_value.ok_or(SynthesisError::AssignmentMissing)
-        })?;
-
-        for i in 0..MIMC_ROUNDS {
-            // xL, xR := xR + (xL + Ci)^3, xL
-            let cs = &mut cs.namespace(|| format!("round {}", i));
-
-            // tmp = (xL + Ci)^2
-            let mut tmp_value = xl_value.map(|mut e| {
-                e.add_assign(&self.constants[i]);
-                e.square();
-                e
-            });
-            let mut tmp = cs.alloc(|| "tmp", || {
-                tmp_value.ok_or(SynthesisError::AssignmentMissing)
-            })?;
-
-            cs.enforce(
-                || "tmp = (xL + Ci)^2",
-                |lc| lc + xl + (self.constants[i], CS::one()),
-                |lc| lc + xl + (self.constants[i], CS::one()),
-                |lc| lc + tmp
-            );
-
-            // new_xL = xR + (xL + Ci)^3
-            // new_xL = xR + tmp * (xL + Ci)
-            // new_xL - xR = tmp * (xL + Ci)
-            let mut new_xl_value = xl_value.map(|mut e| {
-                e.add_assign(&self.constants[i]);
-                e.mul_assign(&tmp_value.unwrap());
-                e.add_assign(&xr_value.unwrap());
-                e
-            });
-
-            let mut new_xl = if i == (MIMC_ROUNDS-1) {
-                // This is the last round, xL is our image and so
-                // we allocate a public input.
-                cs.alloc_input(|| "image", || {
-                    new_xl_value.ok_or(SynthesisError::AssignmentMissing)
-                })?
-            } else {
-                cs.alloc(|| "new_xl", || {
-                    new_xl_value.ok_or(SynthesisError::AssignmentMissing)
-                })?
-            };
-
-            cs.enforce(
-                || "new_xL = xR + (xL + Ci)^3",
-                |lc| lc + tmp,
-                |lc| lc + xl + (self.constants[i], CS::one()),
-                |lc| lc + new_xl - xr
-            );
-
-            // xR = xL
-            xr = xl;
-            xr_value = xl_value;
-
-            // xL = new_xL
-            xl = new_xl;
-            xl_value = new_xl_value;
-        }
-
-        Ok(())
-    }
-}
-
-fn main() {
-    // This may not be cryptographically safe, use
-    // `OsRng` (for example) in production software.
-    let rng = &mut thread_rng();
-
-    // Generate the MiMC round constants
-    let constants = (0..MIMC_ROUNDS).map(|_| rng.gen()).collect::<Vec<_>>();
-
-    println!("Creating parameters...");
-
-    let should_filter_points_at_infinity = false;
-
-    // Create parameters for our circuit
-    let mut params = {
-        let c = MiMCDemo::<Bn256> {
-            xl: None,
-            xr: None,
-            constants: &constants
-        };
-
-        phase2::MPCParameters::new(c, should_filter_points_at_infinity).unwrap()
-    };
-
-    let old_params = params.clone();
-    params.contribute(rng);
-
-    let first_contrib = phase2::verify_contribution(&old_params, &params).expect("should verify");
-
-    let old_params = params.clone();
-    params.contribute(rng);
-
-    let second_contrib = phase2::verify_contribution(&old_params, &params).expect("should verify");
-
-    let verification_result = params.verify(MiMCDemo::<Bn256> {
-        xl: None,
-        xr: None,
-        constants: &constants
-    }, should_filter_points_at_infinity).unwrap();
-
-    assert!(phase2::contains_contribution(&verification_result, &first_contrib));
-    assert!(phase2::contains_contribution(&verification_result, &second_contrib));
-
-    let params = params.get_params();
-
-    let mut f = File::create("mimc.params").unwrap();
-    params.write(&mut f);
-
-    let mut proving_key = ProvingKeyJson {
-        a: vec![],
-        b1: vec![],
-        b2: vec![],
-        c: vec![],
-        vk_alfa_1: vec![],
-        vk_beta_1: vec![],
-        vk_delta_1: vec![],
-        vk_beta_2: vec![],
-        vk_delta_2: vec![],
-        h: vec![],
-    };
-    let repr_to_big = |r| {
-        BigUint::from_str_radix(&format!("{}", r)[2..], 16).unwrap().to_str_radix(10)
-    };
-
-    let p1_to_vec = |p : &<Bn256 as Engine>::G1Affine| {
-        let mut v = vec![];
-        let x = repr_to_big(p.get_x().into_repr());
-        v.push(x);
-        let y = repr_to_big(p.get_y().into_repr());
-        v.push(y);
-        if p.is_zero() {
-            v.push("0".to_string());
-        } else {
-            v.push("1".to_string());
-        }
-        v
-    };
-    let p2_to_vec = |p : &<Bn256 as Engine>::G2Affine| {
-        let mut v = vec![];
-        let x = p.get_x();
-        let mut x_v = vec![];
-        x_v.push(repr_to_big(x.c0.into_repr()));
-        x_v.push(repr_to_big(x.c1.into_repr()));
-        v.push(x_v);
-
-        let y = p.get_y();
-        let mut y_v = vec![];
-        y_v.push(repr_to_big(y.c0.into_repr()));
-        y_v.push(repr_to_big(y.c1.into_repr()));
-        v.push(y_v);
-
-        if p.is_zero() {
-            v.push(["0".to_string(), "0".to_string()].to_vec());
-        } else {
-            v.push(["1".to_string(), "0".to_string()].to_vec());
-        }
-
-        v
-    };
-    let a = params.a.clone();
-    for e in a.iter() {
-        proving_key.a.push(p1_to_vec(e));
-    }
-    let b1 = params.b_g1.clone();
-    for e in b1.iter() {
-        proving_key.b1.push(p1_to_vec(e));
-    }
-    let b2 = params.b_g2.clone();
-    for e in b2.iter() {
-        proving_key.b2.push(p2_to_vec(e));
-    }
-    let c = params.l.clone();
-    for _ in 0..params.vk.ic.len() {
-        proving_key.c.push(None);
-    }
-    for e in c.iter() {
-        proving_key.c.push(Some(p1_to_vec(e)));
-    }
-
-    let vk_alfa_1 = params.vk.alpha_g1.clone();
-    proving_key.vk_alfa_1 = p1_to_vec(&vk_alfa_1);
-
-    let vk_beta_1 = params.vk.beta_g1.clone();
-    proving_key.vk_beta_1 = p1_to_vec(&vk_beta_1);
-
-    let vk_delta_1 = params.vk.delta_g1.clone();
-    proving_key.vk_delta_1 = p1_to_vec(&vk_delta_1);
-
-    let vk_beta_2 = params.vk.beta_g2.clone();
-    proving_key.vk_beta_2 = p2_to_vec(&vk_beta_2);
-
-    let vk_delta_2 = params.vk.delta_g2.clone();
-    proving_key.vk_delta_2 = p2_to_vec(&vk_delta_2);
-
-    let h = params.h.clone();
-    for e in h.iter() {
-        proving_key.h.push(p1_to_vec(e));
-    }
-
-
-    let mut verification_key = VerifyingKeyJson {
-        ic: vec![],
-        vk_alfa_1: vec![],
-        vk_beta_2: vec![],
-        vk_gamma_2: vec![],
-        vk_delta_2: vec![],
-    };
-
-    let ic = params.vk.ic.clone();
-    for e in ic.iter() {
-        verification_key.ic.push(p1_to_vec(e));
-    }
-
-    verification_key.vk_alfa_1 = p1_to_vec(&vk_alfa_1);
-    verification_key.vk_beta_2 = p2_to_vec(&vk_beta_2);
-    let vk_gamma_2 = params.vk.gamma_g2.clone();
-    verification_key.vk_gamma_2 = p2_to_vec(&vk_gamma_2);
-    verification_key.vk_delta_2 = p2_to_vec(&vk_delta_2);
-
-    let mut pk_file = OpenOptions::new().read(true).write(true).create_new(true).open("pk.json").unwrap();
-    let pk_json = serde_json::to_string(&proving_key).unwrap();
-    pk_file.set_len(pk_json.len() as u64);
-    let mut mmap = unsafe { memmap::Mmap::map(&pk_file) }.unwrap().make_mut().unwrap();
-    mmap.deref_mut().write_all(pk_json.as_bytes()).unwrap();
-
-    let mut vk_file = OpenOptions::new().read(true).write(true).create_new(true).open("vk.json").unwrap();
-    let vk_json = serde_json::to_string(&verification_key).unwrap();
-    vk_file.set_len(vk_json.len() as u64);
-    let mut mmap = unsafe { memmap::Mmap::map(&vk_file) }.unwrap().make_mut().unwrap();
-    mmap.deref_mut().write_all(vk_json.as_bytes()).unwrap();
-
-    /*
-    // Prepare the verification key (for proof verification)
-    let pvk = prepare_verifying_key(&params.vk);
-    println!("Creating proofs...");
-
-    // Let's benchmark stuff!
-    const SAMPLES: u32 = 50;
-    let mut total_proving = Duration::new(0, 0);
-    let mut total_verifying = Duration::new(0, 0);
-
-    // Just a place to put the proof data, so we can
-    // benchmark deserialization.
-    let mut proof_vec = vec![];
-
-    for _ in 0..SAMPLES {
-        // Generate a random preimage and compute the image
-        let xl = rng.gen();
-        let xr = rng.gen();
-        let image = mimc::<Bn256>(xl, xr, &constants);
-
-        proof_vec.truncate(0);
-
-        let start = Instant::now();
-        {
-            // Create an instance of our circuit (with the
-            // witness)
-            let c = MiMCDemo {
-                xl: Some(xl),
-                xr: Some(xr),
-                constants: &constants
-            };
-
-            // Create a groth16 proof with our parameters.
-            let proof = create_random_proof(c, params, rng).unwrap();
-
-            proof.write(&mut proof_vec).unwrap();
-        }
-
-        total_proving += start.elapsed();
-
-        let start = Instant::now();
-        let proof = Proof::read(&proof_vec[..]).unwrap();
-        // Check the proof
-        assert!(verify_proof(
-            &pvk,
-            &proof,
-            &[image]
-        ).unwrap());
-        total_verifying += start.elapsed();
-    }
-    let proving_avg = total_proving / SAMPLES;
-    let proving_avg = proving_avg.subsec_nanos() as f64 / 1_000_000_000f64
-                      + (proving_avg.as_secs() as f64);
-
-    let verifying_avg = total_verifying / SAMPLES;
-    let verifying_avg = verifying_avg.subsec_nanos() as f64 / 1_000_000_000f64
-                      + (verifying_avg.as_secs() as f64);
-
-    println!("Average proving time: {:?} seconds", proving_avg);
-    println!("Average verifying time: {:?} seconds", verifying_avg);
-    */
-}
@@ -3,6 +3,8 @@ extern crate phase2;
 extern crate exitcode;
 
 use std::fs::File;
+use phase2::parameters::MPCParameters;
+use phase2::circom_circuit::CircomCircuit;
 
 fn main() {
     let args: Vec<String> = std::env::args().collect();
@@ -18,10 +20,8 @@ fn main() {
     // Import the circuit and create the initial parameters using phase 1
     println!("Creating initial parameters for {}...", circuit_filename);
     let params = {
-        let c = phase2::CircomCircuit {
-            file_name: &circuit_filename,
-        };
-        phase2::MPCParameters::new(c, should_filter_points_at_infinity).unwrap()
+        let c = CircomCircuit::from_json_file(&circuit_filename);
+        MPCParameters::new(c, should_filter_points_at_infinity).unwrap()
     };
 
     println!("Writing initial parameters to {}.", params_filename);
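An indicative call for creating the initial parameter file (the binary name is assumed; the argument roles follow the two `println!` messages above):

```sh
phase2_new circuit.json circuit.params
```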
phase2/src/bin/prove.rs (new file, 88 lines)

@@ -0,0 +1,88 @@
+extern crate phase2;
+extern crate bellman_ce;
+extern crate exitcode;
+extern crate serde;
+extern crate num_bigint;
+extern crate num_traits;
+extern crate itertools;
+
+use std::fs;
+use std::fs::OpenOptions;
+use serde::{Deserialize, Serialize};
+use itertools::Itertools;
+use phase2::parameters::MPCParameters;
+use phase2::circom_circuit::CircomCircuit;
+use phase2::utils::{
+    repr_to_big,
+    p1_to_vec,
+    p2_to_vec,
+};
+use bellman_ce::groth16::{prepare_verifying_key, create_random_proof, verify_proof};
+use bellman_ce::pairing::ff::PrimeField;
+
+#[derive(Serialize, Deserialize)]
+struct ProofJson {
+    pub protocol: String,
+    pub pi_a: Vec<String>,
+    pub pi_b: Vec<Vec<String>>,
+    pub pi_c: Vec<String>,
+}
+
+
+fn main() {
+    let args: Vec<String> = std::env::args().collect();
+    if args.len() != 6 {
+        println!("Usage: \n<circuit.json> <witness.json> <params> <proof.json> <public.json>");
+        std::process::exit(exitcode::USAGE);
+    }
+    let circuit_filename = &args[1];
+    let witness_filename = &args[2];
+    let params_filename = &args[3];
+    let proof_filename = &args[4];
+    let public_filename = &args[5];
+
+    let should_filter_points_at_infinity = false;
+    let rng = &mut rand::XorShiftRng::new_unseeded(); // TODO: change this unsafe unseeded random (!)
+
+    let mut c = CircomCircuit::from_json_file(circuit_filename);
+    c.load_witness_json_file(witness_filename);
+    let input = c.inputs.to_vec();
+
+    let reader = OpenOptions::new()
+        .read(true)
+        .open(params_filename)
+        .expect("unable to open.");
+
+    let mut params = MPCParameters::read(reader, should_filter_points_at_infinity, true).expect("unable to read params");
+
+    params.filter_params();
+    let params = params.get_params();
+
+    println!("Proving...");
+    let proof = create_random_proof(c, &*params, rng).unwrap();
+
+    println!("Checking proof");
+    let pvk = prepare_verifying_key(&params.vk);
+    let result = verify_proof(
+        &pvk,
+        &proof,
+        &input[1..]
+    ).unwrap();
+    assert!(result, "Proof is correct");
+
+    let proof = ProofJson {
+        protocol: "groth".to_string(),
+        pi_a: p1_to_vec(&proof.a),
+        pi_b: p2_to_vec(&proof.b),
+        pi_c: p1_to_vec(&proof.c),
+    };
+
+    let proof_json = serde_json::to_string(&proof).unwrap();
+    fs::write(proof_filename, proof_json.as_bytes()).unwrap();
+
+    let public_inputs = input[1..].iter().map(|x| repr_to_big(x.into_repr())).collect_vec();
+    let public_json = serde_json::to_string(&public_inputs).unwrap();
+    fs::write(public_filename, public_json.as_bytes()).unwrap();
+
+    println!("Done!")
+}
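The usage string fixes the argument order, so a call looks like this (only the `phase2_prove` name is assumed). Note the program's own TODO: it proves with an unseeded `XorShiftRng`, which is fine for testing but not a safe source of randomness for real proofs:

```sh
phase2_prove circuit.json witness.json circuit.params proof.json public.json
```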
@@ -3,6 +3,9 @@ extern crate exitcode;
 
 use std::fs::OpenOptions;
 
+use phase2::parameters::*;
+use phase2::circom_circuit::CircomCircuit;
+
 fn main() {
     let args: Vec<String> = std::env::args().collect();
     if args.len() != 4 {
@@ -19,21 +22,19 @@ fn main() {
         .read(true)
         .open(old_params_filename)
         .expect("unable to open old params");
-    let old_params = phase2::MPCParameters::read(old_reader, disallow_points_at_infinity, true).expect("unable to read old params");
+    let old_params = MPCParameters::read(old_reader, disallow_points_at_infinity, true).expect("unable to read old params");
 
     let new_reader = OpenOptions::new()
         .read(true)
         .open(new_params_filename)
         .expect("unable to open new params");
-    let new_params = phase2::MPCParameters::read(new_reader, disallow_points_at_infinity, true).expect("unable to read new params");
+    let new_params = MPCParameters::read(new_reader, disallow_points_at_infinity, true).expect("unable to read new params");
 
     println!("Checking contribution {}...", new_params_filename);
-    let contribution = phase2::verify_contribution(&old_params, &new_params).expect("should verify");
+    let contribution = verify_contribution(&old_params, &new_params).expect("should verify");
 
     let should_filter_points_at_infinity = false;
-    let verification_result = new_params.verify(phase2::CircomCircuit {
-        file_name: &circuit_filename,
-    }, should_filter_points_at_infinity).unwrap();
-    assert!(phase2::contains_contribution(&verification_result, &contribution));
+    let verification_result = new_params.verify(CircomCircuit::from_json_file(&circuit_filename), should_filter_points_at_infinity).unwrap();
+    assert!(contains_contribution(&verification_result, &contribution));
 
     println!("Contribution {} verified.", new_params_filename);
 }
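An indicative verification call (the binary name and the argument order are assumptions; the hunks show a circuit file plus old and new params, but not the order in which they are parsed):

```sh
phase2_verify_contribution circuit.json old.params new.params
```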
149
phase2/src/circom_circuit.rs
Normal file
149
phase2/src/circom_circuit.rs
Normal file
@ -0,0 +1,149 @@
#![allow(unused_imports)]

extern crate bellman_ce;

use std::str;
use std::fs;
use std::fs::OpenOptions;
use std::collections::BTreeMap;
use itertools::Itertools;
use std::io::{
    Read,
    Write,
};

use bellman_ce::pairing::{
    Engine,
    ff::{
        PrimeField,
    },
};

use bellman_ce::{
    Circuit,
    SynthesisError,
    Variable,
    Index,
    ConstraintSystem,
    LinearCombination,
};


#[derive(Serialize, Deserialize)]
struct CircuitJson {
    pub constraints: Vec<Vec<BTreeMap<String, String>>>,
    #[serde(rename = "nPubInputs")]
    pub num_inputs: usize,
    #[serde(rename = "nOutputs")]
    pub num_outputs: usize,
    #[serde(rename = "nVars")]
    pub num_variables: usize,
}

#[derive(Clone)]
pub struct CircomCircuit<E: Engine> {
    pub num_inputs: usize,
    pub num_aux: usize,
    pub num_constraints: usize,
    pub inputs: Vec<E::Fr>,
    pub aux: Vec<E::Fr>,
    pub constraints: Vec<(
        Vec<(usize, E::Fr)>,
        Vec<(usize, E::Fr)>,
        Vec<(usize, E::Fr)>,
    )>,
}

impl<'a, E: Engine> CircomCircuit<E> {
    pub fn load_witness_json_file(&mut self, filename: &str) {
        let reader = OpenOptions::new()
            .read(true)
            .open(filename)
            .expect("unable to open.");
        self.load_witness_json(reader);
    }

    pub fn load_witness_json<R: Read>(&mut self, reader: R) {
        let witness: Vec<String> = serde_json::from_reader(reader).unwrap();
        let witness = witness.into_iter().map(|x| E::Fr::from_str(&x).unwrap()).collect::<Vec<E::Fr>>();
        self.inputs = witness[..self.num_inputs].to_vec();
        self.aux = witness[self.num_inputs..].to_vec();
    }

    pub fn from_json_file(filename: &str) -> CircomCircuit::<E> {
        let reader = OpenOptions::new()
            .read(true)
            .open(filename)
            .expect("unable to open.");
        return CircomCircuit::from_json(reader);
    }

    pub fn from_json<R: Read>(reader: R) -> CircomCircuit::<E> {
        let circuit_json: CircuitJson = serde_json::from_reader(reader).unwrap();

        let num_inputs = circuit_json.num_inputs + circuit_json.num_outputs + 1;
        let num_aux = circuit_json.num_variables - num_inputs;

        let convert_constraint = |lc: &BTreeMap<String, String>| {
            lc.iter().map(|(index, coeff)| (index.parse().unwrap(), E::Fr::from_str(coeff).unwrap())).collect_vec()
        };

        let constraints = circuit_json.constraints.iter().map(
            |c| (convert_constraint(&c[0]), convert_constraint(&c[1]), convert_constraint(&c[2]))
        ).collect_vec();

        return CircomCircuit {
            num_inputs: num_inputs,
            num_aux: num_aux,
            num_constraints: circuit_json.num_variables,
            inputs: vec![],
            aux: vec![],
            constraints: constraints,
        };
    }
}

/// `CircomCircuit` implements the bellman `Circuit` trait, which
/// is used during paramgen and proving in order to
/// synthesize the constraint system.
impl<'a, E: Engine> Circuit<E> for CircomCircuit<E> {
    fn synthesize<CS: ConstraintSystem<E>>(
        self,
        cs: &mut CS
    ) -> Result<(), SynthesisError>
    {
        for i in 1..self.num_inputs {
            cs.alloc_input(|| format!("variable {}", i),
                || {
                    Ok(if self.inputs.len() > 0 { self.inputs[i] } else { E::Fr::from_str("1").unwrap() })
                })?;
        }

        for i in 0..self.num_aux {
            cs.alloc(|| format!("aux {}", i),
                || {
                    Ok(if self.aux.len() > 0 { self.aux[i] } else { E::Fr::from_str("1").unwrap() })
                })?;
        }

        let make_index = |index|
            if index < self.num_inputs {
                Index::Input(index)
            } else {
                Index::Aux(index - self.num_inputs)
            };
        let make_lc = |lc_data: Vec<(usize, E::Fr)>|
            lc_data.iter().fold(
                LinearCombination::<E>::zero(),
                |lc: LinearCombination<E>, (index, coeff)| lc + (*coeff, Variable::new_unchecked(make_index(*index)))
            );
        for (i, constraint) in self.constraints.iter().enumerate() {
            cs.enforce(|| format!("constraint {}", i),
                |_| make_lc(constraint.0.clone()),
                |_| make_lc(constraint.1.clone()),
                |_| make_lc(constraint.2.clone()),
            );
        }
        Ok(())
    }
}
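For orientation, a minimal sketch of how this module is meant to be driven. This is an illustration only: the module path phase2::circom_circuit is assumed from this commit's layout, and circuit.json / witness.json are hypothetical file names.

extern crate bellman_ce;
extern crate phase2;

use bellman_ce::pairing::bn256::Bn256;
use phase2::circom_circuit::CircomCircuit; // path assumed from this commit's module split

fn main() {
    // Parse the circom-exported constraint system from JSON (hypothetical file name).
    let mut circuit = CircomCircuit::<Bn256>::from_json_file("circuit.json");

    // Optionally attach a witness; without one, synthesize() falls back
    // to dummy assignments of 1 for every variable.
    circuit.load_witness_json_file("witness.json");

    // After loading, the witness is split into public inputs and aux values.
    assert_eq!(circuit.inputs.len(), circuit.num_inputs);
}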
53
phase2/src/hash_writer.rs
Normal file
@ -0,0 +1,53 @@
extern crate blake2_rfc;

use std::io;
use std::io::Write;
use blake2_rfc::blake2b::Blake2b;

/// Abstraction over a writer which hashes the data being written.
pub struct HashWriter<W: Write> {
    writer: W,
    hasher: Blake2b
}

impl Clone for HashWriter<io::Sink> {
    fn clone(&self) -> HashWriter<io::Sink> {
        HashWriter {
            writer: io::sink(),
            hasher: self.hasher.clone()
        }
    }
}

impl<W: Write> HashWriter<W> {
    /// Construct a new `HashWriter` given an existing `writer` by value.
    pub fn new(writer: W) -> Self {
        HashWriter {
            writer: writer,
            hasher: Blake2b::new(64)
        }
    }

    /// Destroy this writer and return the hash of what was written.
    pub fn into_hash(self) -> [u8; 64] {
        let mut tmp = [0u8; 64];
        tmp.copy_from_slice(self.hasher.finalize().as_ref());
        tmp
    }
}

impl<W: Write> Write for HashWriter<W> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let bytes = self.writer.write(buf)?;

        if bytes > 0 {
            self.hasher.update(&buf[0..bytes]);
        }

        Ok(bytes)
    }

    fn flush(&mut self) -> io::Result<()> {
        self.writer.flush()
    }
}
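A quick sketch of the intended pattern: wrap any `Write` target, write through it, then consume the wrapper to obtain the 64-byte BLAKE2b digest. The phase2::hash_writer path is assumed from this commit's layout.

use std::io::{self, Write};
use phase2::hash_writer::HashWriter; // module path assumed

fn digest_of(payload: &[u8]) -> [u8; 64] {
    // Hash without retaining the bytes by writing into a sink.
    let mut hasher = HashWriter::new(io::sink());
    hasher.write_all(payload).unwrap();
    hasher.into_hash()
}

This is exactly how parameters.rs below computes `cs_hash` and the per-contribution response hashes.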
116
phase2/src/keypair.rs
Normal file
@ -0,0 +1,116 @@
extern crate bellman_ce;

use std::io::{
    self,
    Read,
    Write,
};

use bellman_ce::pairing::{
    EncodedPoint,
    CurveAffine,
    bn256::{
        Fr,
        G1Affine,
        G1Uncompressed,
        G2Affine,
        G2Uncompressed
    }
};

/// This needs to be destroyed by at least one participant
/// for the final parameters to be secure.
pub struct PrivateKey {
    pub delta: Fr
}

/// This allows others to verify that you contributed. The hash produced
/// by `MPCParameters::contribute` is just a BLAKE2b hash of this object.
#[derive(Clone)]
pub struct PublicKey {
    /// This is the delta (in G1) after the transformation, kept so that we
    /// can check correctness of the public keys without having the entire
    /// interstitial parameters for each contribution.
    pub delta_after: G1Affine,

    /// Random element chosen by the contributor.
    pub s: G1Affine,

    /// That element, taken to the contributor's secret delta.
    pub s_delta: G1Affine,

    /// r is H(last_pubkey | s | s_delta), r_delta proves knowledge of delta
    pub r_delta: G2Affine,

    /// Hash of the transcript (used for mapping to r)
    pub transcript: [u8; 64],
}

impl PublicKey {
    pub fn write<W: Write>(
        &self,
        mut writer: W
    ) -> io::Result<()>
    {
        writer.write_all(self.delta_after.into_uncompressed().as_ref())?;
        writer.write_all(self.s.into_uncompressed().as_ref())?;
        writer.write_all(self.s_delta.into_uncompressed().as_ref())?;
        writer.write_all(self.r_delta.into_uncompressed().as_ref())?;
        writer.write_all(&self.transcript)?;

        Ok(())
    }

    pub fn read<R: Read>(
        mut reader: R
    ) -> io::Result<PublicKey>
    {
        let mut g1_repr = G1Uncompressed::empty();
        let mut g2_repr = G2Uncompressed::empty();

        reader.read_exact(g1_repr.as_mut())?;
        let delta_after = g1_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;

        if delta_after.is_zero() {
            return Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"));
        }

        reader.read_exact(g1_repr.as_mut())?;
        let s = g1_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;

        if s.is_zero() {
            return Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"));
        }

        reader.read_exact(g1_repr.as_mut())?;
        let s_delta = g1_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;

        if s_delta.is_zero() {
            return Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"));
        }

        reader.read_exact(g2_repr.as_mut())?;
        let r_delta = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;

        if r_delta.is_zero() {
            return Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"));
        }

        let mut transcript = [0u8; 64];
        reader.read_exact(&mut transcript)?;

        Ok(PublicKey {
            delta_after, s, s_delta, r_delta, transcript
        })
    }
}

impl PartialEq for PublicKey {
    fn eq(&self, other: &PublicKey) -> bool {
        self.delta_after == other.delta_after &&
        self.s == other.s &&
        self.s_delta == other.s_delta &&
        self.r_delta == other.r_delta &&
        &self.transcript[..] == &other.transcript[..]
    }
}
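The fixed-width encoding makes the serialized size easy to derive: three uncompressed G1 points (64 bytes each), one uncompressed G2 point (128 bytes), and the 64-byte transcript, i.e. 384 bytes per contribution. A hedged round-trip sketch; the module path is assumed, and `pubkey` is assumed to come from `keypair()` in parameters.rs below.

use phase2::keypair::PublicKey; // module path assumed

fn roundtrip(pubkey: &PublicKey) -> PublicKey {
    let mut buf = Vec::with_capacity(384);
    pubkey.write(&mut buf).unwrap();
    assert_eq!(buf.len(), 384); // 3 * 64 (G1) + 128 (G2) + 64 (transcript)

    // `read` rejects points at infinity, so a valid key survives unchanged.
    let restored = PublicKey::read(&buf[..]).unwrap();
    assert!(restored == *pubkey);
    restored
}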
118
phase2/src/keypair_assembly.rs
Normal file
@ -0,0 +1,118 @@
extern crate bellman_ce;

use bellman_ce::pairing::Engine;
use bellman_ce::{
    SynthesisError,
    Variable,
    Index,
    ConstraintSystem,
    LinearCombination,
};


/// This is our assembly structure that we'll use to synthesize the
/// circuit into a QAP.
pub struct KeypairAssembly<E: Engine> {
    pub num_inputs: usize,
    pub num_aux: usize,
    pub num_constraints: usize,
    pub at_inputs: Vec<Vec<(E::Fr, usize)>>,
    pub bt_inputs: Vec<Vec<(E::Fr, usize)>>,
    pub ct_inputs: Vec<Vec<(E::Fr, usize)>>,
    pub at_aux: Vec<Vec<(E::Fr, usize)>>,
    pub bt_aux: Vec<Vec<(E::Fr, usize)>>,
    pub ct_aux: Vec<Vec<(E::Fr, usize)>>
}

impl<E: Engine> ConstraintSystem<E> for KeypairAssembly<E> {
    type Root = Self;

    fn alloc<F, A, AR>(
        &mut self,
        _: A,
        _: F
    ) -> Result<Variable, SynthesisError>
        where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
    {
        // There is no assignment, so we don't even invoke the
        // function for obtaining one.

        let index = self.num_aux;
        self.num_aux += 1;

        self.at_aux.push(vec![]);
        self.bt_aux.push(vec![]);
        self.ct_aux.push(vec![]);

        Ok(Variable::new_unchecked(Index::Aux(index)))
    }

    fn alloc_input<F, A, AR>(
        &mut self,
        _: A,
        _: F
    ) -> Result<Variable, SynthesisError>
        where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
    {
        // There is no assignment, so we don't even invoke the
        // function for obtaining one.

        let index = self.num_inputs;
        self.num_inputs += 1;

        self.at_inputs.push(vec![]);
        self.bt_inputs.push(vec![]);
        self.ct_inputs.push(vec![]);

        Ok(Variable::new_unchecked(Index::Input(index)))
    }

    fn enforce<A, AR, LA, LB, LC>(
        &mut self,
        _: A,
        a: LA,
        b: LB,
        c: LC
    )
        where A: FnOnce() -> AR, AR: Into<String>,
              LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
              LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
              LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>
    {
        fn eval<E: Engine>(
            l: LinearCombination<E>,
            inputs: &mut [Vec<(E::Fr, usize)>],
            aux: &mut [Vec<(E::Fr, usize)>],
            this_constraint: usize
        )
        {
            for &(var, coeff) in l.as_ref() {
                match var.get_unchecked() {
                    Index::Input(id) => inputs[id].push((coeff, this_constraint)),
                    Index::Aux(id) => aux[id].push((coeff, this_constraint))
                }
            }
        }

        eval(a(LinearCombination::zero()), &mut self.at_inputs, &mut self.at_aux, self.num_constraints);
        eval(b(LinearCombination::zero()), &mut self.bt_inputs, &mut self.bt_aux, self.num_constraints);
        eval(c(LinearCombination::zero()), &mut self.ct_inputs, &mut self.ct_aux, self.num_constraints);

        self.num_constraints += 1;
    }

    fn push_namespace<NR, N>(&mut self, _: N)
        where NR: Into<String>, N: FnOnce() -> NR
    {
        // Do nothing; we don't care about namespaces in this context.
    }

    fn pop_namespace(&mut self)
    {
        // Do nothing; we don't care about namespaces in this context.
    }

    fn get_root(&mut self) -> &mut Self::Root {
        self
    }
}
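Because `KeypairAssembly` only records the QAP structure and never invokes the assignment closures, it can be driven directly to measure a circuit, which is what `MPCParameters::new` in parameters.rs below does. A hedged sketch with one hand-rolled constraint; the module path is assumed from this commit's layout.

use bellman_ce::pairing::bn256::{Bn256, Fr};
use bellman_ce::pairing::ff::Field;
use bellman_ce::ConstraintSystem;
use phase2::keypair_assembly::KeypairAssembly; // path assumed

fn count_demo() {
    let mut assembly = KeypairAssembly::<Bn256> {
        num_inputs: 0, num_aux: 0, num_constraints: 0,
        at_inputs: vec![], bt_inputs: vec![], ct_inputs: vec![],
        at_aux: vec![], bt_aux: vec![], ct_aux: vec![],
    };

    // One public input, one aux variable, one constraint: x * x = y.
    let x = assembly.alloc_input(|| "x", || Ok(Fr::one())).unwrap();
    let y = assembly.alloc(|| "y", || Ok(Fr::one())).unwrap();
    assembly.enforce(|| "x * x = y",
        |lc| lc + x,
        |lc| lc + x,
        |lc| lc + y,
    );

    assert_eq!((assembly.num_inputs, assembly.num_aux, assembly.num_constraints),
               (1, 1, 1));
}

Note that `MPCParameters::new` additionally allocates the constant-one input and one dummy constraint per input before sizing the evaluation domain.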
1516
phase2/src/lib.rs
File diff suppressed because it is too large
903
phase2/src/parameters.rs
Normal file
@ -0,0 +1,903 @@
extern crate bellman_ce;
extern crate rand;
extern crate byteorder;
extern crate num_cpus;
extern crate crossbeam;

#[cfg(feature = "wasm")]
use bellman_ce::singlecore::Worker;
#[cfg(not(feature = "wasm"))]
use bellman_ce::multicore::Worker;

use byteorder::{
    BigEndian,
    ReadBytesExt,
    WriteBytesExt
};

use std::{
    io::{
        self,
        Read,
        Write,
        BufReader
    },
    fs::{
        File
    },
    sync::{
        Arc
    }
};

use bellman_ce::pairing::{
    ff::{
        PrimeField,
        Field,
    },
    EncodedPoint,
    CurveAffine,
    CurveProjective,
    Wnaf,
    bn256::{
        Bn256,
        Fr,
        G1,
        G2,
        G1Affine,
        G1Uncompressed,
        G2Affine,
        G2Uncompressed
    }
};

use bellman_ce::{
    Circuit,
    SynthesisError,
    Variable,
    Index,
    ConstraintSystem,
    groth16::{
        Parameters,
        VerifyingKey
    },
};

use rand::{
    Rng,
    Rand,
    ChaChaRng,
    SeedableRng
};

use super::hash_writer::*;
use super::keypair_assembly::*;
use super::keypair::*;
use super::utils::*;

/// MPC parameters are just like bellman `Parameters` except, when serialized,
/// they contain a transcript of contributions at the end, which can be verified.
#[derive(Clone)]
pub struct MPCParameters {
    params: Parameters<Bn256>,
    cs_hash: [u8; 64],
    contributions: Vec<PublicKey>
}

impl PartialEq for MPCParameters {
    fn eq(&self, other: &MPCParameters) -> bool {
        self.params == other.params &&
        &self.cs_hash[..] == &other.cs_hash[..] &&
        self.contributions == other.contributions
    }
}

impl MPCParameters {
    /// Create new Groth16 parameters (compatible with bellman) for a
    /// given circuit. The resulting parameters are unsafe to use
    /// until there are contributions (see `contribute()`).
    pub fn new<C>(
        circuit: C,
        should_filter_points_at_infinity: bool,
    ) -> Result<MPCParameters, SynthesisError>
        where C: Circuit<Bn256>
    {
        let mut assembly = KeypairAssembly {
            num_inputs: 0,
            num_aux: 0,
            num_constraints: 0,
            at_inputs: vec![],
            bt_inputs: vec![],
            ct_inputs: vec![],
            at_aux: vec![],
            bt_aux: vec![],
            ct_aux: vec![]
        };

        // Allocate the "one" input variable
        assembly.alloc_input(|| "", || Ok(Fr::one()))?;

        // Synthesize the circuit.
        circuit.synthesize(&mut assembly)?;

        // Input constraints to ensure full density of IC query
        // x * 0 = 0
        for i in 0..assembly.num_inputs {
            assembly.enforce(|| "",
                |lc| lc + Variable::new_unchecked(Index::Input(i)),
                |lc| lc,
                |lc| lc,
            );
        }

        // Compute the size of our evaluation domain
        let mut m = 1;
        let mut exp = 0;
        while m < assembly.num_constraints {
            m *= 2;
            exp += 1;

            // Powers of Tau ceremony can't support more than 2^28
            if exp > 28 {
                return Err(SynthesisError::PolynomialDegreeTooLarge)
            }
        }

        // Try to load "phase1radix2m{}"
        let f = match File::open(format!("phase1radix2m{}", exp)) {
            Ok(f) => f,
            Err(e) => {
                panic!("Couldn't load phase1radix2m{}: {:?}", exp, e);
            }
        };
        let f = &mut BufReader::with_capacity(1024 * 1024, f);

        let read_g1 = |reader: &mut BufReader<File>| -> io::Result<G1Affine> {
            let mut repr = G1Uncompressed::empty();
            reader.read_exact(repr.as_mut())?;

            repr.into_affine_unchecked()
                .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
                .and_then(|e| if e.is_zero() {
                    Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
                } else {
                    Ok(e)
                })
        };

        let read_g2 = |reader: &mut BufReader<File>| -> io::Result<G2Affine> {
            let mut repr = G2Uncompressed::empty();
            reader.read_exact(repr.as_mut())?;

            repr.into_affine_unchecked()
                .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
                .and_then(|e| if e.is_zero() {
                    Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
                } else {
                    Ok(e)
                })
        };

        let alpha = read_g1(f)?;
        let beta_g1 = read_g1(f)?;
        let beta_g2 = read_g2(f)?;

        let mut coeffs_g1 = Vec::with_capacity(m);
        for _ in 0..m {
            coeffs_g1.push(read_g1(f)?);
        }

        let mut coeffs_g2 = Vec::with_capacity(m);
        for _ in 0..m {
            coeffs_g2.push(read_g2(f)?);
        }

        let mut alpha_coeffs_g1 = Vec::with_capacity(m);
        for _ in 0..m {
            alpha_coeffs_g1.push(read_g1(f)?);
        }

        let mut beta_coeffs_g1 = Vec::with_capacity(m);
        for _ in 0..m {
            beta_coeffs_g1.push(read_g1(f)?);
        }

        // These are `Arc` so that later it'll be easier
        // to use multiexp during QAP evaluation (which
        // requires a futures-based API)
        let coeffs_g1 = Arc::new(coeffs_g1);
        let coeffs_g2 = Arc::new(coeffs_g2);
        let alpha_coeffs_g1 = Arc::new(alpha_coeffs_g1);
        let beta_coeffs_g1 = Arc::new(beta_coeffs_g1);

        let mut h = Vec::with_capacity(m-1);
        for _ in 0..m-1 {
            h.push(read_g1(f)?);
        }

        let mut ic = vec![G1::zero(); assembly.num_inputs];
        let mut l = vec![G1::zero(); assembly.num_aux];
        let mut a_g1 = vec![G1::zero(); assembly.num_inputs + assembly.num_aux];
        let mut b_g1 = vec![G1::zero(); assembly.num_inputs + assembly.num_aux];
        let mut b_g2 = vec![G2::zero(); assembly.num_inputs + assembly.num_aux];

        fn eval(
            // Lagrange coefficients for tau
            coeffs_g1: Arc<Vec<G1Affine>>,
            coeffs_g2: Arc<Vec<G2Affine>>,
            alpha_coeffs_g1: Arc<Vec<G1Affine>>,
            beta_coeffs_g1: Arc<Vec<G1Affine>>,

            // QAP polynomials
            at: &[Vec<(Fr, usize)>],
            bt: &[Vec<(Fr, usize)>],
            ct: &[Vec<(Fr, usize)>],

            // Resulting evaluated QAP polynomials
            a_g1: &mut [G1],
            b_g1: &mut [G1],
            b_g2: &mut [G2],
            ext: &mut [G1],

            // Worker
            worker: &Worker
        )
        {
            // Sanity check
            assert_eq!(a_g1.len(), at.len());
            assert_eq!(a_g1.len(), bt.len());
            assert_eq!(a_g1.len(), ct.len());
            assert_eq!(a_g1.len(), b_g1.len());
            assert_eq!(a_g1.len(), b_g2.len());
            assert_eq!(a_g1.len(), ext.len());

            // Evaluate polynomials in multiple threads
            worker.scope(a_g1.len(), |scope, chunk| {
                for ((((((a_g1, b_g1), b_g2), ext), at), bt), ct) in
                    a_g1.chunks_mut(chunk)
                        .zip(b_g1.chunks_mut(chunk))
                        .zip(b_g2.chunks_mut(chunk))
                        .zip(ext.chunks_mut(chunk))
                        .zip(at.chunks(chunk))
                        .zip(bt.chunks(chunk))
                        .zip(ct.chunks(chunk))
                {
                    let coeffs_g1 = coeffs_g1.clone();
                    let coeffs_g2 = coeffs_g2.clone();
                    let alpha_coeffs_g1 = alpha_coeffs_g1.clone();
                    let beta_coeffs_g1 = beta_coeffs_g1.clone();

                    scope.spawn(move |_| {
                        for ((((((a_g1, b_g1), b_g2), ext), at), bt), ct) in
                            a_g1.iter_mut()
                                .zip(b_g1.iter_mut())
                                .zip(b_g2.iter_mut())
                                .zip(ext.iter_mut())
                                .zip(at.iter())
                                .zip(bt.iter())
                                .zip(ct.iter())
                        {
                            for &(coeff, lag) in at {
                                a_g1.add_assign(&coeffs_g1[lag].mul(coeff));
                                ext.add_assign(&beta_coeffs_g1[lag].mul(coeff));
                            }

                            for &(coeff, lag) in bt {
                                b_g1.add_assign(&coeffs_g1[lag].mul(coeff));
                                b_g2.add_assign(&coeffs_g2[lag].mul(coeff));
                                ext.add_assign(&alpha_coeffs_g1[lag].mul(coeff));
                            }

                            for &(coeff, lag) in ct {
                                ext.add_assign(&coeffs_g1[lag].mul(coeff));
                            }
                        }

                        // Batch normalize
                        G1::batch_normalization(a_g1);
                        G1::batch_normalization(b_g1);
                        G2::batch_normalization(b_g2);
                        G1::batch_normalization(ext);
                    });
                }
            });
        }

        let worker = Worker::new();

        // Evaluate for inputs.
        eval(
            coeffs_g1.clone(),
            coeffs_g2.clone(),
            alpha_coeffs_g1.clone(),
            beta_coeffs_g1.clone(),
            &assembly.at_inputs,
            &assembly.bt_inputs,
            &assembly.ct_inputs,
            &mut a_g1[0..assembly.num_inputs],
            &mut b_g1[0..assembly.num_inputs],
            &mut b_g2[0..assembly.num_inputs],
            &mut ic,
            &worker
        );

        // Evaluate for auxiliary variables.
        eval(
            coeffs_g1.clone(),
            coeffs_g2.clone(),
            alpha_coeffs_g1.clone(),
            beta_coeffs_g1.clone(),
            &assembly.at_aux,
            &assembly.bt_aux,
            &assembly.ct_aux,
            &mut a_g1[assembly.num_inputs..],
            &mut b_g1[assembly.num_inputs..],
            &mut b_g2[assembly.num_inputs..],
            &mut l,
            &worker
        );

        // Don't allow any elements be unconstrained, so that
        // the L query is always fully dense.
        for e in l.iter() {
            if e.is_zero() {
                return Err(SynthesisError::UnconstrainedVariable);
            }
        }

        let vk = VerifyingKey {
            alpha_g1: alpha,
            beta_g1: beta_g1,
            beta_g2: beta_g2,
            gamma_g2: G2Affine::one(),
            delta_g1: G1Affine::one(),
            delta_g2: G2Affine::one(),
            ic: ic.into_iter().map(|e| e.into_affine()).collect()
        };

        let params = if should_filter_points_at_infinity {
            Parameters {
                vk: vk,
                h: Arc::new(h),
                l: Arc::new(l.into_iter().map(|e| e.into_affine()).collect()),

                // Filter points at infinity away from A/B queries
                a: Arc::new(a_g1.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect()),
                b_g1: Arc::new(b_g1.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect()),
                b_g2: Arc::new(b_g2.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect())
            }
        } else {
            Parameters {
                vk: vk,
                h: Arc::new(h),
                l: Arc::new(l.into_iter().map(|e| e.into_affine()).collect()),
                a: Arc::new(a_g1.into_iter().map(|e| e.into_affine()).collect()),
                b_g1: Arc::new(b_g1.into_iter().map(|e| e.into_affine()).collect()),
                b_g2: Arc::new(b_g2.into_iter().map(|e| e.into_affine()).collect())
            }
        };

        let h = {
            let sink = io::sink();
            let mut sink = HashWriter::new(sink);

            params.write(&mut sink).unwrap();

            sink.into_hash()
        };

        let mut cs_hash = [0; 64];
        cs_hash.copy_from_slice(h.as_ref());

        Ok(MPCParameters {
            params: params,
            cs_hash: cs_hash,
            contributions: vec![]
        })
    }

    /// Get the underlying Groth16 `Parameters`
    pub fn get_params(&self) -> &Parameters<Bn256> {
        &self.params
    }

    pub fn filter_params(&mut self) {
        self.params.vk.ic = self.params.vk.ic.clone().into_iter().filter(|x| !x.is_zero()).collect::<Vec<_>>();
        self.params.h = Arc::new((*self.params.h).clone().into_iter().filter(|x| !x.is_zero()).collect::<Vec<_>>());
        self.params.a = Arc::new((*self.params.a).clone().into_iter().filter(|x| !x.is_zero()).collect::<Vec<_>>());
        self.params.b_g1 = Arc::new((*self.params.b_g1).clone().into_iter().filter(|x| !x.is_zero()).collect::<Vec<_>>());
        self.params.b_g2 = Arc::new((*self.params.b_g2).clone().into_iter().filter(|x| !x.is_zero()).collect::<Vec<_>>());
    }

    /// Contributes some randomness to the parameters. Only one
    /// contributor needs to be honest for the parameters to be
    /// secure.
    ///
    /// This function returns a "hash" that is bound to the
    /// contribution. Contributors can use this hash to make
    /// sure their contribution is in the final parameters, by
    /// checking to see if it appears in the output of
    /// `MPCParameters::verify`.
    pub fn contribute<R: Rng>(
        &mut self,
        rng: &mut R
    ) -> [u8; 64]
    {
        // Generate a keypair
        let (pubkey, privkey) = keypair(rng, self);

        #[cfg(not(feature = "wasm"))]
        fn batch_exp<C: CurveAffine>(bases: &mut [C], coeff: C::Scalar) {
            let coeff = coeff.into_repr();

            let mut projective = vec![C::Projective::zero(); bases.len()];
            let cpus = num_cpus::get();
            let chunk_size = if bases.len() < cpus {
                1
            } else {
                bases.len() / cpus
            };

            // Perform wNAF over multiple cores, placing results into `projective`.
            crossbeam::scope(|scope| {
                for (bases, projective) in bases.chunks_mut(chunk_size)
                    .zip(projective.chunks_mut(chunk_size))
                {
                    scope.spawn(move || {
                        let mut wnaf = Wnaf::new();

                        for (base, projective) in bases.iter_mut()
                            .zip(projective.iter_mut())
                        {
                            *projective = wnaf.base(base.into_projective(), 1).scalar(coeff);
                        }
                    });
                }
            });

            // Perform batch normalization
            crossbeam::scope(|scope| {
                for projective in projective.chunks_mut(chunk_size)
                {
                    scope.spawn(move || {
                        C::Projective::batch_normalization(projective);
                    });
                }
            });

            // Turn it all back into affine points
            for (projective, affine) in projective.iter().zip(bases.iter_mut()) {
                *affine = projective.into_affine();
            }
        }

        #[cfg(feature = "wasm")]
        fn batch_exp<C: CurveAffine>(bases: &mut [C], coeff: C::Scalar) {
            let coeff = coeff.into_repr();

            let mut projective = vec![C::Projective::zero(); bases.len()];

            // Perform wNAF, placing results into `projective`.
            let mut wnaf = Wnaf::new();
            for (base, projective) in bases.iter_mut().zip(projective.iter_mut()) {
                *projective = wnaf.base(base.into_projective(), 1).scalar(coeff);
            }

            // Perform batch normalization
            C::Projective::batch_normalization(&mut projective);

            // Turn it all back into affine points
            for (projective, affine) in projective.iter().zip(bases.iter_mut()) {
                *affine = projective.into_affine();
            }
        }

        let delta_inv = privkey.delta.inverse().expect("nonzero");
        let mut l = (&self.params.l[..]).to_vec();
        let mut h = (&self.params.h[..]).to_vec();
        batch_exp(&mut l, delta_inv);
        batch_exp(&mut h, delta_inv);
        self.params.l = Arc::new(l);
        self.params.h = Arc::new(h);

        self.params.vk.delta_g1 = self.params.vk.delta_g1.mul(privkey.delta).into_affine();
        self.params.vk.delta_g2 = self.params.vk.delta_g2.mul(privkey.delta).into_affine();

        self.contributions.push(pubkey.clone());

        // Calculate the hash of the public key and return it
        {
            let sink = io::sink();
            let mut sink = HashWriter::new(sink);
            pubkey.write(&mut sink).unwrap();
            let h = sink.into_hash();
            let mut response = [0u8; 64];
            response.copy_from_slice(h.as_ref());
            response
        }
    }

    /// Verify the correctness of the parameters, given a circuit
    /// instance. This will return all of the hashes that
    /// contributors obtained when they ran
    /// `MPCParameters::contribute`, for ensuring that contributions
    /// exist in the final parameters.
    pub fn verify<C: Circuit<Bn256>>(
        &self,
        circuit: C,
        should_filter_points_at_infinity: bool,
    ) -> Result<Vec<[u8; 64]>, ()>
    {
        let initial_params = MPCParameters::new(circuit, should_filter_points_at_infinity).map_err(|_| ())?;

        // H/L will change, but should have same length
        if initial_params.params.h.len() != self.params.h.len() {
            return Err(());
        }
        if initial_params.params.l.len() != self.params.l.len() {
            return Err(());
        }

        // A/B_G1/B_G2 doesn't change at all
        if initial_params.params.a != self.params.a {
            return Err(());
        }
        if initial_params.params.b_g1 != self.params.b_g1 {
            return Err(());
        }
        if initial_params.params.b_g2 != self.params.b_g2 {
            return Err(());
        }

        // alpha/beta/gamma don't change
        if initial_params.params.vk.alpha_g1 != self.params.vk.alpha_g1 {
            return Err(());
        }
        if initial_params.params.vk.beta_g1 != self.params.vk.beta_g1 {
            return Err(());
        }
        if initial_params.params.vk.beta_g2 != self.params.vk.beta_g2 {
            return Err(());
        }
        if initial_params.params.vk.gamma_g2 != self.params.vk.gamma_g2 {
            return Err(());
        }

        // IC shouldn't change, as gamma doesn't change
        if initial_params.params.vk.ic != self.params.vk.ic {
            return Err(());
        }

        // cs_hash should be the same
        if &initial_params.cs_hash[..] != &self.cs_hash[..] {
            return Err(());
        }

        let sink = io::sink();
        let mut sink = HashWriter::new(sink);
        sink.write_all(&initial_params.cs_hash[..]).unwrap();

        let mut current_delta = G1Affine::one();
        let mut result = vec![];

        for pubkey in &self.contributions {
            let mut our_sink = sink.clone();
            our_sink.write_all(pubkey.s.into_uncompressed().as_ref()).unwrap();
            our_sink.write_all(pubkey.s_delta.into_uncompressed().as_ref()).unwrap();

            pubkey.write(&mut sink).unwrap();

            let h = our_sink.into_hash();

            // The transcript must be consistent
            if &pubkey.transcript[..] != h.as_ref() {
                return Err(());
            }

            let r = hash_to_g2(h.as_ref()).into_affine();

            // Check the signature of knowledge
            if !same_ratio((r, pubkey.r_delta), (pubkey.s, pubkey.s_delta)) {
                return Err(());
            }

            // Check the change from the old delta is consistent
            if !same_ratio(
                (current_delta, pubkey.delta_after),
                (r, pubkey.r_delta)
            ) {
                return Err(());
            }

            current_delta = pubkey.delta_after;

            {
                let sink = io::sink();
                let mut sink = HashWriter::new(sink);
                pubkey.write(&mut sink).unwrap();
                let h = sink.into_hash();
                let mut response = [0u8; 64];
                response.copy_from_slice(h.as_ref());
                result.push(response);
            }
        }

        // Current parameters should have consistent delta in G1
        if current_delta != self.params.vk.delta_g1 {
            return Err(());
        }

        // Current parameters should have consistent delta in G2
        if !same_ratio(
            (G1Affine::one(), current_delta),
            (G2Affine::one(), self.params.vk.delta_g2)
        ) {
            return Err(());
        }

        // H and L queries should be updated with delta^-1
        if !same_ratio(
            merge_pairs(&initial_params.params.h, &self.params.h),
            (self.params.vk.delta_g2, G2Affine::one()) // reversed for inverse
        ) {
            return Err(());
        }

        if !same_ratio(
            merge_pairs(&initial_params.params.l, &self.params.l),
            (self.params.vk.delta_g2, G2Affine::one()) // reversed for inverse
        ) {
            return Err(());
        }

        Ok(result)
    }

    /// Serialize these parameters. The serialized parameters
    /// can be read by bellman as Groth16 `Parameters`.
    pub fn write<W: Write>(
        &self,
        mut writer: W
    ) -> io::Result<()>
    {
        self.params.write(&mut writer)?;
        writer.write_all(&self.cs_hash)?;

        writer.write_u32::<BigEndian>(self.contributions.len() as u32)?;
        for pubkey in &self.contributions {
            pubkey.write(&mut writer)?;
        }

        Ok(())
    }

    /// Deserialize these parameters. If `checked` is false,
    /// we won't perform curve validity and group order
    /// checks.
    pub fn read<R: Read>(
        mut reader: R,
        disallow_points_at_infinity: bool,
        checked: bool
    ) -> io::Result<MPCParameters>
    {
        let params = Parameters::read(&mut reader, disallow_points_at_infinity, checked)?;

        let mut cs_hash = [0u8; 64];
        reader.read_exact(&mut cs_hash)?;

        let contributions_len = reader.read_u32::<BigEndian>()? as usize;

        let mut contributions = vec![];
        for _ in 0..contributions_len {
            contributions.push(PublicKey::read(&mut reader)?);
        }

        Ok(MPCParameters {
            params, cs_hash, contributions
        })
    }
}

/// This is a cheap helper utility that exists purely
/// because Rust still doesn't have type-level integers
/// and so doesn't implement `PartialEq` for `[T; 64]`
pub fn contains_contribution(
    contributions: &[[u8; 64]],
    my_contribution: &[u8; 64]
) -> bool
{
    for contrib in contributions {
        if &contrib[..] == &my_contribution[..] {
            return true
        }
    }

    return false
}

/// Verify a contribution, given the old parameters and
/// the new parameters. Returns the hash of the contribution.
pub fn verify_contribution(
    before: &MPCParameters,
    after: &MPCParameters
) -> Result<[u8; 64], ()>
{
    // Transformation involves a single new object
    if after.contributions.len() != (before.contributions.len() + 1) {
        return Err(());
    }

    // None of the previous transformations should change
    if &before.contributions[..] != &after.contributions[0..before.contributions.len()] {
        return Err(());
    }

    // H/L will change, but should have same length
    if before.params.h.len() != after.params.h.len() {
        return Err(());
    }
    if before.params.l.len() != after.params.l.len() {
        return Err(());
    }

    // A/B_G1/B_G2 doesn't change at all
    if before.params.a != after.params.a {
        return Err(());
    }
    if before.params.b_g1 != after.params.b_g1 {
        return Err(());
    }
    if before.params.b_g2 != after.params.b_g2 {
        return Err(());
    }

    // alpha/beta/gamma don't change
    if before.params.vk.alpha_g1 != after.params.vk.alpha_g1 {
        return Err(());
    }
    if before.params.vk.beta_g1 != after.params.vk.beta_g1 {
        return Err(());
    }
    if before.params.vk.beta_g2 != after.params.vk.beta_g2 {
        return Err(());
    }
    if before.params.vk.gamma_g2 != after.params.vk.gamma_g2 {
        return Err(());
    }

    // IC shouldn't change, as gamma doesn't change
    if before.params.vk.ic != after.params.vk.ic {
        return Err(());
    }

    // cs_hash should be the same
    if &before.cs_hash[..] != &after.cs_hash[..] {
        return Err(());
    }

    let sink = io::sink();
    let mut sink = HashWriter::new(sink);
    sink.write_all(&before.cs_hash[..]).unwrap();

    for pubkey in &before.contributions {
        pubkey.write(&mut sink).unwrap();
    }

    let pubkey = after.contributions.last().unwrap();
    sink.write_all(pubkey.s.into_uncompressed().as_ref()).unwrap();
    sink.write_all(pubkey.s_delta.into_uncompressed().as_ref()).unwrap();

    let h = sink.into_hash();

    // The transcript must be consistent
    if &pubkey.transcript[..] != h.as_ref() {
        return Err(());
    }

    let r = hash_to_g2(h.as_ref()).into_affine();

    // Check the signature of knowledge
    if !same_ratio((r, pubkey.r_delta), (pubkey.s, pubkey.s_delta)) {
        return Err(());
    }

    // Check the change from the old delta is consistent
    if !same_ratio(
        (before.params.vk.delta_g1, pubkey.delta_after),
        (r, pubkey.r_delta)
    ) {
        return Err(());
    }

    // Current parameters should have consistent delta in G1
    if pubkey.delta_after != after.params.vk.delta_g1 {
        return Err(());
    }

    // Current parameters should have consistent delta in G2
    if !same_ratio(
        (G1Affine::one(), pubkey.delta_after),
        (G2Affine::one(), after.params.vk.delta_g2)
    ) {
        return Err(());
    }

    // H and L queries should be updated with delta^-1
    if !same_ratio(
        merge_pairs(&before.params.h, &after.params.h),
        (after.params.vk.delta_g2, before.params.vk.delta_g2) // reversed for inverse
    ) {
        return Err(());
    }

    if !same_ratio(
        merge_pairs(&before.params.l, &after.params.l),
        (after.params.vk.delta_g2, before.params.vk.delta_g2) // reversed for inverse
    ) {
        return Err(());
    }

    let sink = io::sink();
    let mut sink = HashWriter::new(sink);
    pubkey.write(&mut sink).unwrap();
    let h = sink.into_hash();
    let mut response = [0u8; 64];
    response.copy_from_slice(h.as_ref());

    Ok(response)
}


/// Compute a keypair, given the current parameters. Keypairs
/// cannot be reused for multiple contributions or contributions
/// in different parameters.
pub fn keypair<R: Rng>(
    rng: &mut R,
    current: &MPCParameters,
) -> (PublicKey, PrivateKey)
{
    // Sample random delta
    let delta: Fr = rng.gen();

    // Compute delta s-pair in G1
    let s = G1::rand(rng).into_affine();
    let s_delta = s.mul(delta).into_affine();

    // H(cs_hash | <previous pubkeys> | s | s_delta)
    let h = {
        let sink = io::sink();
        let mut sink = HashWriter::new(sink);

        sink.write_all(&current.cs_hash[..]).unwrap();
        for pubkey in &current.contributions {
            pubkey.write(&mut sink).unwrap();
        }
        sink.write_all(s.into_uncompressed().as_ref()).unwrap();
        sink.write_all(s_delta.into_uncompressed().as_ref()).unwrap();

        sink.into_hash()
    };

    // This avoids making a weird assumption about the hash into the
    // group.
    let mut transcript = [0; 64];
    transcript.copy_from_slice(h.as_ref());

    // Compute delta s-pair in G2
    let r = hash_to_g2(h.as_ref()).into_affine();
    let r_delta = r.mul(delta).into_affine();

    (
        PublicKey {
            delta_after: current.params.vk.delta_g1.mul(delta).into_affine(),
            s: s,
            s_delta: s_delta,
            r_delta: r_delta,
            transcript: transcript
        },
        PrivateKey {
            delta: delta
        }
    )
}
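Putting this module together, the contributor flow mirrors the verify-binary diff at the top of this commit. A hedged end-to-end sketch: thread_rng stands in for the properly attested entropy a real ceremony requires, circuit.json is a placeholder, the module paths are assumed, and MPCParameters::new expects the matching phase1radix2m{exp} file in the working directory.

extern crate rand;

use bellman_ce::pairing::bn256::Bn256;
use phase2::circom_circuit::CircomCircuit; // paths assumed
use phase2::parameters::{MPCParameters, verify_contribution, contains_contribution};

fn ceremony_sketch() {
    let circuit = || CircomCircuit::<Bn256>::from_json_file("circuit.json");

    // Coordinator: build the initial parameters.
    let mut params = MPCParameters::new(circuit(), true).unwrap();
    let old_params = params.clone();

    // Participant: inject randomness, keep the 64-byte contribution hash.
    let my_hash = params.contribute(&mut rand::thread_rng());

    // Anyone: check that exactly one valid contribution was appended...
    let contribution = verify_contribution(&old_params, &params).expect("should verify");
    assert_eq!(&contribution[..], &my_hash[..]);

    // ...and that the whole transcript is consistent with the circuit.
    let all_hashes = params.verify(circuit(), true).unwrap();
    assert!(contains_contribution(&all_hashes, &my_hash));
}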
184
phase2/src/utils.rs
Normal file
@ -0,0 +1,184 @@
extern crate bellman_ce;
extern crate rand;
extern crate byteorder;

use byteorder::{
    BigEndian,
    ReadBytesExt,
};
use num_bigint::BigUint;
use num_traits::Num;
use std::sync::Arc;
use bellman_ce::pairing::{
    ff::{
        PrimeField,
    },
    CurveAffine,
    CurveProjective,
    Wnaf,
    Engine,
    bn256::{
        Bn256,
        G2,
        G1Affine,
        G2Affine,
        Fq12,
    }
};
use rand::{
    Rng,
    Rand,
    ChaChaRng,
    SeedableRng
};


/// Checks if pairs have the same ratio.
pub fn same_ratio<G1: CurveAffine>(
    g1: (G1, G1),
    g2: (G1::Pair, G1::Pair)
) -> bool
{
    g1.0.pairing_with(&g2.1) == g1.1.pairing_with(&g2.0)
}

/// Computes a random linear combination over v1/v2.
///
/// Checking that many pairs of elements are exponentiated by
/// the same `x` can be achieved (with high probability) with
/// the following technique:
///
/// Given v1 = [a, b, c] and v2 = [as, bs, cs], compute
/// (a*r1 + b*r2 + c*r3, (as)*r1 + (bs)*r2 + (cs)*r3) for some
/// random r1, r2, r3. Given (g, g^s)...
///
/// e(g, (as)*r1 + (bs)*r2 + (cs)*r3) = e(g^s, a*r1 + b*r2 + c*r3)
///
/// ... with high probability.
pub fn merge_pairs<G: CurveAffine>(v1: &[G], v2: &[G]) -> (G, G)
{
    use std::sync::Mutex;
    use rand::{thread_rng};

    assert_eq!(v1.len(), v2.len());

    let chunk = (v1.len() / num_cpus::get()) + 1;

    let s = Arc::new(Mutex::new(G::Projective::zero()));
    let sx = Arc::new(Mutex::new(G::Projective::zero()));

    crossbeam::scope(|scope| {
        for (v1, v2) in v1.chunks(chunk).zip(v2.chunks(chunk)) {
            let s = s.clone();
            let sx = sx.clone();

            scope.spawn(move || {
                // We do not need to be overly cautious of the RNG
                // used for this check.
                let rng = &mut thread_rng();

                let mut wnaf = Wnaf::new();
                let mut local_s = G::Projective::zero();
                let mut local_sx = G::Projective::zero();

                for (v1, v2) in v1.iter().zip(v2.iter()) {
                    let rho = G::Scalar::rand(rng);
                    let mut wnaf = wnaf.scalar(rho.into_repr());
                    let v1 = wnaf.base(v1.into_projective());
                    let v2 = wnaf.base(v2.into_projective());

                    local_s.add_assign(&v1);
                    local_sx.add_assign(&v2);
                }

                s.lock().unwrap().add_assign(&local_s);
                sx.lock().unwrap().add_assign(&local_sx);
            });
        }
    });

    let s = s.lock().unwrap().into_affine();
    let sx = sx.lock().unwrap().into_affine();

    (s, sx)
}


/// Hashes to G2 using the first 32 bytes of `digest`. Panics if `digest` is less
/// than 32 bytes.
pub fn hash_to_g2(mut digest: &[u8]) -> G2
{
    assert!(digest.len() >= 32);

    let mut seed = Vec::with_capacity(8);

    for _ in 0..8 {
        seed.push(digest.read_u32::<BigEndian>().expect("assertion above guarantees this to work"));
    }

    ChaChaRng::from_seed(&seed).gen()
}

pub fn repr_to_big<T: std::fmt::Display>(r: T) -> String {
    BigUint::from_str_radix(&format!("{}", r)[2..], 16).unwrap().to_str_radix(10)
}

pub fn p1_to_vec(p: &G1Affine) -> Vec<String> {
    return vec![
        repr_to_big(p.get_x().into_repr()),
        repr_to_big(p.get_y().into_repr()),
        if p.is_zero() { "0".to_string() } else { "1".to_string() }
    ]
}

pub fn p2_to_vec(p: &G2Affine) -> Vec<Vec<String>> {
    return vec![
        vec![
            repr_to_big(p.get_x().c0.into_repr()),
            repr_to_big(p.get_x().c1.into_repr()),
        ],
        vec![
            repr_to_big(p.get_y().c0.into_repr()),
            repr_to_big(p.get_y().c1.into_repr()),
        ],
        if p.is_zero() {
            vec!["0".to_string(), "0".to_string()]
        } else {
            vec!["1".to_string(), "0".to_string()]
        }
    ]
}

pub fn pairing_to_vec(p: &Fq12) -> Vec<Vec<Vec<String>>> {
    return vec![
        vec![
            vec![
                repr_to_big(p.c0.c0.c0.into_repr()),
                repr_to_big(p.c0.c0.c1.into_repr()),
            ],
            vec![
                repr_to_big(p.c0.c1.c0.into_repr()),
                repr_to_big(p.c0.c1.c1.into_repr()),
            ],
            vec![
                repr_to_big(p.c0.c2.c0.into_repr()),
                repr_to_big(p.c0.c2.c1.into_repr()),
            ]
        ],
        vec![
            vec![
                repr_to_big(p.c1.c0.c0.into_repr()),
                repr_to_big(p.c1.c0.c1.into_repr()),
            ],
            vec![
                repr_to_big(p.c1.c1.c0.into_repr()),
                repr_to_big(p.c1.c1.c1.into_repr()),
            ],
            vec![
                repr_to_big(p.c1.c2.c0.into_repr()),
                repr_to_big(p.c1.c2.c1.into_repr()),
            ]
        ],
    ]
}
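The `merge_pairs`/`same_ratio` pair is the verification cost model in miniature: one random linear combination collapses n pairing checks into a single ratio check. A hedged sketch of how the two compose, matching the shape of the H/L query checks in parameters.rs above; the function itself and the module paths are illustrative.

use bellman_ce::pairing::bn256::{G1Affine, G2Affine};
use phase2::utils::{merge_pairs, same_ratio}; // paths assumed

/// Checks that `after` is `before` scaled by the inverse of the factor
/// that took `delta_g2_before` to `delta_g2_after`, i.e. the shape of
/// the H/L query checks above.
fn queries_consistent(
    before: &[G1Affine],
    after: &[G1Affine],
    delta_g2_before: G2Affine,
    delta_g2_after: G2Affine,
) -> bool {
    // Collapse both vectors with one random linear combination...
    let combined = merge_pairs(before, after);
    // ...then do a single ratio check; the G2 pair is reversed because
    // the queries move by delta^-1 while delta_g2 moves by delta.
    same_ratio(combined, (delta_g2_after, delta_g2_before))
}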
223
phase2/src/verifier_groth.sol
Normal file
@ -0,0 +1,223 @@
|
|||||||
|
//
|
||||||
|
// Copyright 2017 Christian Reitwiessner
|
||||||
|
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||||
|
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||||
|
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||||
|
//
|
||||||
|
// 2019 OKIMS
|
||||||
|
// ported to solidity 0.5
|
||||||
|
// fixed linter warnings
|
||||||
|
// added require error messages
|
||||||
|
//
|
||||||
|
pragma solidity ^0.5.0;
|
||||||
|
library Pairing {
|
||||||
|
struct G1Point {
|
||||||
|
uint X;
|
||||||
|
uint Y;
|
||||||
|
}
|
||||||
|
// Encoding of field elements is: X[0] * z + X[1]
|
||||||
|
struct G2Point {
|
||||||
|
uint[2] X;
|
||||||
|
uint[2] Y;
|
||||||
|
}
|
||||||
|
/// @return the generator of G1
|
||||||
|
function P1() internal pure returns (G1Point memory) {
|
||||||
|
return G1Point(1, 2);
|
||||||
|
}
|
||||||
|
/// @return the generator of G2
|
||||||
|
function P2() internal pure returns (G2Point memory) {
|
||||||
|
// Original code point
|
||||||
|
return G2Point(
|
||||||
|
[11559732032986387107991004021392285783925812861821192530917403151452391805634,
|
||||||
|
10857046999023057135944570762232829481370756359578518086990519993285655852781],
|
||||||
|
[4082367875863433681332203403145435568316851327593401208105741076214120093531,
|
||||||
|
8495653923123431417604973247489272438418190587263600148770280649306958101930]
|
||||||
|
);
|
||||||
|
|
||||||
|
/*
|
||||||
|
// Changed by Jordi point
|
||||||
|
return G2Point(
|
||||||
|
[10857046999023057135944570762232829481370756359578518086990519993285655852781,
|
||||||
|
11559732032986387107991004021392285783925812861821192530917403151452391805634],
|
||||||
|
[8495653923123431417604973247489272438418190587263600148770280649306958101930,
|
||||||
|
4082367875863433681332203403145435568316851327593401208105741076214120093531]
|
||||||
|
);
|
||||||
|
*/
|
||||||
|
}
|
||||||
|
/// @return the negation of p, i.e. p.addition(p.negate()) should be zero.
|
||||||
|
function negate(G1Point memory p) internal pure returns (G1Point memory) {
|
||||||
|
// The prime q in the base field F_q for G1
|
||||||
|
uint q = 21888242871839275222246405745257275088696311157297823662689037894645226208583;
|
||||||
|
if (p.X == 0 && p.Y == 0)
|
||||||
|
return G1Point(0, 0);
|
||||||
|
return G1Point(p.X, q - (p.Y % q));
|
||||||
|
}
|
||||||
|
/// @return the sum of two points of G1
|
||||||
|
function addition(G1Point memory p1, G1Point memory p2) internal view returns (G1Point memory r) {
|
||||||
|
uint[4] memory input;
|
||||||
|
input[0] = p1.X;
|
||||||
|
input[1] = p1.Y;
|
||||||
|
input[2] = p2.X;
|
||||||
|
input[3] = p2.Y;
|
||||||
|
bool success;
|
||||||
|
// solium-disable-next-line security/no-inline-assembly
|
||||||
|
assembly {
|
||||||
|
success := staticcall(sub(gas, 2000), 6, input, 0xc0, r, 0x60)
|
||||||
|
// Use "invalid" to make gas estimation work
|
||||||
|
switch success case 0 { invalid() }
|
||||||
|
}
|
||||||
|
require(success,"pairing-add-failed");
|
||||||
|
}
|
||||||
|
/// @return the product of a point on G1 and a scalar, i.e.
|
||||||
|
/// p == p.scalar_mul(1) and p.addition(p) == p.scalar_mul(2) for all points p.
|
||||||
|
function scalar_mul(G1Point memory p, uint s) internal view returns (G1Point memory r) {
|
||||||
|
uint[3] memory input;
|
||||||
|
input[0] = p.X;
|
||||||
|
input[1] = p.Y;
|
||||||
|
input[2] = s;
|
||||||
|
bool success;
|
||||||
|
// solium-disable-next-line security/no-inline-assembly
|
||||||
|
assembly {
|
||||||
|
success := staticcall(sub(gas, 2000), 7, input, 0x80, r, 0x60)
|
||||||
|
// Use "invalid" to make gas estimation work
|
||||||
|
switch success case 0 { invalid() }
|
||||||
|
}
|
||||||
|
require (success,"pairing-mul-failed");
|
||||||
|
}
|
||||||
|
/// @return the result of computing the pairing check
|
||||||
|
/// e(p1[0], p2[0]) * .... * e(p1[n], p2[n]) == 1
|
||||||
|
/// For example pairing([P1(), P1().negate()], [P2(), P2()]) should
|
||||||
|
/// return true.
|
||||||
|
function pairing(G1Point[] memory p1, G2Point[] memory p2) internal view returns (bool) {
|
||||||
|
require(p1.length == p2.length,"pairing-lengths-failed");
|
||||||
|
uint elements = p1.length;
|
||||||
|
uint inputSize = elements * 6;
|
||||||
|
uint[] memory input = new uint[](inputSize);
|
||||||
|
for (uint i = 0; i < elements; i++)
|
||||||
|
{
|
||||||
|
input[i * 6 + 0] = p1[i].X;
|
||||||
|
input[i * 6 + 1] = p1[i].Y;
|
||||||
|
input[i * 6 + 2] = p2[i].X[0];
|
||||||
|
input[i * 6 + 3] = p2[i].X[1];
|
||||||
|
input[i * 6 + 4] = p2[i].Y[0];
|
||||||
|
input[i * 6 + 5] = p2[i].Y[1];
|
||||||
|
}
|
||||||
|
uint[1] memory out;
|
||||||
|
bool success;
|
||||||
|
// solium-disable-next-line security/no-inline-assembly
|
||||||
|
assembly {
|
||||||
|
success := staticcall(sub(gas, 2000), 8, add(input, 0x20), mul(inputSize, 0x20), out, 0x20)
|
||||||
|
// Use "invalid" to make gas estimation work
|
||||||
|
switch success case 0 { invalid() }
|
||||||
|
}
|
||||||
|
require(success,"pairing-opcode-failed");
|
||||||
|
return out[0] != 0;
|
||||||
|
}
|
||||||
|
    /// Convenience method for a pairing check for two pairs.
    function pairingProd2(G1Point memory a1, G2Point memory a2, G1Point memory b1, G2Point memory b2) internal view returns (bool) {
        G1Point[] memory p1 = new G1Point[](2);
        G2Point[] memory p2 = new G2Point[](2);
        p1[0] = a1;
        p1[1] = b1;
        p2[0] = a2;
        p2[1] = b2;
        return pairing(p1, p2);
    }
    /// Convenience method for a pairing check for three pairs.
    function pairingProd3(
        G1Point memory a1, G2Point memory a2,
        G1Point memory b1, G2Point memory b2,
        G1Point memory c1, G2Point memory c2
    ) internal view returns (bool) {
        G1Point[] memory p1 = new G1Point[](3);
        G2Point[] memory p2 = new G2Point[](3);
        p1[0] = a1;
        p1[1] = b1;
        p1[2] = c1;
        p2[0] = a2;
        p2[1] = b2;
        p2[2] = c2;
        return pairing(p1, p2);
    }
    /// Convenience method for a pairing check for four pairs.
    function pairingProd4(
        G1Point memory a1, G2Point memory a2,
        G1Point memory b1, G2Point memory b2,
        G1Point memory c1, G2Point memory c2,
        G1Point memory d1, G2Point memory d2
    ) internal view returns (bool) {
        G1Point[] memory p1 = new G1Point[](4);
        G2Point[] memory p2 = new G2Point[](4);
        p1[0] = a1;
        p1[1] = b1;
        p1[2] = c1;
        p1[3] = d1;
        p2[0] = a2;
        p2[1] = b2;
        p2[2] = c2;
        p2[3] = d2;
        return pairing(p1, p2);
    }
}
contract Verifier {
    using Pairing for *;
    struct VerifyingKey {
        Pairing.G1Point alfa1;
        Pairing.G2Point beta2;
        Pairing.G2Point gamma2;
        Pairing.G2Point delta2;
        Pairing.G1Point[] IC;
    }
    struct Proof {
        Pairing.G1Point A;
        Pairing.G2Point B;
        Pairing.G1Point C;
    }
    function verifyingKey() internal pure returns (VerifyingKey memory vk) {
        vk.alfa1 = Pairing.G1Point(<%vk_alfa1%>);
        vk.beta2 = Pairing.G2Point(<%vk_beta2%>);
        vk.gamma2 = Pairing.G2Point(<%vk_gamma2%>);
        vk.delta2 = Pairing.G2Point(<%vk_delta2%>);
        vk.IC = new Pairing.G1Point[](<%vk_ic_length%>);
        <%vk_ic_pts%>
    }
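The <%vk_...%> markers are placeholders that the generate_verifier binary fills with the ceremony's verification key. A minimal sketch of that substitution step; the point-formatting helper here is hypothetical, not the binary's actual code:

// Hypothetical helper: render a G1 point's coordinates for the template.
fn g1_to_solidity(x: &str, y: &str) -> String {
    format!("uint256({}), uint256({})", x, y)
}

fn main() {
    let template = "vk.alfa1 = Pairing.G1Point(<%vk_alfa1%>);";
    let rendered = template.replace("<%vk_alfa1%>", &g1_to_solidity("0x01", "0x02"));
    println!("{}", rendered);
}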
    function verify(uint[] memory input, Proof memory proof) internal view returns (uint) {
        uint256 snark_scalar_field = 21888242871839275222246405745257275088548364400416034343698204186575808495617;
        VerifyingKey memory vk = verifyingKey();
        require(input.length + 1 == vk.IC.length, "verifier-bad-input");
        // Compute the linear combination vk_x
        Pairing.G1Point memory vk_x = Pairing.G1Point(0, 0);
        for (uint i = 0; i < input.length; i++) {
            require(input[i] < snark_scalar_field, "verifier-gte-snark-scalar-field");
            vk_x = Pairing.addition(vk_x, Pairing.scalar_mul(vk.IC[i + 1], input[i]));
        }
        vk_x = Pairing.addition(vk_x, vk.IC[0]);
        if (!Pairing.pairingProd4(
            Pairing.negate(proof.A), proof.B,
            vk.alfa1, vk.beta2,
            vk_x, vk.gamma2,
            proof.C, vk.delta2
        )) return 1;
        return 0;
    }
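The same four-pairing product can be checked off-chain with bellman_ce's Groth16 verifier. A sketch with key and proof loading omitted; verify_proof's signature is taken from the upstream bellman API:

extern crate bellman_ce;

use bellman_ce::groth16::{prepare_verifying_key, verify_proof, Proof, VerifyingKey};
use bellman_ce::pairing::bn256::{Bn256, Fr};

// Returns true iff e(-A, B) * e(alfa1, beta2) * e(vk_x, gamma2) * e(C, delta2) == 1,
// which is exactly the check verify() performs on-chain.
fn check(vk: &VerifyingKey<Bn256>, proof: &Proof<Bn256>, public_inputs: &[Fr]) -> bool {
    let pvk = prepare_verifying_key(vk);
    verify_proof(&pvk, proof, public_inputs).unwrap_or(false)
}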
    function verifyProof(
        uint[2] memory a,
        uint[2][2] memory b,
        uint[2] memory c,
        uint[<%vk_input_length%>] memory input
    ) public view returns (bool r) {
        Proof memory proof;
        proof.A = Pairing.G1Point(a[0], a[1]);
        proof.B = Pairing.G2Point([b[0][0], b[0][1]], [b[1][0], b[1][1]]);
        proof.C = Pairing.G1Point(c[0], c[1]);
        // verify() takes a dynamic array and returns 0 on success
        uint[] memory inputValues = new uint[](input.length);
        for (uint i = 0; i < input.length; i++) {
            inputValues[i] = input[i];
        }
        return verify(inputValues, proof) == 0;
    }
    function verifyProof(bytes memory proof, uint[<%vk_input_length%>] memory inputs) public view returns (bool r) {
        uint[8] memory p = abi.decode(proof, (uint[8]));
        // name the struct _proof so it does not shadow the `proof` bytes argument
        Proof memory _proof;
        _proof.A = Pairing.G1Point(p[0], p[1]);
        _proof.B = Pairing.G2Point([p[2], p[3]], [p[4], p[5]]);
        _proof.C = Pairing.G1Point(p[6], p[7]);
        uint[] memory inputValues = new uint[](inputs.length);
        for (uint i = 0; i < inputs.length; i++) {
            inputValues[i] = inputs[i];
        }
        return verify(inputValues, _proof) == 0;
    }
}
44
phase2/test.sh
Executable file

@@ -0,0 +1,44 @@
#!/bin/sh

set -e

if [ ! -f ../powersoftau/phase1radix2m0 ]; then
  echo "Please run powers of tau test first to generate radix files"
  exit 1
fi

# move results of powers of tau here
cp ../powersoftau/phase1radix* .

# compile circuit
npx circom circuit.circom -o circuit.json && npx snarkjs info -c circuit.json
# npx snarkjs info -c circuit.json

# initialize ceremony
cargo run --release --bin new circuit.json circom1.params

cargo run --release --bin contribute circom1.params circom2.params asdajdzixcjlzxjczxlkcjzxlkcj
cargo run --release --bin verify_contribution circuit.json circom1.params circom2.params

cargo run --release --bin contribute circom2.params circom3.params dsfjkshdfakjhsdf
cargo run --release --bin verify_contribution circuit.json circom2.params circom3.params

cargo run --release --bin contribute circom3.params circom4.params askldfjklasdf
cargo run --release --bin verify_contribution circuit.json circom3.params circom4.params

# generate resulting keys
cargo run --release --bin export_keys circom4.params vk.json pk.json
# create dummy keys in circom format
echo "Generating dummy key files..."
npx snarkjs setup --protocol groth
# patch dummy keys with actual keys params
cargo run --release --bin copy_json proving_key.json pk.json transformed_pk.json
cargo run --release --bin copy_json verification_key.json vk.json transformed_vk.json

# generate solidity verifier
cargo run --release --bin generate_verifier circom4.params verifier.sol

# try to generate and verify proof
npx snarkjs calculatewitness
cargo run --release --bin prove circuit.json witness.json circom4.params proof.json public.json
npx snarkjs verify --vk transformed_vk.json --proof proof.json
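For reference, the contribute invocations above boil down to a read/contribute/write round trip on the parameter file. A sketch using the phase2 crate's upstream API (MPCParameters::read, contribute, write); this fork's binaries may differ, for example by taking the extra entropy argument the script passes:

extern crate phase2;
extern crate rand;

use std::fs::File;

fn main() {
    // read the current parameter set (checked = true verifies the points)
    let reader = File::open("circom1.params").unwrap();
    let mut params = phase2::MPCParameters::read(reader, true).unwrap();

    // apply a random contribution and remember its hash
    let rng = &mut rand::thread_rng();
    let hash = params.contribute(rng);
    for b in hash.iter() {
        print!("{:02x}", b);
    }
    println!();

    // write the new parameter set for the next participant
    let writer = File::create("circom2.params").unwrap();
    params.write(writer).unwrap();
}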
@@ -1,23 +0,0 @@
-import json
-
-f = json.load(open('proving_key.json'))
-f2 = json.load(open('pk.json'))
-
-for k in f2:
-    f[k] = f2[k]
-
-f3 = open('transformed_pk.json', 'w')
-f3.write(json.dumps(f))
-f3.close()
-
-f = json.load(open('verification_key.json'))
-f2 = json.load(open('vk.json'))
-
-for k in f2:
-    f[k] = f2[k]
-
-del f['vk_alfabeta_12']
-
-f3 = open('transformed_vk.json', 'w')
-f3.write(json.dumps(f))
-f3.close()
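The Rust copy_json binary invoked in test.sh replaces this script. Its key-merging step looks roughly like this with serde_json (a sketch, not the binary's actual source; filenames taken from the script above):

extern crate serde_json;

use std::fs;
use serde_json::Value;

fn main() {
    let mut dst: Value = serde_json::from_str(&fs::read_to_string("proving_key.json").unwrap()).unwrap();
    let src: Value = serde_json::from_str(&fs::read_to_string("pk.json").unwrap()).unwrap();
    // overwrite every key present in the ceremony output, as the script did
    if let (Value::Object(d), Value::Object(s)) = (&mut dst, &src) {
        for (k, v) in s {
            d.insert(k.clone(), v.clone());
        }
    }
    fs::write("transformed_pk.json", dst.to_string()).unwrap();
}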
1236
phase2/tools/patch_vk/package-lock.json
generated

File diff suppressed because it is too large
@@ -1,14 +0,0 @@
-{
-  "name": "patch_vk",
-  "version": "1.0.0",
-  "description": "",
-  "main": "patch_vk.js",
-  "scripts": {
-    "test": "echo \"Error: no test specified\" && exit 1"
-  },
-  "author": "",
-  "license": "ISC",
-  "dependencies": {
-    "snarkjs": "^0.1.20"
-  }
-}
@@ -1,9 +0,0 @@
-const fs = require('fs');
-const {stringifyBigInts, unstringifyBigInts} = require('snarkjs/src/stringifybigint.js');
-const BN128 = require('snarkjs/src/bn128');
-
-const bn128 = new BN128();
-
-const json = unstringifyBigInts(JSON.parse(fs.readFileSync('transformed_vk.json')));
-json.vk_alfabeta_12 = bn128.F12.affine(bn128.pairing(json.vk_alfa_1, json.vk_beta_2));
-fs.writeFileSync('patched_transformed_vk.json', JSON.stringify(stringifyBigInts(json)));
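vk_alfabeta_12 is just the pairing e(vk_alfa_1, vk_beta_2), so the patched field can also be recomputed natively with bellman_ce instead of going through snarkjs. A sketch; deserializing the JSON points into curve types is omitted:

extern crate bellman_ce;

use bellman_ce::pairing::Engine;
use bellman_ce::pairing::bn256::{Bn256, G1Affine, G2Affine, Fq12};

// The Fq12 element snarkjs stores as vk_alfabeta_12.
fn alfabeta_12(alfa1: G1Affine, beta2: G2Affine) -> Fq12 {
    Bn256::pairing(alfa1, beta2)
}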
5
powersoftau/.gitignore
vendored

@@ -1,6 +1,7 @@
 /target/
 **/*.rs.bk
-transcript*
+transcript
 challenge*
 response*
-new_challenge*
+phase1radix2m*
+tmp_*
7
powersoftau/Cargo.lock
generated

@@ -147,6 +147,11 @@ name = "either"
 version = "1.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
+[[package]]
+name = "exitcode"
+version = "1.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
 [[package]]
 name = "ff_ce"
 version = "0.7.1"
@@ -310,6 +315,7 @@ dependencies = [
  "blake2 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "crossbeam 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "exitcode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "generic-array 0.8.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "hex-literal 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -490,6 +496,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 "checksum crypto-mac 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "779015233ac67d65098614aec748ac1c756ab6677fa2e14cf8b37c08dfed1198"
 "checksum digest 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e5b29bf156f3f4b3c4f610a25ff69370616ae6e0657d416de22645483e72af0a"
 "checksum either 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c67353c641dc847124ea1902d69bd753dee9bb3beff9aa3662ecf86c971d1fac"
+"checksum exitcode 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "de853764b47027c2e862a995c34978ffa63c1501f2e15f987ba11bd4f9bba193"
 "checksum ff_ce 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "18af1ea1b80a4b474fae13af4c58cf0a5a2bc33832d5fa70f68a4b286178fdb5"
 "checksum ff_derive_ce 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1d245b4e76c5b36bb7721ea15b7fbc61bebf0c5d2890eaf49fe1e2a3eed36db9"
 "checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"
@@ -20,6 +20,7 @@ typenum = "1.9.0"
 byteorder = "1.1.0"
 hex-literal = "0.1"
 rust-crypto = "0.2"
+exitcode = "1.1.2"
 
 memmap = "0.7.0"
 itertools = "0.8.0"
@@ -27,4 +28,5 @@ itertools = "0.8.0"
 bellman_ce = { path = "../bellman" }
 
 [features]
+smalltest = []
 
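The new smalltest feature is consumed through cfg attributes. A sketch of the usual pattern; the constant values here are made up, the real ones live in the ceremony parameters:

// Built with `cargo build --features smalltest`, the small value is compiled in.
#[cfg(feature = "smalltest")]
pub const REQUIRED_POWER: usize = 10;

#[cfg(not(feature = "smalltest"))]
pub const REQUIRED_POWER: usize = 28;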
@@ -44,7 +44,7 @@ pub enum AccumulatorState{
 ///
 /// * (τ, τ<sup>2</sup>, ..., τ<sup>2<sup>22</sup> - 2</sup>, α, ατ, ατ<sup>2</sup>, ..., ατ<sup>2<sup>21</sup> - 1</sup>, β, βτ, βτ<sup>2</sup>, ..., βτ<sup>2<sup>21</sup> - 1</sup>)<sub>1</sub>
 /// * (β, τ, τ<sup>2</sup>, ..., τ<sup>2<sup>21</sup> - 1</sup>)<sub>2</sub>
-pub struct BachedAccumulator<E: Engine, P: PowersOfTauParameters> {
+pub struct BatchedAccumulator<E: Engine, P: PowersOfTauParameters> {
     /// tau^0, tau^1, tau^2, ..., tau^{TAU_POWERS_G1_LENGTH - 1}
     pub tau_powers_g1: Vec<E::G1Affine>,
     /// tau^0, tau^1, tau^2, ..., tau^{TAU_POWERS_LENGTH - 1}
@@ -61,8 +61,8 @@ pub struct BachedAccumulator<E: Engine, P: PowersOfTauParameters> {
     marker: std::marker::PhantomData<P>,
 }
 
-impl<E:Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
-    /// Calcualte the contibution hash from the resulting file. Original powers of tau implementaiton
+impl<E:Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
+    /// Calculate the contribution hash from the resulting file. Original powers of tau implementation
     /// used a specially formed writer to write to the file and calculate a hash on the fly, but memory-constrained
     /// implementation now writes without a particular order, so plain recalculation at the end
     /// of the procedure is more efficient
@@ -78,7 +78,7 @@ impl<E:Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
     }
 }
 
-impl<E:Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
+impl<E:Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
     pub fn empty() -> Self {
         Self {
             tau_powers_g1: vec![],
@@ -92,7 +92,7 @@ impl<E:Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
     }
 }
 
-impl<E:Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
+impl<E:Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
     fn g1_size(compression: UseCompression) -> usize {
         match compression {
             UseCompression::Yes => {
@@ -189,7 +189,7 @@ impl<E:Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
 }
 
 /// Verifies a transformation of the `Accumulator` with the `PublicKey`, given a 64-byte transcript `digest`.
-pub fn verify_transform<E: Engine, P: PowersOfTauParameters>(before: &BachedAccumulator<E, P>, after: &BachedAccumulator<E, P>, key: &PublicKey<E>, digest: &[u8]) -> bool
+pub fn verify_transform<E: Engine, P: PowersOfTauParameters>(before: &BatchedAccumulator<E, P>, after: &BatchedAccumulator<E, P>, key: &PublicKey<E>, digest: &[u8]) -> bool
 {
     assert_eq!(digest.len(), 64);
 
@@ -253,7 +253,7 @@ pub fn verify_transform<E: Engine, P: PowersOfTauParameters>(before: &BachedAccu
     true
 }
 
-impl<E:Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
+impl<E:Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
     /// Verifies a transformation of the `Accumulator` with the `PublicKey`, given a 64-byte transcript `digest`.
     pub fn verify_transformation(
         input_map: &Mmap,
@@ -449,7 +449,7 @@ impl<E:Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
         input_map: &Mmap,
         check_input_for_correctness: CheckForCorrectness,
         compression: UseCompression,
-    ) -> io::Result<BachedAccumulator<E, P>>
+    ) -> io::Result<BatchedAccumulator<E, P>>
     {
         use itertools::MinMaxResult::{MinMax};
 
@@ -494,7 +494,7 @@ impl<E:Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
             }
         }
 
-        Ok(BachedAccumulator {
+        Ok(BatchedAccumulator {
             tau_powers_g1: tau_powers_g1,
             tau_powers_g2: tau_powers_g2,
             alpha_tau_powers_g1: alpha_tau_powers_g1,
@@ -515,7 +515,7 @@ impl<E:Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
 
         for chunk in &(0..P::TAU_POWERS_LENGTH).into_iter().chunks(P::EMPIRICAL_BATCH_SIZE) {
             if let MinMax(start, end) = chunk.minmax() {
-                let mut tmp_acc = BachedAccumulator::<E,P> {
+                let mut tmp_acc = BatchedAccumulator::<E,P> {
                     tau_powers_g1: (&self.tau_powers_g1[start..end+1]).to_vec(),
                     tau_powers_g2: (&self.tau_powers_g2[start..end+1]).to_vec(),
                     alpha_tau_powers_g1: (&self.alpha_tau_powers_g1[start..end+1]).to_vec(),
@@ -532,7 +532,7 @@ impl<E:Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
 
         for chunk in &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).into_iter().chunks(P::EMPIRICAL_BATCH_SIZE) {
             if let MinMax(start, end) = chunk.minmax() {
-                let mut tmp_acc = BachedAccumulator::<E,P> {
+                let mut tmp_acc = BatchedAccumulator::<E,P> {
                     tau_powers_g1: (&self.tau_powers_g1[start..end+1]).to_vec(),
                     tau_powers_g2: vec![],
                     alpha_tau_powers_g1: vec![],
@@ -552,7 +552,7 @@ impl<E:Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
 
 }
 
-impl<E:Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
+impl<E:Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
     pub fn read_chunk (
         &mut self,
         from: usize,
@@ -721,7 +721,7 @@ impl<E:Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
     }
 }
 
-impl<E:Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
+impl<E:Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
     fn write_all(
         &mut self,
         chunk_start: usize,
@@ -826,12 +826,12 @@ impl<E:Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
 
 }
 
-impl<E:Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
+impl<E:Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
     /// Transforms the accumulator with a private key.
     /// Due to large amount of data in a previous accumulator even in the compressed form
     /// this function can now work on compressed input. Output can be made in any form
     /// WARNING: Contributor does not have to check that values from challenge file were serialized
-    /// corrently, but we may want to enforce it if a ceremony coordinator does not recompress the previous
+    /// correctly, but we may want to enforce it if a ceremony coordinator does not recompress the previous
     /// contribution into the new challenge file
     pub fn transform(
         input_map: &Mmap,
@@ -887,7 +887,7 @@ impl<E:Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
             // Turn it all back into affine points
             for (projective, affine) in projective.iter().zip(bases.iter_mut()) {
                 *affine = projective.into_affine();
-                assert!(!affine.is_zero(), "your contribution happed to produce a point at infinity, please re-run");
+                assert!(!affine.is_zero(), "your contribution happened to produce a point at infinity, please re-run");
             }
         }
 
@@ -923,7 +923,7 @@ impl<E:Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
                 batch_exp::<E, _>(&mut accumulator.alpha_tau_powers_g1, &taupowers[0..], Some(&key.alpha));
                 batch_exp::<E, _>(&mut accumulator.beta_tau_powers_g1, &taupowers[0..], Some(&key.beta));
                 accumulator.beta_g2 = accumulator.beta_g2.mul(key.beta).into_affine();
-                assert!(!accumulator.beta_g2.is_zero(), "your contribution happed to produce a point at infinity, please re-run");
+                assert!(!accumulator.beta_g2.is_zero(), "your contribution happened to produce a point at infinity, please re-run");
                 accumulator.write_chunk(start, compress_the_output, output_map)?;
                 println!("Done processing {} powers of tau", end);
             } else {
@@ -957,7 +957,7 @@ impl<E:Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
 
                 batch_exp::<E, _>(&mut accumulator.tau_powers_g1, &taupowers[0..], None);
                 //accumulator.beta_g2 = accumulator.beta_g2.mul(key.beta).into_affine();
-                //assert!(!accumulator.beta_g2.is_zero(), "your contribution happed to produce a point at infinity, please re-run");
+                //assert!(!accumulator.beta_g2.is_zero(), "your contribution happened to produce a point at infinity, please re-run");
                 accumulator.write_chunk(start, compress_the_output, output_map)?;
 
                 println!("Done processing {} powers of tau", end);
@@ -970,7 +970,7 @@ impl<E:Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
     }
 }
 
-impl<E:Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
+impl<E:Engine, P: PowersOfTauParameters> BatchedAccumulator<E, P> {
     /// Transforms the accumulator with a private key.
     pub fn generate_initial(
         output_map: &mut MmapMut,
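The transform loop above walks the powers in EMPIRICAL_BATCH_SIZE windows via itertools. A standalone sketch of that chunking pattern (itertools 0.8; the sizes here are arbitrary stand-ins). Note that matching only the MinMax arm, as the accumulator does, would silently skip a one-element trailing chunk; the powers-of-two sizes used in the ceremony never produce one:

extern crate itertools;

use itertools::Itertools;
use itertools::MinMaxResult::{MinMax, NoElements, OneElement};

fn main() {
    let total = 16usize;      // stand-in for P::TAU_POWERS_LENGTH
    let batch = 4usize;       // stand-in for P::EMPIRICAL_BATCH_SIZE
    for chunk in &(0..total).chunks(batch) {
        match chunk.minmax() {
            // the accumulator slices [start..end+1] out of each vector
            MinMax(start, end) => println!("process window [{}, {}]", start, end),
            OneElement(i) => println!("process window [{}, {}]", i, i),
            NoElements => {}
        }
    }
}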
@@ -6,9 +6,9 @@ extern crate blake2;
 extern crate byteorder;
 extern crate crypto;
 
-// use powersoftau::bn256::{Bn256CeremonyParameters};
-use powersoftau::small_bn256::{Bn256CeremonyParameters};
-use powersoftau::batched_accumulator::{BachedAccumulator};
+use powersoftau::bn256::{Bn256CeremonyParameters};
+use powersoftau::batched_accumulator::{BatchedAccumulator};
 use powersoftau::keypair::{keypair};
 use powersoftau::parameters::{UseCompression, CheckForCorrectness};
 
@@ -29,6 +29,14 @@ const CHECK_INPUT_CORRECTNESS: CheckForCorrectness = CheckForCorrectness::No;
 
 
 fn main() {
+    let args: Vec<String> = std::env::args().collect();
+    if args.len() != 3 {
+        println!("Usage: \n<challenge_file> <response_file>");
+        std::process::exit(exitcode::USAGE);
+    }
+    let challenge_filename = &args[1];
+    let response_filename = &args[2];
+
     println!("Will contribute a random beacon to accumulator for 2^{} powers of tau", Bn256CeremonyParameters::REQUIRED_POWER);
     println!("In total will generate up to {} powers", Bn256CeremonyParameters::TAU_POWERS_G1_LENGTH);
 
@@ -44,7 +52,7 @@ fn main() {
     let mut cur_hash: [u8; 32] = hex!("0000000000000000000a558a61ddc8ee4e488d647a747fe4dcc362fe2026c620");
 
     // Performs 2^n hash iterations over it
-    const N: usize = 10;
+    const N: u64 = 10;
 
     for i in 0..(1u64<<N) {
         // Print 1024 of the interstitial states
@@ -56,7 +64,7 @@ fn main() {
             for b in cur_hash.iter() {
                 print!("{:02x}", b);
             }
-            println!("");
+            println!();
         }
 
         let mut h = Sha256::new();
@@ -68,7 +76,7 @@ fn main() {
     for b in cur_hash.iter() {
         print!("{:02x}", b);
     }
-    println!("");
+    println!();
 
     let mut digest = &cur_hash[..];
 
@@ -82,13 +90,14 @@ fn main() {
 
     println!("Done creating a beacon RNG");
 
-    // Try to load `./challenge` from disk.
+    // Try to load challenge file from disk.
     let reader = OpenOptions::new()
                             .read(true)
-                            .open("challenge").expect("unable open `./challenge` in this directory");
+                            .open(challenge_filename)
+                            .expect("unable open challenge file in this directory");
 
     {
-        let metadata = reader.metadata().expect("unable to get filesystem metadata for `./challenge`");
+        let metadata = reader.metadata().expect("unable to get filesystem metadata for challenge file");
         let expected_challenge_length = match INPUT_IS_COMPRESSED {
             UseCompression::Yes => {
                 Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE
@@ -99,18 +108,19 @@ fn main() {
         };
 
         if metadata.len() != (expected_challenge_length as u64) {
-            panic!("The size of `./challenge` should be {}, but it's {}, so something isn't right.", expected_challenge_length, metadata.len());
+            panic!("The size of challenge file should be {}, but it's {}, so something isn't right.", expected_challenge_length, metadata.len());
        }
     }
 
     let readable_map = unsafe { MmapOptions::new().map(&reader).expect("unable to create a memory map for input") };
 
-    // Create `./response` in this directory
+    // Create response file in this directory
     let writer = OpenOptions::new()
                             .read(true)
                             .write(true)
                             .create_new(true)
-                            .open("response").expect("unable to create `./response` in this directory");
+                            .open(response_filename)
+                            .expect("unable to create response file in this directory");
 
     let required_output_length = match COMPRESS_THE_OUTPUT {
         UseCompression::Yes => {
@@ -127,7 +137,7 @@ fn main() {
 
     println!("Calculating previous contribution hash...");
 
-    let current_accumulator_hash = BachedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&readable_map);
+    let current_accumulator_hash = BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&readable_map);
 
     {
         println!("Contributing on top of the hash:");
@@ -139,12 +149,12 @@ fn main() {
             }
             print!(" ");
         }
-        println!("");
+        println!();
     }
 
     (&mut writable_map[0..]).write(current_accumulator_hash.as_slice()).expect("unable to write a challenge hash to mmap");
 
-    writable_map.flush().expect("unable to write hash to `./response`");
+    writable_map.flush().expect("unable to write hash to response file");
     }
 
     // Construct our keypair using the RNG we created above
@@ -154,7 +164,7 @@ fn main() {
     println!("Computing and writing your contribution, this could take a while...");
 
     // this computes a transformation and writes it
-    BachedAccumulator::<Bn256, Bn256CeremonyParameters>::transform(
+    BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::transform(
         &readable_map,
         &mut writable_map,
         INPUT_IS_COMPRESSED,
@@ -162,18 +172,18 @@ fn main() {
         CHECK_INPUT_CORRECTNESS,
         &privkey
     ).expect("must transform with the key");
-    println!("Finihsing writing your contribution to `./response`...");
+    println!("Finishing writing your contribution to response file...");
 
     // Write the public key
     pubkey.write::<Bn256CeremonyParameters>(&mut writable_map, COMPRESS_THE_OUTPUT).expect("unable to write public key");
 
     // Get the hash of the contribution, so the user can compare later
     let output_readonly = writable_map.make_read_only().expect("must make a map readonly");
-    let contribution_hash = BachedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&output_readonly);
+    let contribution_hash = BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&output_readonly);
 
     print!("Done!\n\n\
-            Your contribution has been written to `./response`\n\n\
-            The BLAKE2b hash of `./response` is:\n");
+            Your contribution has been written to response file\n\n\
+            The BLAKE2b hash of response file is:\n");
 
     for line in contribution_hash.as_slice().chunks(16) {
         print!("\t");
@@ -183,7 +193,7 @@ fn main() {
         }
         print!(" ");
     }
-    println!("");
+    println!();
 
     println!("Thank you for your participation, much appreciated! :)");
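The beacon RNG above is seeded by iterating SHA-256 2^N times over a fixed public value (the binary hardcodes a Bitcoin block hash). A self-contained sketch of that loop with rust-crypto, starting from zeroes instead of the hardcoded hash:

extern crate crypto;

use crypto::digest::Digest;
use crypto::sha2::Sha256;

fn main() {
    // placeholder start value; the real binary uses a Bitcoin block hash here
    let mut cur_hash = [0u8; 32];
    const N: u64 = 10;
    for _ in 0..(1u64 << N) {
        let mut h = Sha256::new();
        h.input(&cur_hash);
        h.result(&mut cur_hash);
    }
    for b in cur_hash.iter() {
        print!("{:02x}", b);
    }
    println!();
}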
@@ -4,10 +4,10 @@ extern crate memmap;
 extern crate rand;
 extern crate blake2;
 extern crate byteorder;
+extern crate exitcode;
 
-// use powersoftau::bn256::{Bn256CeremonyParameters};
-use powersoftau::small_bn256::{Bn256CeremonyParameters};
-use powersoftau::batched_accumulator::{BachedAccumulator};
+use powersoftau::bn256::{Bn256CeremonyParameters};
+use powersoftau::batched_accumulator::{BatchedAccumulator};
 use powersoftau::keypair::{keypair};
 use powersoftau::parameters::{UseCompression, CheckForCorrectness};
 
@@ -24,6 +24,14 @@ const COMPRESS_THE_OUTPUT: UseCompression = UseCompression::Yes;
 const CHECK_INPUT_CORRECTNESS: CheckForCorrectness = CheckForCorrectness::No;
 
 fn main() {
+    let args: Vec<String> = std::env::args().collect();
+    if args.len() != 3 {
+        println!("Usage: \n<challenge_file> <response_file>");
+        std::process::exit(exitcode::USAGE);
+    }
+    let challenge_filename = &args[1];
+    let response_filename = &args[2];
+
     println!("Will contribute to accumulator for 2^{} powers of tau", Bn256CeremonyParameters::REQUIRED_POWER);
     println!("In total will generate up to {} powers", Bn256CeremonyParameters::TAU_POWERS_G1_LENGTH);
 
@@ -65,13 +73,13 @@ fn main() {
         ChaChaRng::from_seed(&seed)
     };
 
-    // Try to load `./challenge` from disk.
+    // Try to load challenge file from disk.
     let reader = OpenOptions::new()
                             .read(true)
-                            .open("challenge").expect("unable open `./challenge` in this directory");
+                            .open(challenge_filename)
+                            .expect("unable open challenge file");
     {
-        let metadata = reader.metadata().expect("unable to get filesystem metadata for `./challenge`");
+        let metadata = reader.metadata().expect("unable to get filesystem metadata for challenge file");
         let expected_challenge_length = match INPUT_IS_COMPRESSED {
             UseCompression::Yes => {
                 Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE
@@ -82,18 +90,19 @@ fn main() {
         };
 
         if metadata.len() != (expected_challenge_length as u64) {
-            panic!("The size of `./challenge` should be {}, but it's {}, so something isn't right.", expected_challenge_length, metadata.len());
+            panic!("The size of challenge file should be {}, but it's {}, so something isn't right.", expected_challenge_length, metadata.len());
        }
     }
 
     let readable_map = unsafe { MmapOptions::new().map(&reader).expect("unable to create a memory map for input") };
 
-    // Create `./response` in this directory
+    // Create response file in this directory
    let writer = OpenOptions::new()
                             .read(true)
                             .write(true)
                             .create_new(true)
-                            .open("response").expect("unable to create `./response` in this directory");
+                            .open(response_filename)
+                            .expect("unable to create response file");
 
     let required_output_length = match COMPRESS_THE_OUTPUT {
         UseCompression::Yes => {
@@ -111,7 +120,7 @@ fn main() {
     println!("Calculating previous contribution hash...");
 
     assert!(UseCompression::No == INPUT_IS_COMPRESSED, "Hashing the compressed file in not yet defined");
-    let current_accumulator_hash = BachedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&readable_map);
+    let current_accumulator_hash = BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&readable_map);
 
     {
         println!("`challenge` file contains decompressed points and has a hash:");
@@ -123,12 +132,12 @@ fn main() {
             }
             print!(" ");
         }
-        println!("");
+        println!();
     }
 
     (&mut writable_map[0..]).write(current_accumulator_hash.as_slice()).expect("unable to write a challenge hash to mmap");
 
-    writable_map.flush().expect("unable to write hash to `./response`");
+    writable_map.flush().expect("unable to write hash to response file");
     }
 
     {
@@ -145,7 +154,7 @@ fn main() {
             }
             print!(" ");
         }
-        println!("");
+        println!();
     }
 }
 
@@ -156,7 +165,7 @@ fn main() {
     println!("Computing and writing your contribution, this could take a while...");
 
     // this computes a transformation and writes it
-    BachedAccumulator::<Bn256, Bn256CeremonyParameters>::transform(
+    BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::transform(
         &readable_map,
         &mut writable_map,
         INPUT_IS_COMPRESSED,
@@ -165,7 +174,7 @@ fn main() {
         &privkey
     ).expect("must transform with the key");
 
-    println!("Finihsing writing your contribution to `./response`...");
+    println!("Finishing writing your contribution to response file...");
 
     // Write the public key
     pubkey.write::<Bn256CeremonyParameters>(&mut writable_map, COMPRESS_THE_OUTPUT).expect("unable to write public key");
 
@@ -174,11 +183,11 @@ fn main() {
 
     // Get the hash of the contribution, so the user can compare later
     let output_readonly = writable_map.make_read_only().expect("must make a map readonly");
-    let contribution_hash = BachedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&output_readonly);
+    let contribution_hash = BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&output_readonly);
 
     print!("Done!\n\n\
-            Your contribution has been written to `./response`\n\n\
-            The BLAKE2b hash of `./response` is:\n");
+            Your contribution has been written to response file\n\n\
+            The BLAKE2b hash of response file is:\n");
 
     for line in contribution_hash.as_slice().chunks(16) {
         print!("\t");
@@ -188,7 +197,7 @@ fn main() {
         }
         print!(" ");
     }
-    println!("");
+    println!();
 
     println!("Thank you for your participation, much appreciated! :)");
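All the binaries in this commit now share the same argument-handling shape. Isolated, it is just the following (using the exitcode crate's conventional USAGE code, 64):

extern crate exitcode;

fn main() {
    let args: Vec<String> = std::env::args().collect();
    if args.len() != 3 {
        println!("Usage: \n<challenge_file> <response_file>");
        std::process::exit(exitcode::USAGE);
    }
    let challenge_filename = &args[1];
    let response_filename = &args[2];
    println!("challenge: {}, response: {}", challenge_filename, response_filename);
}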
@@ -1,8 +1,7 @@
 extern crate powersoftau;
 extern crate bellman_ce;
 
-// use powersoftau::bn256::{Bn256CeremonyParameters};
-use powersoftau::small_bn256::{Bn256CeremonyParameters};
+use powersoftau::bn256::{Bn256CeremonyParameters};
 use powersoftau::accumulator::{Accumulator};
 use powersoftau::utils::{blank_hash};
 use powersoftau::parameters::{UseCompression};
@@ -12,23 +11,31 @@ use std::io::{Write, BufWriter};
 use bellman_ce::pairing::bn256::Bn256;
 
 fn main() {
-    let writer = OpenOptions::new()
+    let args: Vec<String> = std::env::args().collect();
+    if args.len() != 2 {
+        println!("Usage: \n<challenge_file>");
+        std::process::exit(exitcode::USAGE);
+    }
+    let challenge_filename = &args[1];
+
+    let file = OpenOptions::new()
                             .read(false)
                             .write(true)
                             .create_new(true)
-                            .open("challenge").expect("unable to create `./challenge`");
+                            .open(challenge_filename)
+                            .expect("unable to create challenge file");
 
-    let mut writer = BufWriter::new(writer);
+    let mut writer = BufWriter::new(file);
 
     // Write a blank BLAKE2b hash:
-    writer.write_all(&blank_hash().as_slice()).expect("unable to write blank hash to `./challenge`");
+    writer.write_all(&blank_hash().as_slice()).expect("unable to write blank hash to challenge file");
 
     let parameters = Bn256CeremonyParameters{};
 
     let acc: Accumulator<Bn256, _> = Accumulator::new(parameters);
     println!("Writing an empty accumulator to disk");
-    acc.serialize(&mut writer, UseCompression::No).expect("unable to write fresh accumulator to `./challenge`");
+    acc.serialize(&mut writer, UseCompression::No).expect("unable to write fresh accumulator to challenge file");
     writer.flush().expect("unable to flush accumulator to disk");
 
-    println!("Wrote a fresh accumulator to `./challenge`");
+    println!("Wrote a fresh accumulator to challenge file");
 }
@@ -2,9 +2,9 @@ extern crate powersoftau;
 extern crate bellman_ce;
 extern crate memmap;
 
-// use powersoftau::bn256::{Bn256CeremonyParameters};
-use powersoftau::small_bn256::{Bn256CeremonyParameters};
-use powersoftau::batched_accumulator::{BachedAccumulator};
+use powersoftau::bn256::{Bn256CeremonyParameters};
+use powersoftau::batched_accumulator::{BatchedAccumulator};
 use powersoftau::parameters::{UseCompression};
 use powersoftau::utils::{blank_hash};
 
@@ -18,6 +18,13 @@ use powersoftau::parameters::PowersOfTauParameters;
 const COMPRESS_NEW_CHALLENGE: UseCompression = UseCompression::No;
 
 fn main() {
+    let args: Vec<String> = std::env::args().collect();
+    if args.len() != 2 {
+        println!("Usage: \n<challenge_file>");
+        std::process::exit(exitcode::USAGE);
+    }
+    let challenge_filename = &args[1];
+
     println!("Will generate an empty accumulator for 2^{} powers of tau", Bn256CeremonyParameters::REQUIRED_POWER);
     println!("In total will generate up to {} powers", Bn256CeremonyParameters::TAU_POWERS_G1_LENGTH);
 
@@ -25,7 +32,8 @@ fn main() {
                             .read(true)
                             .write(true)
                             .create_new(true)
-                            .open("challenge").expect("unable to create `./challenge`");
+                            .open(challenge_filename)
+                            .expect("unable to create challenge file");
 
     let expected_challenge_length = match COMPRESS_NEW_CHALLENGE {
         UseCompression::Yes => {
@@ -43,7 +51,7 @@ fn main() {
     // Write a blank BLAKE2b hash:
     let hash = blank_hash();
     (&mut writable_map[0..]).write(hash.as_slice()).expect("unable to write a default hash to mmap");
-    writable_map.flush().expect("unable to write blank hash to `./challenge`");
+    writable_map.flush().expect("unable to write blank hash to challenge file");
 
     println!("Blank hash for an empty challenge:");
     for line in hash.as_slice().chunks(16) {
@@ -54,15 +62,15 @@ fn main() {
         }
         print!(" ");
     }
-    println!("");
+    println!();
 
-    BachedAccumulator::<Bn256, Bn256CeremonyParameters>::generate_initial(&mut writable_map, COMPRESS_NEW_CHALLENGE).expect("generation of initial accumulator is successful");
+    BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::generate_initial(&mut writable_map, COMPRESS_NEW_CHALLENGE).expect("generation of initial accumulator is successful");
     writable_map.flush().expect("unable to flush memmap to disk");
 
     // Get the hash of the contribution, so the user can compare later
     let output_readonly = writable_map.make_read_only().expect("must make a map readonly");
-    let contribution_hash = BachedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&output_readonly);
+    let contribution_hash = BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&output_readonly);
 
     println!("Empty contribution is formed with a hash:");
 
@@ -74,8 +82,8 @@ fn main() {
         }
         print!(" ");
     }
-    println!("");
+    println!();
 
-    println!("Wrote a fresh accumulator to `./challenge`");
+    println!("Wrote a fresh accumulator to challenge file");
 }
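The challenge and response files are written through a pre-sized memory map rather than a buffered writer, so chunks can land at arbitrary offsets. The pattern, reduced to its essentials (memmap 0.7; the size here is a placeholder for the parameter-derived length):

extern crate memmap;

use std::fs::OpenOptions;
use std::io::Write;
use memmap::MmapOptions;

fn main() -> std::io::Result<()> {
    let file = OpenOptions::new()
        .read(true)
        .write(true)
        .create_new(true)
        .open("challenge_demo")?;
    file.set_len(1 << 20)?; // placeholder; really ACCUMULATOR_BYTE_SIZE
    let mut writable_map = unsafe { MmapOptions::new().map_mut(&file)? };
    // the first 64 bytes always carry the previous hash (blank for a fresh start)
    (&mut writable_map[0..]).write(&[0u8; 64])?;
    writable_map.flush()?;
    Ok(())
}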
@@ -7,7 +7,7 @@ extern crate bellman_ce;
 use bellman_ce::pairing::{CurveAffine, CurveProjective};
 use bellman_ce::pairing::bn256::Bn256;
 use bellman_ce::pairing::bn256::{G1, G2};
-use powersoftau::small_bn256::{Bn256CeremonyParameters};
+use powersoftau::bn256::{Bn256CeremonyParameters};
 use powersoftau::batched_accumulator::*;
 use powersoftau::*;
 
@@ -29,14 +29,21 @@ fn log_2(x: u64) -> u32 {
 }
 
 fn main() {
-    // Try to load `./response` from disk.
+    let args: Vec<String> = std::env::args().collect();
+    if args.len() != 2 {
+        println!("Usage: \n<response_filename>");
+        std::process::exit(exitcode::USAGE);
+    }
+    let response_filename = &args[1];
+
+    // Try to load response file from disk.
     let reader = OpenOptions::new()
                             .read(true)
-                            .open("response")
-                            .expect("unable open `./response` in this directory");
+                            .open(response_filename)
+                            .expect("unable open response file in this directory");
     let response_readable_map = unsafe { MmapOptions::new().map(&reader).expect("unable to create a memory map for input") };
 
-    let current_accumulator = BachedAccumulator::<Bn256, Bn256CeremonyParameters>::deserialize(
+    let current_accumulator = BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::deserialize(
         &response_readable_map,
         CheckForCorrectness::Yes,
         UseCompression::Yes,
@ -7,7 +7,7 @@ extern crate bellman_ce;
|
|||||||
use bellman_ce::pairing::{CurveAffine, CurveProjective};
|
use bellman_ce::pairing::{CurveAffine, CurveProjective};
|
||||||
use bellman_ce::pairing::bn256::Bn256;
|
use bellman_ce::pairing::bn256::Bn256;
|
||||||
use bellman_ce::pairing::bn256::{G1, G2};
|
use bellman_ce::pairing::bn256::{G1, G2};
|
||||||
use powersoftau::small_bn256::{Bn256CeremonyParameters};
|
use powersoftau::bn256::{Bn256CeremonyParameters};
|
||||||
use powersoftau::batched_accumulator::*;
|
use powersoftau::batched_accumulator::*;
|
||||||
use powersoftau::accumulator::HashWriter;
|
use powersoftau::accumulator::HashWriter;
|
||||||
use powersoftau::*;
|
use powersoftau::*;
|
||||||
@ -36,7 +36,7 @@ fn log_2(x: u64) -> u32 {
|
|||||||
// given the current state of the accumulator and the last
|
// given the current state of the accumulator and the last
|
||||||
// response file hash.
|
// response file hash.
|
||||||
fn get_challenge_file_hash(
|
fn get_challenge_file_hash(
|
||||||
acc: &mut BachedAccumulator::<Bn256, Bn256CeremonyParameters>,
|
acc: &mut BatchedAccumulator::<Bn256, Bn256CeremonyParameters>,
|
||||||
last_response_file_hash: &[u8; 64],
|
last_response_file_hash: &[u8; 64],
|
||||||
is_initial: bool,
|
is_initial: bool,
|
||||||
) -> [u8; 64]
|
) -> [u8; 64]
|
||||||
@ -61,10 +61,10 @@ fn get_challenge_file_hash(
|
|||||||
let mut writable_map = unsafe { MmapOptions::new().map_mut(&writer).expect("unable to create a memory map for output") };
|
let mut writable_map = unsafe { MmapOptions::new().map_mut(&writer).expect("unable to create a memory map for output") };
|
||||||
|
|
||||||
(&mut writable_map[0..]).write(&last_response_file_hash[..]).expect("unable to write a default hash to mmap");
|
(&mut writable_map[0..]).write(&last_response_file_hash[..]).expect("unable to write a default hash to mmap");
|
||||||
writable_map.flush().expect("unable to write blank hash to `./challenge`");
|
writable_map.flush().expect("unable to write blank hash to challenge file");
|
||||||
|
|
||||||
if is_initial {
|
if is_initial {
|
||||||
BachedAccumulator::<Bn256, Bn256CeremonyParameters>::generate_initial(&mut writable_map, UseCompression::No).expect("generation of initial accumulator is successful");
|
BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::generate_initial(&mut writable_map, UseCompression::No).expect("generation of initial accumulator is successful");
|
||||||
} else {
|
} else {
|
||||||
acc.serialize(
|
acc.serialize(
|
||||||
&mut writable_map,
|
&mut writable_map,
|
||||||
@ -95,7 +95,7 @@ fn get_challenge_file_hash(
|
|||||||
// accumulator, the player's public key, and the challenge
|
// accumulator, the player's public key, and the challenge
|
||||||
// file's hash.
|
// file's hash.
|
||||||
fn get_response_file_hash(
|
fn get_response_file_hash(
|
||||||
acc: &mut BachedAccumulator::<Bn256, Bn256CeremonyParameters>,
|
acc: &mut BatchedAccumulator::<Bn256, Bn256CeremonyParameters>,
|
||||||
pubkey: &PublicKey::<Bn256>,
|
pubkey: &PublicKey::<Bn256>,
|
||||||
last_challenge_file_hash: &[u8; 64]
|
last_challenge_file_hash: &[u8; 64]
|
||||||
) -> [u8; 64]
|
) -> [u8; 64]
|
||||||
@@ -119,7 +119,7 @@ fn get_response_file_hash(
     let mut writable_map = unsafe { MmapOptions::new().map_mut(&writer).expect("unable to create a memory map for output") };

     (&mut writable_map[0..]).write(&last_challenge_file_hash[..]).expect("unable to write a default hash to mmap");
-    writable_map.flush().expect("unable to write blank hash to `./challenge`");
+    writable_map.flush().expect("unable to write blank hash to challenge file");

     acc.serialize(
         &mut writable_map,
@@ -147,7 +147,7 @@ fn get_response_file_hash(
     tmp
 }

-fn new_accumulator_for_verify() -> BachedAccumulator<Bn256, Bn256CeremonyParameters> {
+fn new_accumulator_for_verify() -> BatchedAccumulator<Bn256, Bn256CeremonyParameters> {
     let file_name = "tmp_initial_challenge";
     {
         if Path::new(file_name).exists() {
@@ -158,22 +158,24 @@ fn new_accumulator_for_verify() -> BachedAccumulator<Bn256, Bn256CeremonyParameters> {
             .read(true)
             .write(true)
             .create_new(true)
-            .open(file_name).expect("unable to create `./tmp_initial_challenge`");
+            .open(file_name)
+            .expect("unable to create `./tmp_initial_challenge`");

         let expected_challenge_length = Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE;
         file.set_len(expected_challenge_length as u64).expect("unable to allocate large enough file");

         let mut writable_map = unsafe { MmapOptions::new().map_mut(&file).expect("unable to create a memory map") };
-        BachedAccumulator::<Bn256, Bn256CeremonyParameters>::generate_initial(&mut writable_map, UseCompression::No).expect("generation of initial accumulator is successful");
+        BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::generate_initial(&mut writable_map, UseCompression::No).expect("generation of initial accumulator is successful");
         writable_map.flush().expect("unable to flush memmap to disk");
     }

     let reader = OpenOptions::new()
         .read(true)
         .open(file_name)
-        .expect("unable open `./transcript` in this directory");
+        .expect("unable open transcript file in this directory");

     let readable_map = unsafe { MmapOptions::new().map(&reader).expect("unable to create a memory map for input") };
-    let initial_accumulator = BachedAccumulator::deserialize(
+    let initial_accumulator = BatchedAccumulator::deserialize(
         &readable_map,
         CheckForCorrectness::Yes,
         UseCompression::No,
@@ -183,11 +185,19 @@ fn new_accumulator_for_verify() -> BachedAccumulator<Bn256, Bn256CeremonyParameters> {
 }

 fn main() {
-    // Try to load `./transcript` from disk.
+    let args: Vec<String> = std::env::args().collect();
+    if args.len() != 2 {
+        println!("Usage: \n<transcript_file>");
+        std::process::exit(exitcode::USAGE);
+    }
+    let transcript_filename = &args[1];
+
+    // Try to load transcript file from disk.
     let reader = OpenOptions::new()
         .read(true)
-        .open("transcript")
-        .expect("unable open `./transcript` in this directory");
+        .open(transcript_filename)
+        .expect("unable open transcript file in this directory");

     let transcript_readable_map = unsafe { MmapOptions::new().map(&reader).expect("unable to create a memory map for input") };

     // Initialize the accumulator
@@ -235,7 +245,7 @@ fn main() {
     // uncompressed form so that we can more efficiently
     // deserialize it.

-    let mut response_file_accumulator = BachedAccumulator::deserialize(
+    let mut response_file_accumulator = BatchedAccumulator::deserialize(
         &response_readable_map,
         CheckForCorrectness::Yes,
         UseCompression::Yes,
@@ -264,7 +274,7 @@ fn main() {
         println!(" ... FAILED");
         panic!("INVALID RESPONSE FILE!");
     } else {
-        println!("");
+        println!();
     }

     current_accumulator = response_file_accumulator;
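Both helpers above reduce to the same primitive: a BLAKE2b-512 digest over a whole memory-mapped file. A minimal standalone sketch of that primitive (the `hash_file` helper is hypothetical and for illustration only, assuming the `blake2` and `memmap` crates already used in this repo):

extern crate blake2;
extern crate memmap;

use std::fs::OpenOptions;
use blake2::{Blake2b, Digest};
use memmap::MmapOptions;

fn hash_file(path: &str) -> [u8; 64] {
    let file = OpenOptions::new().read(true).open(path).expect("unable to open file");
    let map = unsafe { MmapOptions::new().map(&file).expect("unable to map file") };

    // BLAKE2b with its default 64-byte output, fed the full mapped contents
    let mut hasher = Blake2b::default();
    hasher.input(&map);
    let digest = hasher.result();

    let mut out = [0u8; 64];
    out.copy_from_slice(digest.as_slice());
    out
}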
@@ -5,9 +5,8 @@ extern crate rand;
 extern crate blake2;
 extern crate byteorder;

-// use powersoftau::bn256::{Bn256CeremonyParameters};
-use powersoftau::small_bn256::{Bn256CeremonyParameters};
-use powersoftau::batched_accumulator::{BachedAccumulator};
+use powersoftau::bn256::{Bn256CeremonyParameters};
+use powersoftau::batched_accumulator::{BatchedAccumulator};
 use powersoftau::keypair::{PublicKey};
 use powersoftau::parameters::{UseCompression, CheckForCorrectness};
@@ -24,15 +23,25 @@ const CONTRIBUTION_IS_COMPRESSED: UseCompression = UseCompression::Yes;
 const COMPRESS_NEW_CHALLENGE: UseCompression = UseCompression::No;

 fn main() {
+    let args: Vec<String> = std::env::args().collect();
+    if args.len() != 4 {
+        println!("Usage: \n<challenge_file> <response_file> <new_challenge_file>");
+        std::process::exit(exitcode::USAGE);
+    }
+    let challenge_filename = &args[1];
+    let response_filename = &args[2];
+    let new_challenge_filename = &args[3];
+
     println!("Will verify and decompress a contribution to accumulator for 2^{} powers of tau", Bn256CeremonyParameters::REQUIRED_POWER);

-    // Try to load `./challenge` from disk.
+    // Try to load challenge file from disk.
     let challenge_reader = OpenOptions::new()
         .read(true)
-        .open("challenge").expect("unable open `./challenge` in this directory");
+        .open(challenge_filename)
+        .expect("unable open challenge file in this directory");

     {
-        let metadata = challenge_reader.metadata().expect("unable to get filesystem metadata for `./challenge`");
+        let metadata = challenge_reader.metadata().expect("unable to get filesystem metadata for challenge file");
         let expected_challenge_length = match PREVIOUS_CHALLENGE_IS_COMPRESSED {
             UseCompression::Yes => {
                 Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE - Bn256CeremonyParameters::PUBLIC_KEY_SIZE
@@ -42,19 +51,20 @@ fn main() {
             }
         };
         if metadata.len() != (expected_challenge_length as u64) {
-            panic!("The size of `./challenge` should be {}, but it's {}, so something isn't right.", expected_challenge_length, metadata.len());
+            panic!("The size of challenge file should be {}, but it's {}, so something isn't right.", expected_challenge_length, metadata.len());
         }
     }

     let challenge_readable_map = unsafe { MmapOptions::new().map(&challenge_reader).expect("unable to create a memory map for input") };

-    // Try to load `./response` from disk.
+    // Try to load response file from disk.
     let response_reader = OpenOptions::new()
         .read(true)
-        .open("response").expect("unable open `./response` in this directory");
+        .open(response_filename)
+        .expect("unable open response file in this directory");

     {
-        let metadata = response_reader.metadata().expect("unable to get filesystem metadata for `./response`");
+        let metadata = response_reader.metadata().expect("unable to get filesystem metadata for response file");
         let expected_response_length = match CONTRIBUTION_IS_COMPRESSED {
             UseCompression::Yes => {
                 Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE
@@ -64,7 +74,7 @@ fn main() {
             }
         };
         if metadata.len() != (expected_response_length as u64) {
-            panic!("The size of `./response` should be {}, but it's {}, so something isn't right.", expected_response_length, metadata.len());
+            panic!("The size of response file should be {}, but it's {}, so something isn't right.", expected_response_length, metadata.len());
         }
     }
@@ -74,7 +84,7 @@ fn main() {

     // Check that contribution is correct

-    let current_accumulator_hash = BachedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&challenge_readable_map);
+    let current_accumulator_hash = BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&challenge_readable_map);

     println!("Hash of the `challenge` file for verification:");
     for line in current_accumulator_hash.as_slice().chunks(16) {
@@ -85,7 +95,7 @@ fn main() {
            }
            print!(" ");
        }
-       println!("");
+       println!();
    }

    // Check the hash chain - a new response must be based on the previous challenge!
@@ -103,7 +113,7 @@ fn main() {
            }
            print!(" ");
        }
-       println!("");
+       println!();
    }

    if &response_challenge_hash[..] != current_accumulator_hash.as_slice() {
@@ -111,9 +121,9 @@ fn main() {
        }
    }

-   let response_hash = BachedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&response_readable_map);
+   let response_hash = BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&response_readable_map);

-   println!("Hash of the `response` file for verification:");
+   println!("Hash of the response file for verification:");
    for line in response_hash.as_slice().chunks(16) {
        print!("\t");
        for section in line.chunks(4) {
@@ -122,7 +132,7 @@ fn main() {
            }
            print!(" ");
        }
-       println!("");
+       println!();
    }

    // get the contributor's public key
@@ -134,7 +144,7 @@ fn main() {

    println!("Verifying a contribution to contain proper powers and correspond to the public key...");

-   let valid = BachedAccumulator::<Bn256, Bn256CeremonyParameters>::verify_transformation(
+   let valid = BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::verify_transformation(
        &challenge_readable_map,
        &response_readable_map,
        &public_key,
@@ -153,20 +163,21 @@ fn main() {
    }

    if COMPRESS_NEW_CHALLENGE == UseCompression::Yes {
-       println!("Don't need to recompress the contribution, please copy `./response` as `./new_challenge`");
+       println!("Don't need to recompress the contribution, please copy response file as new challenge");
    } else {
-       println!("Verification succeeded! Writing to `./new_challenge`...");
+       println!("Verification succeeded! Writing to new challenge file...");

-       // Create `./new_challenge` in this directory
+       // Create new challenge file in this directory
        let writer = OpenOptions::new()
            .read(true)
            .write(true)
            .create_new(true)
-           .open("new_challenge").expect("unable to create `./new_challenge` in this directory");
+           .open(new_challenge_filename)
+           .expect("unable to create new challenge file in this directory");

-       // Recomputation stips the public key and uses hashing to link with the previous contibution after decompression
+       // Recomputation strips the public key and uses hashing to link with the previous contribution after decompression
        writer.set_len(Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE as u64).expect("must make output file large enough");

        let mut writable_map = unsafe { MmapOptions::new().map_mut(&writer).expect("unable to create a memory map for output") };
@@ -174,10 +185,10 @@ fn main() {
        {
            (&mut writable_map[0..]).write(response_hash.as_slice()).expect("unable to write a default hash to mmap");

-           writable_map.flush().expect("unable to write hash to `./new_challenge`");
+           writable_map.flush().expect("unable to write hash to new challenge file");
        }

-       BachedAccumulator::<Bn256, Bn256CeremonyParameters>::decompress(
+       BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::decompress(
            &response_readable_map,
            &mut writable_map,
            CheckForCorrectness::No).expect("must decompress a response for a new challenge");
@@ -186,9 +197,9 @@ fn main() {

        let new_challenge_readable_map = writable_map.make_read_only().expect("must make a map readonly");

-       let recompressed_hash = BachedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&new_challenge_readable_map);
+       let recompressed_hash = BatchedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&new_challenge_readable_map);

-       println!("Here's the BLAKE2b hash of the decompressed participant's response as `new_challenge` file:");
+       println!("Here's the BLAKE2b hash of the decompressed participant's response as new_challenge file:");

        for line in recompressed_hash.as_slice().chunks(16) {
            print!("\t");
@@ -198,10 +209,10 @@ fn main() {
                }
                print!(" ");
            }
-           println!("");
+           println!();
        }

-       println!("Done! `./new_challenge` contains the new challenge file. The other files");
+       println!("Done! new challenge file contains the new challenge file. The other files");
        println!("were left alone.");
    }
}
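The hash-chain rule this verifier enforces fits in a few lines. A sketch (the `check_chain` helper is hypothetical; it assumes, as in the accumulator code below, that a file's hash is BLAKE2b-512 over its full contents and that a response embeds its parent challenge's hash in its first 64 bytes):

use blake2::{Blake2b, Digest};

fn check_chain(challenge_map: &[u8], response_map: &[u8]) -> bool {
    // recompute the hash of the challenge the response claims to extend
    let mut hasher = Blake2b::default();
    hasher.input(challenge_map);
    let challenge_hash = hasher.result();

    // the first 64 bytes of a response are the claimed parent challenge hash
    &response_map[0..64] == challenge_hash.as_slice()
}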
@@ -1,809 +0,0 @@
//! This ceremony constructs the "powers of tau" for Jens Groth's 2016 zk-SNARK proving
//! system using the BLS12-381 pairing-friendly elliptic curve construction.
//!
//! # Overview
//!
//! Participants of the ceremony receive a "challenge" file containing:
//!
//! * the BLAKE2b hash of the last file entered into the transcript
//! * an `Accumulator` (with curve points encoded in uncompressed form for fast deserialization)
//!
//! The participant runs a tool which generates a random keypair (`PublicKey`, `PrivateKey`)
//! used for modifying the `Accumulator` from the "challenge" file. The keypair is then used to
//! transform the `Accumulator`, and a "response" file is generated containing:
//!
//! * the BLAKE2b hash of the "challenge" file (thus forming a hash chain over the entire transcript)
//! * an `Accumulator` (with curve points encoded in compressed form for fast uploading)
//! * the `PublicKey`
//!
//! This "response" file is entered into the protocol transcript. A given transcript is valid
//! if the transformations between consecutive `Accumulator`s verify with their respective
//! `PublicKey`s. Participants (and the public) can ensure that their contribution to the
//! `Accumulator` was accepted by ensuring the transcript contains their "response" file, ideally
//! by comparison of the BLAKE2b hash of the "response" file.
//!
//! After some time has elapsed for participants to contribute to the ceremony, a participant is
//! simulated with a randomness beacon. The resulting `Accumulator` contains partial zk-SNARK
//! public parameters for all circuits within a bounded size.
extern crate pairing;
extern crate rand;
extern crate crossbeam;
extern crate num_cpus;
extern crate blake2;
extern crate generic_array;
extern crate typenum;
extern crate byteorder;
extern crate bellman;

use byteorder::{ReadBytesExt, BigEndian};
use rand::{SeedableRng, Rng, Rand};
use rand::chacha::ChaChaRng;
use bellman::pairing::bls12_381::*;
use bellman::pairing::*;
use std::io::{self, Read, Write};
use std::sync::{Arc, Mutex};
use generic_array::GenericArray;
use typenum::consts::U64;
use blake2::{Blake2b, Digest};
use std::fmt;

// This ceremony is based on the BLS12-381 elliptic curve construction.
const G1_UNCOMPRESSED_BYTE_SIZE: usize = 96;
const G2_UNCOMPRESSED_BYTE_SIZE: usize = 192;
const G1_COMPRESSED_BYTE_SIZE: usize = 48;
const G2_COMPRESSED_BYTE_SIZE: usize = 96;

/// The accumulator supports circuits with 2^21 multiplication gates.
const TAU_POWERS_LENGTH: usize = (1 << 21);

/// More tau powers are needed in G1 because the Groth16 H query
/// includes terms of the form tau^i * (tau^m - 1) = tau^(i+m) - tau^i
/// where the largest i = m - 2, requiring the computation of tau^(2m - 2)
/// and thus giving us a vector length of 2^22 - 1.
const TAU_POWERS_G1_LENGTH: usize = (TAU_POWERS_LENGTH << 1) - 1;
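As a sanity check on the arithmetic in that comment, a tiny standalone program (illustration only, not part of the crate):

// With m = TAU_POWERS_LENGTH = 2^21, the largest exponent the H query needs is
// i + m with i = m - 2, i.e. tau^(2m - 2). Storing tau^0 .. tau^(2m - 2)
// therefore takes 2m - 1 = 2^22 - 1 entries.
fn main() {
    let m: usize = 1 << 21;
    let largest_exponent = (m - 2) + m;          // 2m - 2
    let g1_vector_length = largest_exponent + 1; // powers start at tau^0
    assert_eq!(g1_vector_length, (m << 1) - 1);
    assert_eq!(g1_vector_length, (1 << 22) - 1);
}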
/// The size of the accumulator on disk.
pub const ACCUMULATOR_BYTE_SIZE: usize = (TAU_POWERS_G1_LENGTH * G1_UNCOMPRESSED_BYTE_SIZE) + // g1 tau powers
                                         (TAU_POWERS_LENGTH * G2_UNCOMPRESSED_BYTE_SIZE) + // g2 tau powers
                                         (TAU_POWERS_LENGTH * G1_UNCOMPRESSED_BYTE_SIZE) + // alpha tau powers
                                         (TAU_POWERS_LENGTH * G1_UNCOMPRESSED_BYTE_SIZE) // beta tau powers
                                         + G2_UNCOMPRESSED_BYTE_SIZE // beta in g2
                                         + 64; // blake2b hash of previous contribution

/// The "public key" is used to verify a contribution was correctly
/// computed.
pub const PUBLIC_KEY_SIZE: usize = 3 * G2_UNCOMPRESSED_BYTE_SIZE + // tau, alpha, and beta in g2
                                   6 * G1_UNCOMPRESSED_BYTE_SIZE; // (s1, s1*tau), (s2, s2*alpha), (s3, s3*beta) in g1

/// The size of the contribution on disk.
pub const CONTRIBUTION_BYTE_SIZE: usize = (TAU_POWERS_G1_LENGTH * G1_COMPRESSED_BYTE_SIZE) + // g1 tau powers
                                          (TAU_POWERS_LENGTH * G2_COMPRESSED_BYTE_SIZE) + // g2 tau powers
                                          (TAU_POWERS_LENGTH * G1_COMPRESSED_BYTE_SIZE) + // alpha tau powers
                                          (TAU_POWERS_LENGTH * G1_COMPRESSED_BYTE_SIZE) // beta tau powers
                                          + G2_COMPRESSED_BYTE_SIZE // beta in g2
                                          + 64 // blake2b hash of input accumulator
                                          + PUBLIC_KEY_SIZE; // public key
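Plugging the BLS12-381 byte sizes into these formulas gives a concrete sense of scale. A standalone check (illustration only, not part of the crate):

fn main() {
    let tau_g1 = (1usize << 22) - 1; // TAU_POWERS_G1_LENGTH
    let tau = 1usize << 21;          // TAU_POWERS_LENGTH

    // uncompressed challenge: ~1.2 GB
    let accumulator = tau_g1 * 96 + tau * 192 + tau * 96 + tau * 96 + 192 + 64;
    assert_eq!(accumulator, 1_207_959_712);

    // compressed response: about half that
    let public_key = 3 * 192 + 6 * 96;
    let contribution = tau_g1 * 48 + tau * 96 + tau * 48 + tau * 48 + 96 + 64 + public_key;
    assert_eq!(contribution, 603_981_040);
}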
/// Hashes to G2 using the first 32 bytes of `digest`. Panics if `digest` is less
/// than 32 bytes.
fn hash_to_g2(mut digest: &[u8]) -> G2
{
    assert!(digest.len() >= 32);

    let mut seed = Vec::with_capacity(8);

    for _ in 0..8 {
        seed.push(digest.read_u32::<BigEndian>().expect("assertion above guarantees this to work"));
    }

    ChaChaRng::from_seed(&seed).gen()
}

#[test]
fn test_hash_to_g2() {
    assert!(
        hash_to_g2(&[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33])
        ==
        hash_to_g2(&[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,34])
    );

    assert!(
        hash_to_g2(&[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32])
        !=
        hash_to_g2(&[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,33])
    );
}
/// Contains terms of the form (s<sub>1</sub>, s<sub>1</sub><sup>x</sup>, H(s<sub>1</sub><sup>x</sup>)<sub>2</sub>, H(s<sub>1</sub><sup>x</sup>)<sub>2</sub><sup>x</sup>)
/// for all x in τ, α and β, and some s chosen randomly by its creator. The function H "hashes into" the group G2. No points in the public key may be the identity.
///
/// The elements in G2 are used to verify transformations of the accumulator. By its nature, the public key proves
/// knowledge of τ, α and β.
///
/// It is necessary to verify `same_ratio`((s<sub>1</sub>, s<sub>1</sub><sup>x</sup>), (H(s<sub>1</sub><sup>x</sup>)<sub>2</sub>, H(s<sub>1</sub><sup>x</sup>)<sub>2</sub><sup>x</sup>)).
#[derive(PartialEq, Eq)]
pub struct PublicKey {
    tau_g1: (G1Affine, G1Affine),
    alpha_g1: (G1Affine, G1Affine),
    beta_g1: (G1Affine, G1Affine),
    tau_g2: G2Affine,
    alpha_g2: G2Affine,
    beta_g2: G2Affine
}

/// Contains the secrets τ, α and β that the participant of the ceremony must destroy.
pub struct PrivateKey {
    tau: Fr,
    alpha: Fr,
    beta: Fr
}

/// Constructs a keypair given an RNG and a 64-byte transcript `digest`.
pub fn keypair<R: Rng>(rng: &mut R, digest: &[u8]) -> (PublicKey, PrivateKey)
{
    assert_eq!(digest.len(), 64);

    let tau = Fr::rand(rng);
    let alpha = Fr::rand(rng);
    let beta = Fr::rand(rng);

    let mut op = |x, personalization: u8| {
        // Sample random g^s
        let g1_s = G1::rand(rng).into_affine();
        // Compute g^{s*x}
        let g1_s_x = g1_s.mul(x).into_affine();
        // Compute BLAKE2b(personalization | transcript | g^s | g^{s*x})
        let h = {
            let mut h = Blake2b::default();
            h.input(&[personalization]);
            h.input(digest);
            h.input(g1_s.into_uncompressed().as_ref());
            h.input(g1_s_x.into_uncompressed().as_ref());
            h.result()
        };
        // Hash into G2 as g^{s'}
        let g2_s = hash_to_g2(h.as_ref()).into_affine();
        // Compute g^{s'*x}
        let g2_s_x = g2_s.mul(x).into_affine();

        ((g1_s, g1_s_x), g2_s_x)
    };

    let pk_tau = op(tau, 0);
    let pk_alpha = op(alpha, 1);
    let pk_beta = op(beta, 2);

    (
        PublicKey {
            tau_g1: pk_tau.0,
            alpha_g1: pk_alpha.0,
            beta_g1: pk_beta.0,
            tau_g2: pk_tau.1,
            alpha_g2: pk_alpha.1,
            beta_g2: pk_beta.1,
        },
        PrivateKey {
            tau: tau,
            alpha: alpha,
            beta: beta
        }
    )
}
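To make the proof-of-knowledge concrete, here is a sketch of how a verifier would check the τ component of such a key. It mirrors the `compute_g2_s` closure that `verify_transform` defines below; `hash_g2_from_transcript` and `check_tau_pok` are hypothetical names used only for this illustration, and `same_ratio` is the pairing check defined later in this file:

// Re-derive g2_s deterministically from (personalization, digest, g^s, g^{s*tau}),
// exactly as the `compute_g2_s` closure in `verify_transform` does.
fn hash_g2_from_transcript(personalization: u8, digest: &[u8], g1_s: G1Affine, g1_s_x: G1Affine) -> G2Affine {
    let mut h = Blake2b::default();
    h.input(&[personalization]);
    h.input(digest);
    h.input(g1_s.into_uncompressed().as_ref());
    h.input(g1_s_x.into_uncompressed().as_ref());
    hash_to_g2(h.result().as_ref()).into_affine()
}

fn check_tau_pok(pk: &PublicKey, digest: &[u8]) -> bool {
    // personalization byte 0 is the one `keypair` uses for tau
    let g2_s = hash_g2_from_transcript(0, digest, pk.tau_g1.0, pk.tau_g1.1);
    // e(g^s, g2_s^tau) == e(g^{s*tau}, g2_s): both pairs share the ratio tau
    same_ratio(pk.tau_g1, (g2_s, pk.tau_g2))
}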
fn write_point<W, G>(
    writer: &mut W,
    p: &G,
    compression: UseCompression
) -> io::Result<()>
    where W: Write,
          G: CurveAffine
{
    match compression {
        UseCompression::Yes => writer.write_all(p.into_compressed().as_ref()),
        UseCompression::No => writer.write_all(p.into_uncompressed().as_ref()),
    }
}

/// Errors that might occur during deserialization.
#[derive(Debug)]
pub enum DeserializationError {
    IoError(io::Error),
    DecodingError(GroupDecodingError),
    PointAtInfinity
}

impl fmt::Display for DeserializationError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            DeserializationError::IoError(ref e) => write!(f, "Disk IO error: {}", e),
            DeserializationError::DecodingError(ref e) => write!(f, "Decoding error: {}", e),
            DeserializationError::PointAtInfinity => write!(f, "Point at infinity found")
        }
    }
}

impl From<io::Error> for DeserializationError {
    fn from(err: io::Error) -> DeserializationError {
        DeserializationError::IoError(err)
    }
}

impl From<GroupDecodingError> for DeserializationError {
    fn from(err: GroupDecodingError) -> DeserializationError {
        DeserializationError::DecodingError(err)
    }
}

impl PublicKey {
    /// Serialize the public key. Points are always in uncompressed form.
    pub fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()>
    {
        write_point(writer, &self.tau_g1.0, UseCompression::No)?;
        write_point(writer, &self.tau_g1.1, UseCompression::No)?;

        write_point(writer, &self.alpha_g1.0, UseCompression::No)?;
        write_point(writer, &self.alpha_g1.1, UseCompression::No)?;

        write_point(writer, &self.beta_g1.0, UseCompression::No)?;
        write_point(writer, &self.beta_g1.1, UseCompression::No)?;

        write_point(writer, &self.tau_g2, UseCompression::No)?;
        write_point(writer, &self.alpha_g2, UseCompression::No)?;
        write_point(writer, &self.beta_g2, UseCompression::No)?;

        Ok(())
    }

    /// Deserialize the public key. Points are always in uncompressed form, and
    /// always checked, since there aren't very many of them. Does not allow any
    /// points at infinity.
    pub fn deserialize<R: Read>(reader: &mut R) -> Result<PublicKey, DeserializationError>
    {
        fn read_uncompressed<C: CurveAffine, R: Read>(reader: &mut R) -> Result<C, DeserializationError> {
            let mut repr = C::Uncompressed::empty();
            reader.read_exact(repr.as_mut())?;
            let v = repr.into_affine()?;

            if v.is_zero() {
                Err(DeserializationError::PointAtInfinity)
            } else {
                Ok(v)
            }
        }

        let tau_g1_s = read_uncompressed(reader)?;
        let tau_g1_s_tau = read_uncompressed(reader)?;

        let alpha_g1_s = read_uncompressed(reader)?;
        let alpha_g1_s_alpha = read_uncompressed(reader)?;

        let beta_g1_s = read_uncompressed(reader)?;
        let beta_g1_s_beta = read_uncompressed(reader)?;

        let tau_g2 = read_uncompressed(reader)?;
        let alpha_g2 = read_uncompressed(reader)?;
        let beta_g2 = read_uncompressed(reader)?;

        Ok(PublicKey {
            tau_g1: (tau_g1_s, tau_g1_s_tau),
            alpha_g1: (alpha_g1_s, alpha_g1_s_alpha),
            beta_g1: (beta_g1_s, beta_g1_s_beta),
            tau_g2: tau_g2,
            alpha_g2: alpha_g2,
            beta_g2: beta_g2
        })
    }
}

#[test]
fn test_pubkey_serialization() {
    use rand::thread_rng;

    let rng = &mut thread_rng();
    let digest = (0..64).map(|_| rng.gen()).collect::<Vec<_>>();
    let (pk, _) = keypair(rng, &digest);
    let mut v = vec![];
    pk.serialize(&mut v).unwrap();
    assert_eq!(v.len(), PUBLIC_KEY_SIZE);
    let deserialized = PublicKey::deserialize(&mut &v[..]).unwrap();
    assert!(pk == deserialized);
}
/// The `Accumulator` is an object that participants of the ceremony contribute
/// randomness to. This object contains powers of trapdoor `tau` in G1 and in G2 over
/// fixed generators, and additionally in G1 over two other generators of exponents
/// `alpha` and `beta` over those fixed generators. In other words:
///
/// * (τ, τ<sup>2</sup>, ..., τ<sup>2<sup>22</sup> - 2</sup>, α, ατ, ατ<sup>2</sup>, ..., ατ<sup>2<sup>21</sup> - 1</sup>, β, βτ, βτ<sup>2</sup>, ..., βτ<sup>2<sup>21</sup> - 1</sup>)<sub>1</sub>
/// * (β, τ, τ<sup>2</sup>, ..., τ<sup>2<sup>21</sup> - 1</sup>)<sub>2</sub>
#[derive(PartialEq, Eq, Clone)]
pub struct Accumulator {
    /// tau^0, tau^1, tau^2, ..., tau^{TAU_POWERS_G1_LENGTH - 1}
    pub tau_powers_g1: Vec<G1Affine>,
    /// tau^0, tau^1, tau^2, ..., tau^{TAU_POWERS_LENGTH - 1}
    pub tau_powers_g2: Vec<G2Affine>,
    /// alpha * tau^0, alpha * tau^1, alpha * tau^2, ..., alpha * tau^{TAU_POWERS_LENGTH - 1}
    pub alpha_tau_powers_g1: Vec<G1Affine>,
    /// beta * tau^0, beta * tau^1, beta * tau^2, ..., beta * tau^{TAU_POWERS_LENGTH - 1}
    pub beta_tau_powers_g1: Vec<G1Affine>,
    /// beta
    pub beta_g2: G2Affine
}

impl Accumulator {
    /// Constructs an "initial" accumulator with τ = 1, α = 1, β = 1.
    pub fn new() -> Self {
        Accumulator {
            tau_powers_g1: vec![G1Affine::one(); TAU_POWERS_G1_LENGTH],
            tau_powers_g2: vec![G2Affine::one(); TAU_POWERS_LENGTH],
            alpha_tau_powers_g1: vec![G1Affine::one(); TAU_POWERS_LENGTH],
            beta_tau_powers_g1: vec![G1Affine::one(); TAU_POWERS_LENGTH],
            beta_g2: G2Affine::one()
        }
    }

    /// Write the accumulator with some compression behavior.
    pub fn serialize<W: Write>(
        &self,
        writer: &mut W,
        compression: UseCompression
    ) -> io::Result<()>
    {
        fn write_all<W: Write, C: CurveAffine>(
            writer: &mut W,
            c: &[C],
            compression: UseCompression
        ) -> io::Result<()>
        {
            for c in c {
                write_point(writer, c, compression)?;
            }

            Ok(())
        }

        write_all(writer, &self.tau_powers_g1, compression)?;
        write_all(writer, &self.tau_powers_g2, compression)?;
        write_all(writer, &self.alpha_tau_powers_g1, compression)?;
        write_all(writer, &self.beta_tau_powers_g1, compression)?;
        write_all(writer, &[self.beta_g2], compression)?;

        Ok(())
    }

    /// Read the accumulator from disk with some compression behavior. `checked`
    /// indicates whether we should check it's a valid element of the group and
    /// not the point at infinity.
    pub fn deserialize<R: Read>(
        reader: &mut R,
        compression: UseCompression,
        checked: CheckForCorrectness
    ) -> Result<Self, DeserializationError>
    {
        fn read_all<R: Read, C: CurveAffine>(
            reader: &mut R,
            size: usize,
            compression: UseCompression,
            checked: CheckForCorrectness
        ) -> Result<Vec<C>, DeserializationError>
        {
            fn decompress_all<R: Read, E: EncodedPoint>(
                reader: &mut R,
                size: usize,
                checked: CheckForCorrectness
            ) -> Result<Vec<E::Affine>, DeserializationError>
            {
                // Read the encoded elements
                let mut res = vec![E::empty(); size];

                for encoded in &mut res {
                    reader.read_exact(encoded.as_mut())?;
                }

                // Allocate space for the deserialized elements
                let mut res_affine = vec![E::Affine::zero(); size];

                let mut chunk_size = res.len() / num_cpus::get();
                if chunk_size == 0 {
                    chunk_size = 1;
                }

                // If any of our threads encounter a deserialization/IO error, catch
                // it with this.
                let decoding_error = Arc::new(Mutex::new(None));

                crossbeam::scope(|scope| {
                    for (source, target) in res.chunks(chunk_size).zip(res_affine.chunks_mut(chunk_size)) {
                        let decoding_error = decoding_error.clone();

                        scope.spawn(move || {
                            for (source, target) in source.iter().zip(target.iter_mut()) {
                                match {
                                    // If we're a participant, we don't need to check all of the
                                    // elements in the accumulator, which saves a lot of time.
                                    // The hash chain prevents this from being a problem: the
                                    // transcript guarantees that the accumulator was properly
                                    // formed.
                                    match checked {
                                        CheckForCorrectness::Yes => {
                                            // Points at infinity are never expected in the accumulator
                                            source.into_affine().map_err(|e| e.into()).and_then(|source| {
                                                if source.is_zero() {
                                                    Err(DeserializationError::PointAtInfinity)
                                                } else {
                                                    Ok(source)
                                                }
                                            })
                                        },
                                        CheckForCorrectness::No => source.into_affine_unchecked().map_err(|e| e.into())
                                    }
                                }
                                {
                                    Ok(source) => {
                                        *target = source;
                                    },
                                    Err(e) => {
                                        *decoding_error.lock().unwrap() = Some(e);
                                    }
                                }
                            }
                        });
                    }
                });

                match Arc::try_unwrap(decoding_error).unwrap().into_inner().unwrap() {
                    Some(e) => {
                        Err(e)
                    },
                    None => {
                        Ok(res_affine)
                    }
                }
            }

            match compression {
                UseCompression::Yes => decompress_all::<_, C::Compressed>(reader, size, checked),
                UseCompression::No => decompress_all::<_, C::Uncompressed>(reader, size, checked)
            }
        }

        let tau_powers_g1 = read_all(reader, TAU_POWERS_G1_LENGTH, compression, checked)?;
        let tau_powers_g2 = read_all(reader, TAU_POWERS_LENGTH, compression, checked)?;
        let alpha_tau_powers_g1 = read_all(reader, TAU_POWERS_LENGTH, compression, checked)?;
        let beta_tau_powers_g1 = read_all(reader, TAU_POWERS_LENGTH, compression, checked)?;
        let beta_g2 = read_all(reader, 1, compression, checked)?[0];

        Ok(Accumulator {
            tau_powers_g1: tau_powers_g1,
            tau_powers_g2: tau_powers_g2,
            alpha_tau_powers_g1: alpha_tau_powers_g1,
            beta_tau_powers_g1: beta_tau_powers_g1,
            beta_g2: beta_g2
        })
    }
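Since `serialize` always writes the sections in the same order, byte offsets into an uncompressed accumulator file can be computed directly. A standalone sketch (illustration only; the extra 64 bytes are the hash header that the ceremony binaries prepend to the serialized accumulator):

// Section offsets inside an uncompressed challenge file, following the
// write order of `Accumulator::serialize` after the 64-byte hash header.
fn main() {
    let g1 = 96; // G1_UNCOMPRESSED_BYTE_SIZE
    let g2 = 192; // G2_UNCOMPRESSED_BYTE_SIZE
    let m = 1usize << 21; // TAU_POWERS_LENGTH
    let m_g1 = (m << 1) - 1; // TAU_POWERS_G1_LENGTH

    let tau_g1_start = 64; // after the BLAKE2b hash of the previous contribution
    let tau_g2_start = tau_g1_start + m_g1 * g1;
    let alpha_g1_start = tau_g2_start + m * g2;
    let beta_g1_start = alpha_g1_start + m * g1;
    let beta_g2_start = beta_g1_start + m * g1;
    assert_eq!(beta_g2_start + g2, 1_207_959_712); // == ACCUMULATOR_BYTE_SIZE
}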
    /// Transforms the accumulator with a private key.
    pub fn transform(&mut self, key: &PrivateKey)
    {
        // Construct the powers of tau
        let mut taupowers = vec![Fr::zero(); TAU_POWERS_G1_LENGTH];
        let chunk_size = TAU_POWERS_G1_LENGTH / num_cpus::get();

        // Construct exponents in parallel
        crossbeam::scope(|scope| {
            for (i, taupowers) in taupowers.chunks_mut(chunk_size).enumerate() {
                scope.spawn(move || {
                    let mut acc = key.tau.pow(&[(i * chunk_size) as u64]);

                    for t in taupowers {
                        *t = acc;
                        acc.mul_assign(&key.tau);
                    }
                });
            }
        });

        /// Exponentiate a large number of points, with an optional coefficient to be applied to the
        /// exponent.
        fn batch_exp<C: CurveAffine>(bases: &mut [C], exp: &[C::Scalar], coeff: Option<&C::Scalar>) {
            assert_eq!(bases.len(), exp.len());
            let mut projective = vec![C::Projective::zero(); bases.len()];
            let chunk_size = bases.len() / num_cpus::get();

            // Perform wNAF over multiple cores, placing results into `projective`.
            crossbeam::scope(|scope| {
                for ((bases, exp), projective) in bases.chunks_mut(chunk_size)
                                                       .zip(exp.chunks(chunk_size))
                                                       .zip(projective.chunks_mut(chunk_size))
                {
                    scope.spawn(move || {
                        let mut wnaf = Wnaf::new();

                        for ((base, exp), projective) in bases.iter_mut()
                                                              .zip(exp.iter())
                                                              .zip(projective.iter_mut())
                        {
                            let mut exp = *exp;
                            if let Some(coeff) = coeff {
                                exp.mul_assign(coeff);
                            }

                            *projective = wnaf.base(base.into_projective(), 1).scalar(exp.into_repr());
                        }
                    });
                }
            });

            // Perform batch normalization
            crossbeam::scope(|scope| {
                for projective in projective.chunks_mut(chunk_size)
                {
                    scope.spawn(move || {
                        C::Projective::batch_normalization(projective);
                    });
                }
            });

            // Turn it all back into affine points
            for (projective, affine) in projective.iter().zip(bases.iter_mut()) {
                *affine = projective.into_affine();
            }
        }

        batch_exp(&mut self.tau_powers_g1, &taupowers[0..], None);
        batch_exp(&mut self.tau_powers_g2, &taupowers[0..TAU_POWERS_LENGTH], None);
        batch_exp(&mut self.alpha_tau_powers_g1, &taupowers[0..TAU_POWERS_LENGTH], Some(&key.alpha));
        batch_exp(&mut self.beta_tau_powers_g1, &taupowers[0..TAU_POWERS_LENGTH], Some(&key.beta));
        self.beta_g2 = self.beta_g2.mul(key.beta).into_affine();
    }
}
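The chunked exponent construction in `transform` is worth seeing in miniature. A toy standalone version using integers modulo a small prime instead of field elements (illustration only):

// Each chunk is seeded with tau^(i * chunk_size), then multiplied forward,
// so the chunks can be filled in parallel yet agree with serial computation.
const P: u128 = 1_000_000_007;

fn pow_mod(mut b: u128, mut e: u128) -> u128 {
    let mut r = 1;
    while e > 0 {
        if e & 1 == 1 { r = r * b % P; }
        b = b * b % P;
        e >>= 1;
    }
    r
}

fn main() {
    let tau: u128 = 123_456_789;
    let n = 16;
    let chunk_size = 4;

    let mut powers = vec![0u128; n];
    for (i, chunk) in powers.chunks_mut(chunk_size).enumerate() {
        let mut acc = pow_mod(tau, (i * chunk_size) as u128); // chunk seed
        for t in chunk {
            *t = acc;
            acc = acc * tau % P;
        }
    }

    // identical to computing every power independently
    for (i, t) in powers.iter().enumerate() {
        assert_eq!(*t, pow_mod(tau, i as u128));
    }
}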
/// Verifies a transformation of the `Accumulator` with the `PublicKey`, given a 64-byte transcript `digest`.
pub fn verify_transform(before: &Accumulator, after: &Accumulator, key: &PublicKey, digest: &[u8]) -> bool
{
    assert_eq!(digest.len(), 64);

    let compute_g2_s = |g1_s: G1Affine, g1_s_x: G1Affine, personalization: u8| {
        let mut h = Blake2b::default();
        h.input(&[personalization]);
        h.input(digest);
        h.input(g1_s.into_uncompressed().as_ref());
        h.input(g1_s_x.into_uncompressed().as_ref());
        hash_to_g2(h.result().as_ref()).into_affine()
    };

    let tau_g2_s = compute_g2_s(key.tau_g1.0, key.tau_g1.1, 0);
    let alpha_g2_s = compute_g2_s(key.alpha_g1.0, key.alpha_g1.1, 1);
    let beta_g2_s = compute_g2_s(key.beta_g1.0, key.beta_g1.1, 2);

    // Check the proofs-of-knowledge for tau/alpha/beta
    if !same_ratio(key.tau_g1, (tau_g2_s, key.tau_g2)) {
        return false;
    }
    if !same_ratio(key.alpha_g1, (alpha_g2_s, key.alpha_g2)) {
        return false;
    }
    if !same_ratio(key.beta_g1, (beta_g2_s, key.beta_g2)) {
        return false;
    }

    // Check the correctness of the generators for tau powers
    if after.tau_powers_g1[0] != G1Affine::one() {
        return false;
    }
    if after.tau_powers_g2[0] != G2Affine::one() {
        return false;
    }

    // Did the participant multiply the previous tau by the new one?
    if !same_ratio((before.tau_powers_g1[1], after.tau_powers_g1[1]), (tau_g2_s, key.tau_g2)) {
        return false;
    }

    // Did the participant multiply the previous alpha by the new one?
    if !same_ratio((before.alpha_tau_powers_g1[0], after.alpha_tau_powers_g1[0]), (alpha_g2_s, key.alpha_g2)) {
        return false;
    }

    // Did the participant multiply the previous beta by the new one?
    if !same_ratio((before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (beta_g2_s, key.beta_g2)) {
        return false;
    }
    if !same_ratio((before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (before.beta_g2, after.beta_g2)) {
        return false;
    }

    // Are the powers of tau correct?
    if !same_ratio(power_pairs(&after.tau_powers_g1), (after.tau_powers_g2[0], after.tau_powers_g2[1])) {
        return false;
    }
    if !same_ratio(power_pairs(&after.tau_powers_g2), (after.tau_powers_g1[0], after.tau_powers_g1[1])) {
        return false;
    }
    if !same_ratio(power_pairs(&after.alpha_tau_powers_g1), (after.tau_powers_g2[0], after.tau_powers_g2[1])) {
        return false;
    }
    if !same_ratio(power_pairs(&after.beta_tau_powers_g1), (after.tau_powers_g2[0], after.tau_powers_g2[1])) {
        return false;
    }

    true
}
/// Computes a random linear combination over v1/v2.
///
/// Checking that many pairs of elements are exponentiated by
/// the same `x` can be achieved (with high probability) with
/// the following technique:
///
/// Given v1 = [a, b, c] and v2 = [as, bs, cs], compute
/// (a*r1 + b*r2 + c*r3, (as)*r1 + (bs)*r2 + (cs)*r3) for some
/// random r1, r2, r3. Given (g, g^s)...
///
/// e(g, (as)*r1 + (bs)*r2 + (cs)*r3) = e(g^s, a*r1 + b*r2 + c*r3)
///
/// ... with high probability.
fn merge_pairs<G: CurveAffine>(v1: &[G], v2: &[G]) -> (G, G)
{
    use std::sync::{Arc, Mutex};
    use rand::{thread_rng};

    assert_eq!(v1.len(), v2.len());

    let chunk = (v1.len() / num_cpus::get()) + 1;

    let s = Arc::new(Mutex::new(G::Projective::zero()));
    let sx = Arc::new(Mutex::new(G::Projective::zero()));

    crossbeam::scope(|scope| {
        for (v1, v2) in v1.chunks(chunk).zip(v2.chunks(chunk)) {
            let s = s.clone();
            let sx = sx.clone();

            scope.spawn(move || {
                // We do not need to be overly cautious of the RNG
                // used for this check.
                let rng = &mut thread_rng();

                let mut wnaf = Wnaf::new();
                let mut local_s = G::Projective::zero();
                let mut local_sx = G::Projective::zero();

                for (v1, v2) in v1.iter().zip(v2.iter()) {
                    let rho = G::Scalar::rand(rng);
                    let mut wnaf = wnaf.scalar(rho.into_repr());
                    let v1 = wnaf.base(v1.into_projective());
                    let v2 = wnaf.base(v2.into_projective());

                    local_s.add_assign(&v1);
                    local_sx.add_assign(&v2);
                }

                s.lock().unwrap().add_assign(&local_s);
                sx.lock().unwrap().add_assign(&local_sx);
            });
        }
    });

    let s = s.lock().unwrap().into_affine();
    let sx = sx.lock().unwrap().into_affine();

    (s, sx)
}

/// Construct a single pair (s, s^x) for a vector of
/// the form [1, x, x^2, x^3, ...].
fn power_pairs<G: CurveAffine>(v: &[G]) -> (G, G)
{
    merge_pairs(&v[0..(v.len()-1)], &v[1..])
}

#[test]
fn test_power_pairs() {
    use rand::thread_rng;

    let rng = &mut thread_rng();

    let mut v = vec![];
    let x = Fr::rand(rng);
    let mut acc = Fr::one();
    for _ in 0..100 {
        v.push(G1Affine::one().mul(acc).into_affine());
        acc.mul_assign(&x);
    }

    let gx = G2Affine::one().mul(x).into_affine();

    assert!(same_ratio(power_pairs(&v), (G2Affine::one(), gx)));

    v[1] = v[1].mul(Fr::rand(rng)).into_affine();

    assert!(!same_ratio(power_pairs(&v), (G2Affine::one(), gx)));
}

/// Checks if pairs have the same ratio.
fn same_ratio<G1: CurveAffine>(
    g1: (G1, G1),
    g2: (G1::Pair, G1::Pair)
) -> bool
{
    g1.0.pairing_with(&g2.1) == g1.1.pairing_with(&g2.0)
}
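The doc comment on `merge_pairs` is the heart of the whole verifier, so a toy integer version of the same random-linear-combination trick may help (illustration only):

// If every element of v2 is the s-multiple of the matching element of v1,
// then any random linear combination of v1 and the same combination of v2
// also differ by exactly s: one comparison replaces many.
fn main() {
    let s: i64 = 7;
    let v1 = [3i64, 5, 11];
    let v2 = [3 * s, 5 * s, 11 * s];
    let r = [2i64, 9, 4]; // "random" coefficients

    let lhs: i64 = v1.iter().zip(&r).map(|(a, ri)| a * ri).sum();
    let rhs: i64 = v2.iter().zip(&r).map(|(b, ri)| b * ri).sum();
    assert_eq!(rhs, lhs * s);
}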
#[test]
fn test_same_ratio() {
    use rand::thread_rng;

    let rng = &mut thread_rng();

    let s = Fr::rand(rng);
    let g1 = G1Affine::one();
    let g2 = G2Affine::one();
    let g1_s = g1.mul(s).into_affine();
    let g2_s = g2.mul(s).into_affine();

    assert!(same_ratio((g1, g1_s), (g2, g2_s)));
    assert!(!same_ratio((g1_s, g1), (g2, g2_s)));
}

#[test]
fn test_accumulator_serialization() {
    use rand::thread_rng;

    let rng = &mut thread_rng();
    let mut digest = (0..64).map(|_| rng.gen()).collect::<Vec<_>>();

    let mut acc = Accumulator::new();
    let before = acc.clone();
    let (pk, sk) = keypair(rng, &digest);
    acc.transform(&sk);
    assert!(verify_transform(&before, &acc, &pk, &digest));
    digest[0] = !digest[0];
    assert!(!verify_transform(&before, &acc, &pk, &digest));
    let mut v = Vec::with_capacity(ACCUMULATOR_BYTE_SIZE - 64);
    acc.serialize(&mut v, UseCompression::No).unwrap();
    assert_eq!(v.len(), ACCUMULATOR_BYTE_SIZE - 64);
    let deserialized = Accumulator::deserialize(&mut &v[..], UseCompression::No, CheckForCorrectness::No).unwrap();
    assert!(acc == deserialized);
}

/// Compute BLAKE2b("")
pub fn blank_hash() -> GenericArray<u8, U64> {
    Blake2b::new().result()
}

/// Abstraction over a reader which hashes the data being read.
pub struct HashReader<R: Read> {
    reader: R,
    hasher: Blake2b
}

impl<R: Read> HashReader<R> {
    /// Construct a new `HashReader` given an existing `reader` by value.
    pub fn new(reader: R) -> Self {
        HashReader {
            reader: reader,
            hasher: Blake2b::default()
        }
    }

    /// Destroy this reader and return the hash of what was read.
    pub fn into_hash(self) -> GenericArray<u8, U64> {
        self.hasher.result()
    }
}

impl<R: Read> Read for HashReader<R> {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        let bytes = self.reader.read(buf)?;

        if bytes > 0 {
            self.hasher.input(&buf[0..bytes]);
        }

        Ok(bytes)
    }
}
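A minimal usage sketch for `HashReader` (the file name here is hypothetical):

use std::io::Read;

fn hash_whole_file() {
    let file = std::fs::File::open("challenge").expect("unable to open file");
    let mut reader = HashReader::new(file);
    let mut contents = Vec::new();
    reader.read_to_end(&mut contents).expect("unable to read");
    let hash = reader.into_hash(); // BLAKE2b of every byte that passed through
    println!("{} bytes hashed, first hash byte: {}", contents.len(), hash[0]);
}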
@@ -30,7 +30,13 @@ pub struct Bn256CeremonyParameters {
 }

 impl PowersOfTauParameters for Bn256CeremonyParameters {
-    const REQUIRED_POWER: usize = 26; // generate to have roughly 64 million constraints
+    #[cfg(not(feature = "smalltest"))]
+    const REQUIRED_POWER: usize = 28;
+
+    #[cfg(feature = "smalltest")]
+    const REQUIRED_POWER: usize = 10;
+    #[cfg(feature = "smalltest")]
+    const EMPIRICAL_BATCH_SIZE: usize = 1 << 8;

     // This ceremony is based on the BN256 elliptic curve construction.
     const G1_UNCOMPRESSED_BYTE_SIZE: usize = 64;
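With the `smalltest` feature the ceremony shrinks enough to run quickly in tests. A quick standalone check of the resulting challenge size (illustration only, using the BN256 byte sizes from this file):

fn main() {
    let m = 1usize << 10;    // REQUIRED_POWER = 10
    let m_g1 = (m << 1) - 1; // G1 needs 2m - 1 powers
    // G1 uncompressed = 64 bytes, G2 uncompressed = 128 bytes, plus 64-byte hash
    let accumulator = m_g1 * 64 + m * 128 + m * 64 + m * 64 + 128 + 64;
    assert_eq!(accumulator, 393_344); // ~384 KiB, versus ~100 GB at power 28
}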
@@ -70,9 +70,9 @@ pub fn keypair<R: Rng, E: Engine>(rng: &mut R, digest: &[u8]) -> (PublicKey<E>,
 {
     assert_eq!(digest.len(), 64);

-    // tau is a conribution to the "powers of tau", in a set of points of the form "tau^i * G"
+    // tau is a contribution to the "powers of tau", in a set of points of the form "tau^i * G"
     let tau = E::Fr::rand(rng);
-    // alpha and beta are a set of conrtibuitons in a form "alpha * tau^i * G" and that are required
+    // alpha and beta are a set of contributions in a form "alpha * tau^i * G" and that are required
     // for construction of the polynomials
     let alpha = E::Fr::rand(rng);
     let beta = E::Fr::rand(rng);
@@ -99,7 +99,7 @@ pub fn keypair<R: Rng, E: Engine>(rng: &mut R, digest: &[u8]) -> (PublicKey<E>,
     ((g1_s, g1_s_x), g2_s_x)
 };

-// these "public keys" are requried for for next participants to check that points are in fact
+// these "public keys" are required for next participants to check that points are in fact
 // sequential powers
 let pk_tau = op(tau, 0);
 let pk_alpha = op(alpha, 1);
@@ -1,8 +1,6 @@
 #![allow(unused_imports)]

-// pub mod bls12_381;
 pub mod bn256;
-pub mod small_bn256;
 pub mod accumulator;
 pub mod batched_accumulator;
 pub mod keypair;
@@ -1,40 +0,0 @@
extern crate rand;
extern crate crossbeam;
extern crate num_cpus;
extern crate blake2;
extern crate generic_array;
extern crate typenum;
extern crate byteorder;
extern crate bellman_ce;

use self::bellman_ce::pairing::ff::{Field, PrimeField};
use self::byteorder::{ReadBytesExt, BigEndian};
use self::rand::{SeedableRng, Rng, Rand};
use self::rand::chacha::ChaChaRng;
use self::bellman_ce::pairing::bn256::{Bn256};
use self::bellman_ce::pairing::*;
use std::io::{self, Read, Write};
use std::sync::{Arc, Mutex};
use self::generic_array::GenericArray;
use self::typenum::consts::U64;
use self::blake2::{Blake2b, Digest};
use std::fmt;

use crate::parameters::*;
use crate::keypair::*;
use crate::utils::*;

#[derive(Clone)]
pub struct Bn256CeremonyParameters {

}

impl PowersOfTauParameters for Bn256CeremonyParameters {
    const REQUIRED_POWER: usize = 28;

    // This ceremony is based on the BN256 elliptic curve construction.
    const G1_UNCOMPRESSED_BYTE_SIZE: usize = 64;
    const G2_UNCOMPRESSED_BYTE_SIZE: usize = 128;
    const G1_COMPRESSED_BYTE_SIZE: usize = 32;
    const G2_COMPRESSED_BYTE_SIZE: usize = 64;
}
@@ -1,22 +1,25 @@
 #!/bin/sh

-rm challenge
-rm response
-rm new_challenge
-rm challenge_old
-rm response_old
+rm challenge*
+rm response*
+rm transcript
 rm phase1radix*
+rm tmp_*

-cargo run --release --bin new_constrained
-cargo run --release --bin compute_constrained
-cargo run --release --bin verify_transform_constrained
+set -e

-mv challenge challenge_old
-mv response response_old
+cargo run --release --features smalltest --bin new_constrained challenge1
+yes | cargo run --release --features smalltest --bin compute_constrained challenge1 response1
+cargo run --release --features smalltest --bin verify_transform_constrained challenge1 response1 challenge2

-mv new_challenge challenge
-cargo run --release --bin beacon_constrained
-cargo run --release --bin verify_transform_constrained
+yes | cargo run --release --features smalltest --bin compute_constrained challenge2 response2
+cargo run --release --features smalltest --bin verify_transform_constrained challenge2 response2 challenge3

-cat response_old response > transcript
-cargo run --release --bin verify
+yes | cargo run --release --features smalltest --bin compute_constrained challenge3 response3
+cargo run --release --features smalltest --bin verify_transform_constrained challenge3 response3 challenge4
+
+cargo run --release --features smalltest --bin beacon_constrained challenge4 response4
+cargo run --release --features smalltest --bin verify_transform_constrained challenge4 response4 challenge5
+
+cat response1 response2 response3 response4 > transcript
+cargo run --release --features smalltest --bin verify transcript
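Read end to end, the updated script is the whole ceremony in miniature: `new_constrained` seeds challenge1, each `compute_constrained`/`verify_transform_constrained` pair consumes one challenge and emits the next, `beacon_constrained` closes the chain, and the final `verify` replays the four concatenated responses as a single transcript.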