Merge pull request #7 from matter-labs/sonic

- Make the single-threaded version available as a feature that can be used already
- Introduce features for later adding full SONIC and GM17
Alexander 2019-03-05 10:27:31 +01:00 committed by GitHub
commit 6e45a4b233
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
49 changed files with 10255 additions and 747 deletions

@ -6,7 +6,8 @@ homepage = "https://github.com/matterinc/bellman"
license = "MIT/Apache-2.0"
name = "bellman"
repository = "https://github.com/matterinc/bellman"
version = "0.1.3"
version = "0.2.0"
edition = "2018"
[lib]
crate-type = ["cdylib", "lib", "staticlib"]
@ -15,12 +16,26 @@ crate-type = ["cdylib", "lib", "staticlib"]
rand = "0.4"
bit-vec = "0.4.4"
futures = "0.1"
futures-cpupool = "0.1"
num_cpus = "1"
crossbeam = "0.3"
pairing = { git = 'https://github.com/matterinc/pairing' }
pairing = { git = 'https://github.com/matterinc/pairing', tag = "0.16.2" }
#pairing = { path = "../pairing" }
byteorder = "1"
ff = { git = 'https://github.com/matterinc/ff', features = ["derive"] }
futures-cpupool = {version = "0.1", optional = true}
num_cpus = {version = "1", optional = true}
crossbeam = {version = "0.7.1", optional = true}
tiny-keccak = {version = "1.4.2", optional = true}
[dependencies.blake2-rfc]
git = "https://github.com/gtank/blake2-rfc"
rev = "7a5b5fc99ae483a0043db7547fb79a6fa44b88a9"
[features]
default = []
default = ["multicore"]
#default = ["multicore", "gm17", "sonic"]
#default = ["singlecore"]
multicore = ["futures-cpupool", "num_cpus", "crossbeam"]
sonic = ["tiny-keccak"]
gm17 = []
singlecore = []
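
# Illustrative downstream usage (not part of this commit), assuming a consumer
# crate's manifest: disable the default "multicore" feature and opt into the
# single-threaded SONIC build instead.
[dependencies.bellman]
git = "https://github.com/matterinc/bellman"
default-features = false
features = ["singlecore", "sonic"]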

411
src/cs.rs Normal file

@ -0,0 +1,411 @@
use pairing::{Engine};
use pairing::ff::Field;
use std::ops::{Add, Sub};
use std::fmt;
use std::error::Error;
use std::io;
use std::marker::PhantomData;
/// Computations are expressed in terms of arithmetic circuits, in particular
/// rank-1 quadratic constraint systems. The `Circuit` trait represents a
/// circuit that can be synthesized. The `synthesize` method is called during
/// CRS generation and during proving.
pub trait Circuit<E: Engine> {
/// Synthesize the circuit into a rank-1 quadratic constraint system
fn synthesize<CS: ConstraintSystem<E>>(
self,
cs: &mut CS
) -> Result<(), SynthesisError>;
}
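// Illustrative only (not part of this commit; it mirrors the commented-out
// serialization test added later in this PR): a circuit proving knowledge of
// factors `a` and `b` of a public product `c`.
struct MultiplyDemo<E: Engine> {
    a: Option<E::Fr>,
    b: Option<E::Fr>
}
impl<E: Engine> Circuit<E> for MultiplyDemo<E> {
    fn synthesize<CS: ConstraintSystem<E>>(self, cs: &mut CS) -> Result<(), SynthesisError> {
        // Witness variables; the closures are only invoked when an assignment is needed.
        let a = cs.alloc(|| "a", || self.a.ok_or(SynthesisError::AssignmentMissing))?;
        let b = cs.alloc(|| "b", || self.b.ok_or(SynthesisError::AssignmentMissing))?;
        // Public input holding the product.
        let c = cs.alloc_input(|| "c", || {
            let mut tmp = self.a.ok_or(SynthesisError::AssignmentMissing)?;
            tmp.mul_assign(&self.b.ok_or(SynthesisError::AssignmentMissing)?);
            Ok(tmp)
        })?;
        // One rank-1 constraint: a * b = c.
        cs.enforce(|| "a*b=c", |lc| lc + a, |lc| lc + b, |lc| lc + c);
        Ok(())
    }
}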
/// Represents a variable in our constraint system.
#[derive(Copy, Clone, Debug)]
pub struct Variable(pub(crate) Index);
impl Variable {
/// This constructs a variable with an arbitrary index.
/// Circuit implementations are not recommended to use this.
pub fn new_unchecked(idx: Index) -> Variable {
Variable(idx)
}
/// This returns the index underlying the variable.
/// Circuit implementations are not recommended to use this.
pub fn get_unchecked(&self) -> Index {
self.0
}
}
/// Represents the index of either an input variable or
/// auxiliary variable.
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum Index {
Input(usize),
Aux(usize)
}
/// This represents a linear combination of some variables, with coefficients
/// in the scalar field of a pairing-friendly elliptic curve group.
#[derive(Clone)]
pub struct LinearCombination<E: Engine>(pub(crate) Vec<(Variable, E::Fr)>);
impl<E: Engine> AsRef<[(Variable, E::Fr)]> for LinearCombination<E> {
fn as_ref(&self) -> &[(Variable, E::Fr)] {
&self.0
}
}
impl<E: Engine> LinearCombination<E> {
pub fn zero() -> LinearCombination<E> {
LinearCombination(vec![])
}
}
impl<E: Engine> Add<(E::Fr, Variable)> for LinearCombination<E> {
type Output = LinearCombination<E>;
fn add(mut self, (coeff, var): (E::Fr, Variable)) -> LinearCombination<E> {
self.0.push((var, coeff));
self
}
}
impl<E: Engine> Sub<(E::Fr, Variable)> for LinearCombination<E> {
type Output = LinearCombination<E>;
fn sub(self, (mut coeff, var): (E::Fr, Variable)) -> LinearCombination<E> {
coeff.negate();
self + (coeff, var)
}
}
impl<E: Engine> Add<Variable> for LinearCombination<E> {
type Output = LinearCombination<E>;
fn add(self, other: Variable) -> LinearCombination<E> {
self + (E::Fr::one(), other)
}
}
impl<E: Engine> Sub<Variable> for LinearCombination<E> {
type Output = LinearCombination<E>;
fn sub(self, other: Variable) -> LinearCombination<E> {
self - (E::Fr::one(), other)
}
}
impl<'a, E: Engine> Add<&'a LinearCombination<E>> for LinearCombination<E> {
type Output = LinearCombination<E>;
fn add(mut self, other: &'a LinearCombination<E>) -> LinearCombination<E> {
for s in &other.0 {
self = self + (s.1, s.0);
}
self
}
}
impl<'a, E: Engine> Sub<&'a LinearCombination<E>> for LinearCombination<E> {
type Output = LinearCombination<E>;
fn sub(mut self, other: &'a LinearCombination<E>) -> LinearCombination<E> {
for s in &other.0 {
self = self - (s.1, s.0);
}
self
}
}
impl<'a, E: Engine> Add<(E::Fr, &'a LinearCombination<E>)> for LinearCombination<E> {
type Output = LinearCombination<E>;
fn add(mut self, (coeff, other): (E::Fr, &'a LinearCombination<E>)) -> LinearCombination<E> {
for s in &other.0 {
let mut tmp = s.1;
tmp.mul_assign(&coeff);
self = self + (tmp, s.0);
}
self
}
}
impl<'a, E: Engine> Sub<(E::Fr, &'a LinearCombination<E>)> for LinearCombination<E> {
type Output = LinearCombination<E>;
fn sub(mut self, (coeff, other): (E::Fr, &'a LinearCombination<E>)) -> LinearCombination<E> {
for s in &other.0 {
let mut tmp = s.1;
tmp.mul_assign(&coeff);
self = self - (tmp, s.0);
}
self
}
}
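// Illustrative only (not part of this commit): a helper building k*(2*x + y)
// purely from the operator overloads above; `x` and `y` are assumed to be
// previously allocated variables.
fn scaled_combination<E: Engine>(x: Variable, y: Variable, k: E::Fr) -> LinearCombination<E> {
    let mut two = E::Fr::one();
    two.double();
    // 2*x + y
    let base = LinearCombination::<E>::zero() + (two, x) + y;
    // k * (2*x + y), using the (coeff, &LinearCombination) overload
    LinearCombination::<E>::zero() + (k, &base)
}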
/// This is an error that could occur during circuit synthesis contexts,
/// such as CRS generation, proving or verification.
#[derive(Debug)]
pub enum SynthesisError {
/// During synthesis, we lacked knowledge of a variable assignment.
AssignmentMissing,
/// During synthesis, we divided by zero.
DivisionByZero,
/// During synthesis, we constructed an unsatisfiable constraint system.
Unsatisfiable,
/// During synthesis, our polynomials ended up being of too high a degree
PolynomialDegreeTooLarge,
/// During proof generation, we encountered an identity in the CRS
UnexpectedIdentity,
/// During proof generation, we encountered an I/O error with the CRS
IoError(io::Error),
/// During verification, our verifying key was malformed.
MalformedVerifyingKey,
/// During CRS generation, we observed an unconstrained auxiliary variable
UnconstrainedVariable
}
impl From<io::Error> for SynthesisError {
fn from(e: io::Error) -> SynthesisError {
SynthesisError::IoError(e)
}
}
impl Error for SynthesisError {
fn description(&self) -> &str {
match *self {
SynthesisError::AssignmentMissing => "an assignment for a variable could not be computed",
SynthesisError::DivisionByZero => "division by zero",
SynthesisError::Unsatisfiable => "unsatisfiable constraint system",
SynthesisError::PolynomialDegreeTooLarge => "polynomial degree is too large",
SynthesisError::UnexpectedIdentity => "encountered an identity element in the CRS",
SynthesisError::IoError(_) => "encountered an I/O error",
SynthesisError::MalformedVerifyingKey => "malformed verifying key",
SynthesisError::UnconstrainedVariable => "auxiliary variable was unconstrained"
}
}
}
impl fmt::Display for SynthesisError {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
if let &SynthesisError::IoError(ref e) = self {
write!(f, "I/O error: ")?;
e.fmt(f)
} else {
write!(f, "{}", self.description())
}
}
}
/// Represents a constraint system in which new variables can be
/// allocated and constraints between them formed.
pub trait ConstraintSystem<E: Engine>: Sized {
/// Represents the type of the "root" of this constraint system
/// so that nested namespaces can minimize indirection.
type Root: ConstraintSystem<E>;
/// Return the "one" input variable
fn one() -> Variable {
Variable::new_unchecked(Index::Input(0))
}
/// Allocate a private variable in the constraint system. The provided function is used to
/// determine the assignment of the variable. The given `annotation` function is invoked
/// in testing contexts in order to derive a unique name for this variable in the current
/// namespace.
fn alloc<F, A, AR>(
&mut self,
annotation: A,
f: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>;
/// Allocate a public variable in the constraint system. The provided function is used to
/// determine the assignment of the variable.
fn alloc_input<F, A, AR>(
&mut self,
annotation: A,
f: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>;
/// Enforce that `A` * `B` = `C`. The `annotation` function is invoked in testing contexts
/// in order to derive a unique name for the constraint in the current namespace.
fn enforce<A, AR, LA, LB, LC>(
&mut self,
annotation: A,
a: LA,
b: LB,
c: LC
)
where A: FnOnce() -> AR, AR: Into<String>,
LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>;
/// Create a new (sub)namespace and enter into it. Not intended
/// for downstream use; use `namespace` instead.
fn push_namespace<NR, N>(&mut self, name_fn: N)
where NR: Into<String>, N: FnOnce() -> NR;
/// Exit out of the existing namespace. Not intended for
/// downstream use; use `namespace` instead.
fn pop_namespace(&mut self);
/// Gets the "root" constraint system, bypassing the namespacing.
/// Not intended for downstream use; use `namespace` instead.
fn get_root(&mut self) -> &mut Self::Root;
/// Begin a namespace for this constraint system.
fn namespace<'a, NR, N>(
&'a mut self,
name_fn: N
) -> Namespace<'a, E, Self::Root>
where NR: Into<String>, N: FnOnce() -> NR
{
self.get_root().push_namespace(name_fn);
Namespace(self.get_root(), PhantomData)
}
}
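// Illustrative only (not part of this commit): gadgets are generic over any
// `CS: ConstraintSystem<E>` and scope their constraints with `namespace`; the
// returned guard pops the namespace when it is dropped.
fn enforce_one_is_one<E: Engine, CS: ConstraintSystem<E>>(cs: &mut CS) {
    let mut ns = cs.namespace(|| "one is one");
    let one = CS::one();
    // Trivial constraint inside the namespace: 1 * 1 = 1.
    ns.enforce(|| "1*1=1", |lc| lc + one, |lc| lc + one, |lc| lc + one);
    // `ns` is dropped here, calling pop_namespace on the root system.
}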
/// This is a "namespaced" constraint system which borrows a constraint system (pushing
/// a namespace context) and, when dropped, pops out of the namespace context.
pub struct Namespace<'a, E: Engine, CS: ConstraintSystem<E> + 'a>(&'a mut CS, PhantomData<E>);
impl<'cs, E: Engine, CS: ConstraintSystem<E>> ConstraintSystem<E> for Namespace<'cs, E, CS> {
type Root = CS::Root;
fn one() -> Variable {
CS::one()
}
fn alloc<F, A, AR>(
&mut self,
annotation: A,
f: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
{
self.0.alloc(annotation, f)
}
fn alloc_input<F, A, AR>(
&mut self,
annotation: A,
f: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
{
self.0.alloc_input(annotation, f)
}
fn enforce<A, AR, LA, LB, LC>(
&mut self,
annotation: A,
a: LA,
b: LB,
c: LC
)
where A: FnOnce() -> AR, AR: Into<String>,
LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>
{
self.0.enforce(annotation, a, b, c)
}
// Downstream users who use `namespace` will never interact with these
// functions and they will never be invoked because the namespace is
// never a root constraint system.
fn push_namespace<NR, N>(&mut self, _: N)
where NR: Into<String>, N: FnOnce() -> NR
{
panic!("only the root's push_namespace should be called");
}
fn pop_namespace(&mut self)
{
panic!("only the root's pop_namespace should be called");
}
fn get_root(&mut self) -> &mut Self::Root
{
self.0.get_root()
}
}
impl<'a, E: Engine, CS: ConstraintSystem<E>> Drop for Namespace<'a, E, CS> {
fn drop(&mut self) {
self.get_root().pop_namespace()
}
}
/// Convenience implementation of ConstraintSystem<E> for mutable references to
/// constraint systems.
impl<'cs, E: Engine, CS: ConstraintSystem<E>> ConstraintSystem<E> for &'cs mut CS {
type Root = CS::Root;
fn one() -> Variable {
CS::one()
}
fn alloc<F, A, AR>(
&mut self,
annotation: A,
f: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
{
(**self).alloc(annotation, f)
}
fn alloc_input<F, A, AR>(
&mut self,
annotation: A,
f: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
{
(**self).alloc_input(annotation, f)
}
fn enforce<A, AR, LA, LB, LC>(
&mut self,
annotation: A,
a: LA,
b: LB,
c: LC
)
where A: FnOnce() -> AR, AR: Into<String>,
LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>
{
(**self).enforce(annotation, a, b, c)
}
fn push_namespace<NR, N>(&mut self, name_fn: N)
where NR: Into<String>, N: FnOnce() -> NR
{
(**self).push_namespace(name_fn)
}
fn pop_namespace(&mut self)
{
(**self).pop_namespace()
}
fn get_root(&mut self) -> &mut Self::Root
{
(**self).get_root()
}
}

@ -15,7 +15,7 @@ use pairing::{
CurveProjective
};
use ff::{
use pairing::ff::{
Field,
PrimeField
};
@ -24,7 +24,8 @@ use super::{
SynthesisError
};
use super::multicore::Worker;
use super::worker::Worker;
pub use super::group::*;
pub struct EvaluationDomain<E: Engine, G: Group<E>> {
coeffs: Vec<G>,
@ -50,7 +51,7 @@ impl<E: Engine, G: Group<E>> EvaluationDomain<E, G> {
pub fn from_coeffs(mut coeffs: Vec<G>) -> Result<EvaluationDomain<E, G>, SynthesisError>
{
use ff::PrimeField;
use pairing::ff::PrimeField;
// Compute the size of our evaluation domain
let coeffs_len = coeffs.len();
@ -97,6 +98,59 @@ impl<E: Engine, G: Group<E>> EvaluationDomain<E, G> {
})
}
// Unlike `from_coeffs`, this expects the number of coefficients to be smaller than `num_roots_of_unity/2`, since a later multiplication will double the degree
pub fn from_coeffs_into_sized(mut coeffs: Vec<G>, size: usize) -> Result<EvaluationDomain<E, G>, SynthesisError>
{
use pairing::ff::PrimeField;
// Compute the size of our evaluation domain
assert!(size >= coeffs.len());
let coeffs_len = size;
// m is the size of the domain on which the Z polynomial does NOT vanish;
// in a normal domain Z has the form (X-1)(X-2)...(X-N)
let mut m = 1;
let mut exp = 0;
let mut omega = E::Fr::root_of_unity();
let max_degree = (1 << E::Fr::S) - 1;
if coeffs_len > max_degree {
return Err(SynthesisError::PolynomialDegreeTooLarge)
}
while m < coeffs_len {
m *= 2;
exp += 1;
// The pairing-friendly curve may not be able to support
// large enough (radix2) evaluation domains.
if exp > E::Fr::S {
return Err(SynthesisError::PolynomialDegreeTooLarge)
}
}
// If the full domain is not needed, limit it: e.g. if a 2^N-th root of unity
// is not required, square omega to obtain a 2^(N-1)-th root of unity instead.
// Compute omega, the 2^exp primitive root of unity
for _ in exp..E::Fr::S {
omega.square();
}
// Extend the coeffs vector with zeroes if necessary
coeffs.resize(m, G::group_zero());
Ok(EvaluationDomain {
coeffs: coeffs,
exp: exp,
omega: omega,
omegainv: omega.inverse().unwrap(),
geninv: E::Fr::multiplicative_generator().inverse().unwrap(),
minv: E::Fr::from_str(&format!("{}", m)).unwrap().inverse().unwrap()
})
}
pub fn fft(&mut self, worker: &Worker)
{
best_fft(&mut self.coeffs, worker, &self.omega, self.exp);
@ -110,7 +164,7 @@ impl<E: Engine, G: Group<E>> EvaluationDomain<E, G> {
let minv = self.minv;
for v in self.coeffs.chunks_mut(chunk) {
scope.spawn(move || {
scope.spawn(move |_| {
for v in v {
v.group_mul_assign(&minv);
}
@ -123,7 +177,7 @@ impl<E: Engine, G: Group<E>> EvaluationDomain<E, G> {
{
worker.scope(self.coeffs.len(), |scope, chunk| {
for (i, v) in self.coeffs.chunks_mut(chunk).enumerate() {
scope.spawn(move || {
scope.spawn(move |_| {
let mut u = g.pow(&[(i * chunk) as u64]);
for v in v.iter_mut() {
v.group_mul_assign(&u);
@ -166,7 +220,7 @@ impl<E: Engine, G: Group<E>> EvaluationDomain<E, G> {
worker.scope(self.coeffs.len(), |scope, chunk| {
for v in self.coeffs.chunks_mut(chunk) {
scope.spawn(move || {
scope.spawn(move |_| {
for v in v {
v.group_mul_assign(&i);
}
@ -181,7 +235,7 @@ impl<E: Engine, G: Group<E>> EvaluationDomain<E, G> {
worker.scope(self.coeffs.len(), |scope, chunk| {
for (a, b) in self.coeffs.chunks_mut(chunk).zip(other.coeffs.chunks(chunk)) {
scope.spawn(move || {
scope.spawn(move |_| {
for (a, b) in a.iter_mut().zip(b.iter()) {
a.group_mul_assign(&b.0);
}
@ -196,7 +250,7 @@ impl<E: Engine, G: Group<E>> EvaluationDomain<E, G> {
worker.scope(self.coeffs.len(), |scope, chunk| {
for (a, b) in self.coeffs.chunks_mut(chunk).zip(other.coeffs.chunks(chunk)) {
scope.spawn(move || {
scope.spawn(move |_| {
for (a, b) in a.iter_mut().zip(b.iter()) {
a.group_sub_assign(&b);
}
@ -206,76 +260,7 @@ impl<E: Engine, G: Group<E>> EvaluationDomain<E, G> {
}
}
pub trait Group<E: Engine>: Sized + Copy + Clone + Send + Sync {
fn group_zero() -> Self;
fn group_mul_assign(&mut self, by: &E::Fr);
fn group_add_assign(&mut self, other: &Self);
fn group_sub_assign(&mut self, other: &Self);
}
pub struct Point<G: CurveProjective>(pub G);
impl<G: CurveProjective> PartialEq for Point<G> {
fn eq(&self, other: &Point<G>) -> bool {
self.0 == other.0
}
}
impl<G: CurveProjective> Copy for Point<G> { }
impl<G: CurveProjective> Clone for Point<G> {
fn clone(&self) -> Point<G> {
*self
}
}
impl<G: CurveProjective> Group<G::Engine> for Point<G> {
fn group_zero() -> Self {
Point(G::zero())
}
fn group_mul_assign(&mut self, by: &G::Scalar) {
self.0.mul_assign(by.into_repr());
}
fn group_add_assign(&mut self, other: &Self) {
self.0.add_assign(&other.0);
}
fn group_sub_assign(&mut self, other: &Self) {
self.0.sub_assign(&other.0);
}
}
pub struct Scalar<E: Engine>(pub E::Fr);
impl<E: Engine> PartialEq for Scalar<E> {
fn eq(&self, other: &Scalar<E>) -> bool {
self.0 == other.0
}
}
impl<E: Engine> Copy for Scalar<E> { }
impl<E: Engine> Clone for Scalar<E> {
fn clone(&self) -> Scalar<E> {
*self
}
}
impl<E: Engine> Group<E> for Scalar<E> {
fn group_zero() -> Self {
Scalar(E::Fr::zero())
}
fn group_mul_assign(&mut self, by: &E::Fr) {
self.0.mul_assign(by);
}
fn group_add_assign(&mut self, other: &Self) {
self.0.add_assign(&other.0);
}
fn group_sub_assign(&mut self, other: &Self) {
self.0.sub_assign(&other.0);
}
}
fn best_fft<E: Engine, T: Group<E>>(a: &mut [T], worker: &Worker, omega: &E::Fr, log_n: u32)
pub(crate) fn best_fft<E: Engine, T: Group<E>>(a: &mut [T], worker: &Worker, omega: &E::Fr, log_n: u32)
{
let log_cpus = worker.log_num_cpus();
@ -286,7 +271,7 @@ fn best_fft<E: Engine, T: Group<E>>(a: &mut [T], worker: &Worker, omega: &E::Fr,
}
}
fn serial_fft<E: Engine, T: Group<E>>(a: &mut [T], omega: &E::Fr, log_n: u32)
pub(crate) fn serial_fft<E: Engine, T: Group<E>>(a: &mut [T], omega: &E::Fr, log_n: u32)
{
fn bitreverse(mut n: u32, l: u32) -> u32 {
let mut r = 0;
@ -331,7 +316,7 @@ fn serial_fft<E: Engine, T: Group<E>>(a: &mut [T], omega: &E::Fr, log_n: u32)
}
}
fn parallel_fft<E: Engine, T: Group<E>>(
pub(crate) fn parallel_fft<E: Engine, T: Group<E>>(
a: &mut [T],
worker: &Worker,
omega: &E::Fr,
@ -350,7 +335,7 @@ fn parallel_fft<E: Engine, T: Group<E>>(
let a = &*a;
for (j, tmp) in tmp.iter_mut().enumerate() {
scope.spawn(move || {
scope.spawn(move |_| {
// Shuffle into a sub-FFT
let omega_j = omega.pow(&[j as u64]);
let omega_step = omega.pow(&[(j as u64) << log_new_n]);
@ -378,7 +363,7 @@ fn parallel_fft<E: Engine, T: Group<E>>(
let tmp = &tmp;
for (idx, a) in a.chunks_mut(chunk).enumerate() {
scope.spawn(move || {
scope.spawn(move |_| {
let mut idx = idx * chunk;
let mask = (1 << log_cpus) - 1;
for a in a {
@ -518,7 +503,7 @@ fn test_field_element_multiplication_bn256() {
use num_cpus;
let cpus = num_cpus::get();
const SAMPLES: usize = 1 << 27;
const SAMPLES: usize = 1 << 22;
let rng = &mut rand::thread_rng();
let v1 = (0..SAMPLES).map(|_| Scalar::<Bn256>(Fr::rand(rng))).collect::<Vec<_>>();

700
src/gm17/generator.rs Normal file

@ -0,0 +1,700 @@
use super::super::verbose_flag;
use rand::Rng;
use std::sync::Arc;
use pairing::{
Engine,
Wnaf,
CurveProjective,
CurveAffine
};
use pairing::ff::{
PrimeField,
Field
};
use super::{
Parameters,
VerifyingKey
};
use crate::{
SynthesisError,
Circuit,
ConstraintSystem,
LinearCombination,
Variable,
Index
};
use crate::domain::{
EvaluationDomain,
Scalar
};
use crate::worker::{
Worker
};
// /// Generates a random common reference string for
// /// a circuit.
// pub fn generate_random_parameters<E, C, R>(
// circuit: C,
// rng: &mut R
// ) -> Result<Parameters<E>, SynthesisError>
// where E: Engine, C: Circuit<E>, R: Rng
// {
// let g1 = rng.gen();
// let g2 = rng.gen();
// let alpha = rng.gen();
// let beta = rng.gen();
// let gamma = rng.gen();
// let delta = rng.gen();
// let tau = rng.gen();
// generate_parameters::<E, C>(
// circuit,
// g1,
// g2,
// alpha,
// beta,
// gamma,
// delta,
// tau
// )
// }
/// This is our assembly structure that we'll use to synthesize the
/// circuit into a SAP. A square arithmetic program differs from a QAP in its form:
/// constraints are A*A - C = 0 instead of A*B - C = 0
struct KeypairAssembly<E: Engine> {
num_inputs: usize,
num_aux: usize,
num_constraints: usize,
num_r1cs_aux: usize,
num_r1cs_constraints: usize,
at_inputs: Vec<Vec<(E::Fr, usize)>>,
ct_inputs: Vec<Vec<(E::Fr, usize)>>,
at_aux: Vec<Vec<(E::Fr, usize)>>,
ct_aux: Vec<Vec<(E::Fr, usize)>>
}
impl<E: Engine> ConstraintSystem<E> for KeypairAssembly<E> {
type Root = Self;
fn alloc<F, A, AR>(
&mut self,
_: A,
_: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
{
// There is no assignment, so we don't even invoke the
// function for obtaining one.
let index = self.num_aux;
self.num_aux += 1;
self.num_r1cs_aux += 1;
self.at_aux.push(vec![]);
self.ct_aux.push(vec![]);
Ok(Variable(Index::Aux(index)))
}
fn alloc_input<F, A, AR>(
&mut self,
_: A,
_: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
{
// There is no assignment, so we don't even invoke the
// function for obtaining one.
let index = self.num_inputs;
self.num_inputs += 1;
self.at_inputs.push(vec![]);
self.ct_inputs.push(vec![]);
Ok(Variable(Index::Input(index)))
}
fn enforce<A, AR, LA, LB, LC>(
&mut self,
_: A,
a: LA,
b: LB,
c: LC
)
where A: FnOnce() -> AR, AR: Into<String>,
LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>
{
use std::ops::{Add, Sub};
// This is where the reduction happens: we first re-arrange each initial constraint
// of the form <a,x>*<b,x> = <c,x> into two artificial constraints
// <a - b,x> * <a - b,x> = y
// <a + b,x> * <a + b,x> = 4*<c,x> + y
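// The pair is equivalent to the original constraint because
// (<a + b,x>)^2 - (<a - b,x>)^2 = 4*<a,x>*<b,x>; subtracting the first
// artificial constraint from the second therefore gives 4*<a,x>*<b,x> = 4*<c,x>,
// i.e. <a,x>*<b,x> = <c,x>, at the cost of one fresh variable y per constraint.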
fn quadruple<E: Engine>(
coeff: E::Fr
) -> E::Fr {
let mut tmp = coeff;
tmp.double();
tmp.double();
tmp
}
fn eval<E: Engine>(
l: LinearCombination<E>,
inputs: &mut [Vec<(E::Fr, usize)>],
aux: &mut [Vec<(E::Fr, usize)>],
this_constraint: usize
)
{
for (index, coeff) in l.0 {
match index {
Variable(Index::Input(id)) => inputs[id].push((coeff, this_constraint)),
Variable(Index::Aux(id)) => aux[id].push((coeff, this_constraint))
}
}
}
// <a - b,x> * <a - b,x> = y, where y is a fresh auxiliary variable
let i = self.num_constraints;
let y = self.alloc(
|| format!("SAP reduction y_{}", i),
|| Ok(E::Fr::one())
).expect("must allocate SAP reduction variable");
self.num_r1cs_aux -= 1;
let lc_a = a(LinearCombination::zero());
let lc_b = b(LinearCombination::zero());
let lc_c = c(LinearCombination::zero());
let lc_a_minus_b = lc_a.clone().sub(&lc_b);
let mut lc_y: LinearCombination<E> = LinearCombination::zero();
lc_y = lc_y.add(y);
eval(lc_a_minus_b, &mut self.at_inputs, &mut self.at_aux, self.num_constraints);
eval(lc_y, &mut self.ct_inputs, &mut self.ct_aux, self.num_constraints);
self.num_constraints += 1;
// <a + b,x> * <a + b,x> = 4*<c,x> + y
let lc_a_plus_b = lc_a.add(&lc_b);
let mut lc_c_quadrupled: LinearCombination<E> = LinearCombination::zero();
for s in &lc_c.0 {
let tmp = quadruple::<E>(s.1);
lc_c_quadrupled = lc_c_quadrupled + (tmp, s.0);
}
lc_c_quadrupled = lc_c_quadrupled.add(y);
eval(lc_a_plus_b, &mut self.at_inputs, &mut self.at_aux, self.num_constraints);
eval(lc_c_quadrupled, &mut self.ct_inputs, &mut self.ct_aux, self.num_constraints);
self.num_constraints += 1;
self.num_r1cs_constraints += 1;
}
fn push_namespace<NR, N>(&mut self, _: N)
where NR: Into<String>, N: FnOnce() -> NR
{
// Do nothing; we don't care about namespaces in this context.
}
fn pop_namespace(&mut self)
{
// Do nothing; we don't care about namespaces in this context.
}
fn get_root(&mut self) -> &mut Self::Root {
self
}
}
/// Create parameters for a circuit, given some toxic waste.
pub fn generate_parameters<E, C>(
circuit: C,
g1: E::G1,
g2: E::G2,
alpha: E::Fr,
beta: E::Fr,
gamma: E::Fr,
// delta: E::Fr,
tau: E::Fr
) -> Result<(), SynthesisError>
// Result<Parameters<E>, SynthesisError>
where E: Engine, C: Circuit<E>
{
let verbose = verbose_flag();
let mut assembly = KeypairAssembly {
num_inputs: 0,
num_aux: 0,
num_constraints: 0,
num_r1cs_aux: 0,
num_r1cs_constraints: 0,
at_inputs: vec![],
ct_inputs: vec![],
at_aux: vec![],
ct_aux: vec![]
};
// Allocate the "one" input variable
let input_0 = assembly.alloc_input(|| "", || Ok(E::Fr::one()))?;
// Synthesize the circuit.
circuit.synthesize(&mut assembly)?;
let num_inputs_without_identity = assembly.num_inputs - 1;
// inputs must be constrained manually in SAP style,
// so input 0 (identity) is constrained as 1*1=1
{
use std::ops::{Add, Sub};
fn eval_lc<E: Engine>(
l: LinearCombination<E>,
inputs: &mut [Vec<(E::Fr, usize)>],
aux: &mut [Vec<(E::Fr, usize)>],
this_constraint: usize
)
{
for (index, coeff) in l.0 {
match index {
Variable(Index::Input(id)) => inputs[id].push((coeff, this_constraint)),
Variable(Index::Aux(id)) => aux[id].push((coeff, this_constraint))
}
}
}
let mut lc_input_0_a: LinearCombination<E> = LinearCombination::zero();
lc_input_0_a = lc_input_0_a.add(input_0.clone());
eval_lc(lc_input_0_a, &mut assembly.at_inputs, &mut assembly.at_aux, assembly.num_constraints);
assembly.num_constraints += 1;
}
let num_constraints_before_inputs_constraining = assembly.num_constraints;
let num_aux_before_inputs_constraining = assembly.num_aux;
// Other inputs are constrained as x_i * 1 = x_i where
// 1 is actually input number 0 (identity)
for i in 1..assembly.num_inputs {
assembly.enforce(|| "",
|lc| lc + Variable(Index::Input(i)),
|lc| lc + Variable(Index::Input(0)),
|lc| lc + Variable(Index::Input(i)),
);
}
// check that each input generates 2 constraints
assert_eq!(num_inputs_without_identity * 2 +
num_constraints_before_inputs_constraining,
assembly.num_constraints,
"each input must produce two extra constraints");
// and that it creates one extra variable
assert_eq!(num_inputs_without_identity +
num_aux_before_inputs_constraining,
assembly.num_aux,
"each input must generate an extra variable");
assert_eq!(assembly.num_inputs + assembly.num_r1cs_constraints + assembly.num_r1cs_aux,
assembly.num_inputs + assembly.num_aux,
"each constraint in principle adds one variable");
if verbose {eprintln!("Constraint system size is {}", assembly.num_constraints)};
// Create bases for blind evaluation of polynomials at tau
let powers_of_tau = vec![Scalar::<E>(E::Fr::zero()); assembly.num_constraints];
let mut domain = EvaluationDomain::from_coeffs(powers_of_tau)?;
// Compute G1 window table
let mut g1_wnaf = Wnaf::new();
let g1_wnaf = g1_wnaf.base(g1, {
2*(assembly.num_inputs + assembly.num_r1cs_constraints + assembly.num_r1cs_aux)
+ assembly.num_r1cs_constraints + assembly.num_r1cs_aux
+ 2*(assembly.num_inputs + assembly.num_r1cs_constraints)
});
// Compute gamma*G2 window table
let mut g2_wnaf = Wnaf::new();
// let gamma_g2 = g2.into_affine().mul(gamma.into_repr());
let g2_wnaf = g2_wnaf.base(g2, {
// B query
assembly.num_inputs + assembly.num_aux
// alternatively expressed as
// assembly.num_inputs + assembly.num_r1cs_constraints + assembly.num_r1cs_aux
});
let worker = Worker::new();
// let z_at_tau = {
// // Compute powers of tau
// if verbose {eprintln!("computing powers of tau...")};
// let start = std::time::Instant::now();
// {
// let domain = domain.as_mut();
// worker.scope(domain.len(), |scope, chunk| {
// for (i, subdomain) in domain.chunks_mut(chunk).enumerate()
// {
// scope.spawn(move || {
// let mut current_power = tau.pow(&[(i*chunk) as u64]);
// for p in subdomain {
// p.0 = current_power;
// current_power.mul_assign(&tau);
// }
// });
// }
// });
// }
// if verbose {eprintln!("powers of tau stage 1 done in {} s", start.elapsed().as_millis() as f64 / 1000.0);};
// // z_at_tau = t(x)
// let z_at_tau = domain.z(&tau);
// z_at_tau
// };
let domain_length = domain.as_ref().len();
if verbose {eprintln!("Domain length is {} ", domain_length)};
// G1^{gamma^2 * Z(t) * t^i} for 0 <= i < 2^m - 1 for 2^m domains
let mut gamma2_z_t_g1 = vec![E::G1::zero(); domain.as_ref().len() - 1];
let mut z_at_tau = E::Fr::zero();
{
// Compute powers of tau
if verbose {eprintln!("computing powers of tau...")};
let start = std::time::Instant::now();
{
let domain = domain.as_mut();
worker.scope(domain.len(), |scope, chunk| {
for (i, subdomain) in domain.chunks_mut(chunk).enumerate()
{
scope.spawn(move |_| {
let mut current_power = tau.pow(&[(i*chunk) as u64]);
for p in subdomain {
p.0 = current_power;
current_power.mul_assign(&tau);
}
});
}
});
}
if verbose {eprintln!("powers of tau stage 1 done in {} s", start.elapsed().as_millis() as f64 / 1000.0);};
// z_at_tau = t(x)
z_at_tau = domain.z(&tau);
let mut gamma2_z_t = z_at_tau;
gamma2_z_t.mul_assign(&gamma);
gamma2_z_t.mul_assign(&gamma);
if verbose {eprintln!("computing the `G1^(gamma^2 * Z(t) * t^i)` query with multiple threads...")};
let start = std::time::Instant::now();
// Compute the H query with multiple threads
worker.scope(gamma2_z_t_g1.len(), |scope, chunk| {
for (gamma2_z_t_g1, p) in gamma2_z_t_g1.chunks_mut(chunk).zip(domain.as_ref().chunks(chunk))
{
let mut g1_wnaf = g1_wnaf.shared();
scope.spawn(move |_| {
// Set values of the H query to g1^{(tau^i * t(tau)) / delta}
for (gamma2_z_t_g1, p) in gamma2_z_t_g1.iter_mut().zip(p.iter())
{
// Compute final exponent
let mut exp = p.0;
exp.mul_assign(&gamma2_z_t);
// Exponentiate
*gamma2_z_t_g1 = g1_wnaf.scalar(exp.into_repr());
}
// Batch normalize
E::G1::batch_normalization(gamma2_z_t_g1);
});
}
});
if verbose {eprintln!("computing the `G1^(gamma^2 * Z(t) * t^i)` query done in {} s", start.elapsed().as_millis() as f64 / 1000.0);};
}
// G1^{gamma * A_i(t)} for 0 <= i <= num_variables
let mut a_g1 = vec![E::G1::zero(); assembly.num_inputs + assembly.num_aux];
// G2^{gamma * A_i(t)} for 0 <= i <= num_variables
let mut a_g2 = vec![E::G2::zero(); assembly.num_inputs + assembly.num_aux];
// G1^{gamma^2 * C_i(t) + (alpha + beta) * gamma * A_i(t)}
// for num_inputs + 1 < i <= num_variables
let mut c_1_g1 = vec![E::G1::zero(); assembly.num_inputs + assembly.num_aux];
// G1^{2 * gamma^2 * Z(t) * A_i(t)} for 0 <= i <= num_variables
let mut c_2_g1 = vec![E::G1::zero(); assembly.num_inputs + assembly.num_aux];
// G1^{gamma * Z(t)}
let mut gamma_zt = gamma;
gamma_zt.mul_assign(&z_at_tau);
let gamma_z = g1.into_affine().mul(gamma.into_repr());
// G2^{gamma * Z(t)}
let gamma_z_g2 = g2.into_affine().mul(gamma.into_repr());
let mut ab_gamma = alpha;
ab_gamma.add_assign(&beta);
ab_gamma.mul_assign(&gamma);
// G1^{(alpha + beta) * gamma * Z(t)}
let ab_gamma_z_g1 = g1.into_affine().mul(ab_gamma.into_repr());
let mut gamma2_z2 = gamma;
gamma2_z2.mul_assign(&z_at_tau);
gamma2_z2.square();
// G1^{gamma^2 * Z(t)^2}
let gamma2_z2_g1 = g1.into_affine().mul(gamma2_z2.into_repr());
// G1^{gamma^2 * Z(t) * t^i} for 0 <= i < 2^m - 1 for 2^m domains
let mut gamma2_z_t = vec![E::G1::zero(); domain.as_ref().len() - 1];
if verbose {eprintln!("using inverse FFT to convert to intepolation coefficients...")};
let start = std::time::Instant::now();
// Use inverse FFT to convert to interpolation coefficients
domain.ifft(&worker);
let powers_of_tau = domain.into_coeffs();
// domain is now a set of scalars
if verbose {eprintln!("powers of tau evaluation in radix2 domain in {} s", start.elapsed().as_millis() as f64 / 1000.0)};
if verbose {eprintln!("evaluating polynomials...")};
let start = std::time::Instant::now();
// overall strategy:
// a_g1, a_g2, c_1_g1, c_2_g1 should be combined together by computing
// ab = (alpha + beta)
// g_2 = gamma^2
// t0 = gamma*A_i(t)
// t1 = g_2*C_t(t)
// a_g1 = t0*G1
// a_g2 = t0*G2
// c_1_g1 = (t1 + ab*t0)*G1
// c_2_g1 = (2*gamma*z_at_tau*t0)*G1
fn eval_stage_1<E: Engine>(
// wNAF window tables
g1_wnaf: &Wnaf<usize, &[E::G1], &mut Vec<i64>>,
g2_wnaf: &Wnaf<usize, &[E::G2], &mut Vec<i64>>,
// powers of tau coefficients
powers_of_tau: &[Scalar<E>],
// SAP polynomials
at: &[Vec<(E::Fr, usize)>],
ct: &[Vec<(E::Fr, usize)>],
// Resulting evaluated SAP polynomials
a_g1: &mut [E::G1],
a_g2: &mut [E::G2],
c_1_g1: &mut [E::G1],
c_2_g1: &mut [E::G1],
// Trapdoors
alpha: &E::Fr,
beta: &E::Fr,
gamma: &E::Fr,
z_at_tau: &E::Fr,
// Worker
worker: &Worker
)
{
// Sanity check
assert_eq!(a_g1.len(), at.len());
assert_eq!(a_g1.len(), ct.len());
assert_eq!(a_g1.len(), a_g2.len());
assert_eq!(a_g1.len(), c_1_g1.len());
assert_eq!(a_g1.len(), c_2_g1.len());
// compute once
let mut ab = *alpha;
ab.add_assign(&beta);
let mut gamma2 = *gamma;
gamma2.square();
// Evaluate polynomials in multiple threads
worker.scope(a_g1.len(), |scope, chunk| {
for (((((a_g1, a_g2), c_1_g1), c_2_g1), at), ct) in a_g1.chunks_mut(chunk)
.zip(a_g2.chunks_mut(chunk))
.zip(c_1_g1.chunks_mut(chunk))
.zip(c_2_g1.chunks_mut(chunk))
.zip(at.chunks(chunk))
.zip(ct.chunks(chunk))
{
let mut g1_wnaf = g1_wnaf.shared();
let mut g2_wnaf = g2_wnaf.shared();
scope.spawn(move |_| {
for (((((a_g1, a_g2), c_1_g1), c_2_g1), at), ct) in a_g1.iter_mut()
.zip(a_g2.iter_mut())
.zip(c_1_g1.iter_mut())
.zip(c_2_g1.iter_mut())
.zip(at.iter())
.zip(ct.iter())
{
fn eval_at_tau<E: Engine>(
powers_of_tau: &[Scalar<E>],
p: &[(E::Fr, usize)]
) -> E::Fr
{
let mut acc = E::Fr::zero();
for &(ref coeff, index) in p {
let mut n = powers_of_tau[index].0;
n.mul_assign(coeff);
acc.add_assign(&n);
}
acc
}
// Evaluate SAP polynomials at tau
// t0 = gamma*A_i(t)
let mut t0 = eval_at_tau(powers_of_tau, at);
t0.mul_assign(&gamma);
// t1 = gamma^2*C_t(t)
let mut t1 = eval_at_tau(powers_of_tau, ct);
t1.mul_assign(&gamma2);
// a_g1 = t0*G1
// a_g2 = t0*G2
// c_1_g1 = (t1 + ab*t0)*G1
// c_2_g1 = (2*gamma*z_at_tau*t0)*G1
// Compute a_g1 and a_g2
if !t0.is_zero() {
*a_g1 = g1_wnaf.scalar(t0.into_repr());
*a_g2 = g2_wnaf.scalar(t0.into_repr());
}
let mut c_1_g1_factor = t0;
c_1_g1_factor.mul_assign(&ab);
c_1_g1_factor.add_assign(&t1);
// (2*gamma*z_at_tau*t0) inplace
t0.mul_assign(&z_at_tau);
t0.mul_assign(&gamma);
t0.double();
*c_1_g1 = g1_wnaf.scalar(c_1_g1_factor.into_repr());
*c_2_g1 = g1_wnaf.scalar(t0.into_repr());
}
// Batch normalize
E::G1::batch_normalization(a_g1);
E::G2::batch_normalization(a_g2);
E::G1::batch_normalization(c_1_g1);
E::G1::batch_normalization(c_2_g1);
});
};
});
}
// Evaluate for inputs.
eval_stage_1(
&g1_wnaf,
&g2_wnaf,
&powers_of_tau,
&assembly.at_inputs,
&assembly.ct_inputs,
&mut a_g1[0..assembly.num_inputs],
&mut a_g2[0..assembly.num_inputs],
&mut c_1_g1[0..assembly.num_inputs],
&mut c_2_g1[0..assembly.num_inputs],
&alpha,
&beta,
&gamma,
&z_at_tau,
&worker
);
// Evaluate for auxiliary variables.
eval_stage_1(
&g1_wnaf,
&g2_wnaf,
&powers_of_tau,
&assembly.at_aux,
&assembly.ct_aux,
&mut a_g1[assembly.num_inputs..],
&mut a_g2[assembly.num_inputs..],
&mut c_1_g1[assembly.num_inputs..],
&mut c_2_g1[assembly.num_inputs..],
&alpha,
&beta,
&gamma,
&z_at_tau,
&worker
);
// for _ in 0..assembly.num_inputs {
// c_1_g1.remove(0);
// }
if verbose {eprintln!("evaluating polynomials done in {} s", start.elapsed().as_millis() as f64 / 1000.0);};
// // Don't allow any elements be unconstrained, so that
// // the L query is always fully dense.
// for e in l.iter() {
// if e.is_zero() {
// return Err(SynthesisError::UnconstrainedVariable);
// }
// }
// let g1 = g1.into_affine();
// let g2 = g2.into_affine();
// let vk = VerifyingKey::<E> {
// alpha_g1: g1.mul(alpha).into_affine(),
// beta_g1: g1.mul(beta).into_affine(),
// beta_g2: g2.mul(beta).into_affine(),
// gamma_g2: g2.mul(gamma).into_affine(),
// delta_g1: g1.mul(delta).into_affine(),
// delta_g2: g2.mul(delta).into_affine(),
// ic: ic.into_iter().map(|e| e.into_affine()).collect()
// };
println!("Has generated {} points", a_g1.len());
Ok(())
// Ok(Parameters {
// vk: vk,
// h: Arc::new(h.into_iter().map(|e| e.into_affine()).collect()),
// l: Arc::new(l.into_iter().map(|e| e.into_affine()).collect()),
// // Filter points at infinity away from A/B queries
// a: Arc::new(a.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect()),
// b_g1: Arc::new(b_g1.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect()),
// b_g2: Arc::new(b_g2.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect())
// })
}

563
src/gm17/mod.rs Normal file

@ -0,0 +1,563 @@
use pairing::{
Engine,
CurveAffine,
EncodedPoint
};
use crate::{
SynthesisError
};
use crate::source::SourceBuilder;
use std::io::{self, Read, Write};
use std::sync::Arc;
use byteorder::{BigEndian, WriteBytesExt, ReadBytesExt};
#[cfg(test)]
mod tests;
mod generator;
// mod prover;
// mod verifier;
pub use self::generator::*;
// pub use self::prover::*;
// pub use self::verifier::*;
#[derive(Debug, Clone)]
pub struct Proof<E: Engine> {
pub a: E::G1Affine,
pub b: E::G2Affine,
pub c: E::G1Affine
}
impl<E: Engine> PartialEq for Proof<E> {
fn eq(&self, other: &Self) -> bool {
self.a == other.a &&
self.b == other.b &&
self.c == other.c
}
}
impl<E: Engine> Proof<E> {
pub fn write<W: Write>(
&self,
mut writer: W
) -> io::Result<()>
{
writer.write_all(self.a.into_compressed().as_ref())?;
writer.write_all(self.b.into_compressed().as_ref())?;
writer.write_all(self.c.into_compressed().as_ref())?;
Ok(())
}
pub fn read<R: Read>(
mut reader: R
) -> io::Result<Self>
{
let mut g1_repr = <E::G1Affine as CurveAffine>::Compressed::empty();
let mut g2_repr = <E::G2Affine as CurveAffine>::Compressed::empty();
reader.read_exact(g1_repr.as_mut())?;
let a = g1_repr
.into_affine()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
.and_then(|e| if e.is_zero() {
Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
} else {
Ok(e)
})?;
reader.read_exact(g2_repr.as_mut())?;
let b = g2_repr
.into_affine()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
.and_then(|e| if e.is_zero() {
Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
} else {
Ok(e)
})?;
reader.read_exact(g1_repr.as_mut())?;
let c = g1_repr
.into_affine()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
.and_then(|e| if e.is_zero() {
Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
} else {
Ok(e)
})?;
Ok(Proof {
a: a,
b: b,
c: c
})
}
}
#[derive(Clone)]
pub struct VerifyingKey<E: Engine> {
pub h_g2: E::G2Affine,
// alpha in g1 for verifying and for creating A/C elements of
// proof. Never the point at infinity.
pub alpha_g1: E::G1Affine,
// beta in g2 for verifying. Never the point at infinity.
pub beta_g2: E::G2Affine,
// gamma in g1 for verifying. Never the point at infinity.
pub gamma_g1: E::G1Affine,
// gamma in g2 for verifying. Never the point at infinity.
pub gamma_g2: E::G2Affine,
// Elements of the form G^{gamma * A_i(t) + (alpha + beta) * A_i(t)}
// for all public inputs. Because all public inputs have a dummy constraint,
// this is the same size as the number of inputs, and never contains points
// at infinity.
pub ic: Vec<E::G1Affine>
}
impl<E: Engine> PartialEq for VerifyingKey<E> {
fn eq(&self, other: &Self) -> bool {
self.h_g2 == other.h_g2 &&
self.alpha_g1 == other.alpha_g1 &&
self.beta_g2 == other.beta_g2 &&
self.gamma_g1 == other.gamma_g1 &&
self.gamma_g2 == other.gamma_g2 &&
self.ic == other.ic
}
}
impl<E: Engine> VerifyingKey<E> {
pub fn write<W: Write>(
&self,
mut writer: W
) -> io::Result<()>
{
writer.write_all(self.h_g2.into_uncompressed().as_ref())?;
writer.write_all(self.alpha_g1.into_uncompressed().as_ref())?;
writer.write_all(self.beta_g2.into_uncompressed().as_ref())?;
writer.write_all(self.gamma_g1.into_uncompressed().as_ref())?;
writer.write_all(self.gamma_g2.into_uncompressed().as_ref())?;
writer.write_u32::<BigEndian>(self.ic.len() as u32)?;
for ic in &self.ic {
writer.write_all(ic.into_uncompressed().as_ref())?;
}
Ok(())
}
pub fn read<R: Read>(
mut reader: R
) -> io::Result<Self>
{
let mut g1_repr = <E::G1Affine as CurveAffine>::Uncompressed::empty();
let mut g2_repr = <E::G2Affine as CurveAffine>::Uncompressed::empty();
reader.read_exact(g2_repr.as_mut())?;
let h_h2 = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
reader.read_exact(g1_repr.as_mut())?;
let alpha_g1 = g1_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
reader.read_exact(g2_repr.as_mut())?;
let beta_g2 = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
reader.read_exact(g1_repr.as_mut())?;
let gamma_g1 = g1_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
reader.read_exact(g2_repr.as_mut())?;
let gamma_g2 = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
let ic_len = reader.read_u32::<BigEndian>()? as usize;
let mut ic = vec![];
for _ in 0..ic_len {
reader.read_exact(g1_repr.as_mut())?;
let g1 = g1_repr
.into_affine()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
.and_then(|e| if e.is_zero() {
Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
} else {
Ok(e)
})?;
ic.push(g1);
}
Ok(VerifyingKey {
h_g2: h_h2,
alpha_g1: alpha_g1,
beta_g2: beta_g2,
gamma_g1: gamma_g1,
gamma_g2: gamma_g2,
ic: ic
})
}
}
#[derive(Clone)]
pub struct Parameters<E: Engine> {
pub vk: VerifyingKey<E>,
pub a_g1: Arc<Vec<E::G1Affine>>,
pub a_g2: Arc<Vec<E::G2Affine>>,
pub c_1_g1: Arc<Vec<E::G1Affine>>,
pub c_2_g1: Arc<Vec<E::G1Affine>>,
pub gamma_z: E::G1Affine,
pub gamma_z_g2: E::G2Affine,
pub ab_gamma_z_g1: E::G1Affine,
pub gamma2_z2_g1: E::G1Affine,
pub gamma2_z_t: Arc<Vec<E::G1Affine>>,
}
impl<E: Engine> PartialEq for Parameters<E> {
fn eq(&self, other: &Self) -> bool {
self.vk == other.vk &&
self.a_g1 == other.a_g1 &&
self.a_g2 == other.a_g2 &&
self.c_1_g1 == other.c_1_g1 &&
self.c_2_g1 == other.c_2_g1 &&
self.gamma_z == other.gamma_z &&
self.gamma_z_g2 == other.gamma_z_g2 &&
self.ab_gamma_z_g1 == other.ab_gamma_z_g1 &&
self.gamma2_z2_g1 == other.gamma2_z2_g1 &&
self.gamma2_z_t == other.gamma2_z_t
}
}
// impl<E: Engine> Parameters<E> {
// pub fn write<W: Write>(
// &self,
// mut writer: W
// ) -> io::Result<()>
// {
// self.vk.write(&mut writer)?;
// writer.write_u32::<BigEndian>(self.h.len() as u32)?;
// for g in &self.h[..] {
// writer.write_all(g.into_uncompressed().as_ref())?;
// }
// writer.write_u32::<BigEndian>(self.l.len() as u32)?;
// for g in &self.l[..] {
// writer.write_all(g.into_uncompressed().as_ref())?;
// }
// writer.write_u32::<BigEndian>(self.a.len() as u32)?;
// for g in &self.a[..] {
// writer.write_all(g.into_uncompressed().as_ref())?;
// }
// writer.write_u32::<BigEndian>(self.b_g1.len() as u32)?;
// for g in &self.b_g1[..] {
// writer.write_all(g.into_uncompressed().as_ref())?;
// }
// writer.write_u32::<BigEndian>(self.b_g2.len() as u32)?;
// for g in &self.b_g2[..] {
// writer.write_all(g.into_uncompressed().as_ref())?;
// }
// Ok(())
// }
// pub fn read<R: Read>(
// mut reader: R,
// checked: bool
// ) -> io::Result<Self>
// {
// let read_g1 = |reader: &mut R| -> io::Result<E::G1Affine> {
// let mut repr = <E::G1Affine as CurveAffine>::Uncompressed::empty();
// reader.read_exact(repr.as_mut())?;
// if checked {
// repr
// .into_affine()
// } else {
// repr
// .into_affine_unchecked()
// }
// .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
// .and_then(|e| if e.is_zero() {
// Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
// } else {
// Ok(e)
// })
// };
// let read_g2 = |reader: &mut R| -> io::Result<E::G2Affine> {
// let mut repr = <E::G2Affine as CurveAffine>::Uncompressed::empty();
// reader.read_exact(repr.as_mut())?;
// if checked {
// repr
// .into_affine()
// } else {
// repr
// .into_affine_unchecked()
// }
// .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
// .and_then(|e| if e.is_zero() {
// Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
// } else {
// Ok(e)
// })
// };
// let vk = VerifyingKey::<E>::read(&mut reader)?;
// let mut h = vec![];
// let mut l = vec![];
// let mut a = vec![];
// let mut b_g1 = vec![];
// let mut b_g2 = vec![];
// {
// let len = reader.read_u32::<BigEndian>()? as usize;
// for _ in 0..len {
// h.push(read_g1(&mut reader)?);
// }
// }
// {
// let len = reader.read_u32::<BigEndian>()? as usize;
// for _ in 0..len {
// l.push(read_g1(&mut reader)?);
// }
// }
// {
// let len = reader.read_u32::<BigEndian>()? as usize;
// for _ in 0..len {
// a.push(read_g1(&mut reader)?);
// }
// }
// {
// let len = reader.read_u32::<BigEndian>()? as usize;
// for _ in 0..len {
// b_g1.push(read_g1(&mut reader)?);
// }
// }
// {
// let len = reader.read_u32::<BigEndian>()? as usize;
// for _ in 0..len {
// b_g2.push(read_g2(&mut reader)?);
// }
// }
// Ok(Parameters {
// vk: vk,
// h: Arc::new(h),
// l: Arc::new(l),
// a: Arc::new(a),
// b_g1: Arc::new(b_g1),
// b_g2: Arc::new(b_g2)
// })
// }
// }
// pub struct PreparedVerifyingKey<E: Engine> {
// /// Pairing result of alpha*beta
// alpha_g1_beta_g2: E::Fqk,
// /// -gamma in G2
// neg_gamma_g2: <E::G2Affine as CurveAffine>::Prepared,
// /// -delta in G2
// neg_delta_g2: <E::G2Affine as CurveAffine>::Prepared,
// /// Copy of IC from `VerifiyingKey`.
// ic: Vec<E::G1Affine>
// }
// pub trait ParameterSource<E: Engine> {
// type G1Builder: SourceBuilder<E::G1Affine>;
// type G2Builder: SourceBuilder<E::G2Affine>;
// fn get_vk(
// &mut self,
// num_ic: usize
// ) -> Result<VerifyingKey<E>, SynthesisError>;
// fn get_h(
// &mut self,
// num_h: usize
// ) -> Result<Self::G1Builder, SynthesisError>;
// fn get_l(
// &mut self,
// num_l: usize
// ) -> Result<Self::G1Builder, SynthesisError>;
// fn get_a(
// &mut self,
// num_inputs: usize,
// num_aux: usize
// ) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError>;
// fn get_b_g1(
// &mut self,
// num_inputs: usize,
// num_aux: usize
// ) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError>;
// fn get_b_g2(
// &mut self,
// num_inputs: usize,
// num_aux: usize
// ) -> Result<(Self::G2Builder, Self::G2Builder), SynthesisError>;
// }
// impl<'a, E: Engine> ParameterSource<E> for &'a Parameters<E> {
// type G1Builder = (Arc<Vec<E::G1Affine>>, usize);
// type G2Builder = (Arc<Vec<E::G2Affine>>, usize);
// fn get_vk(
// &mut self,
// _: usize
// ) -> Result<VerifyingKey<E>, SynthesisError>
// {
// Ok(self.vk.clone())
// }
// fn get_h(
// &mut self,
// _: usize
// ) -> Result<Self::G1Builder, SynthesisError>
// {
// Ok((self.h.clone(), 0))
// }
// fn get_l(
// &mut self,
// _: usize
// ) -> Result<Self::G1Builder, SynthesisError>
// {
// Ok((self.l.clone(), 0))
// }
// fn get_a(
// &mut self,
// num_inputs: usize,
// _: usize
// ) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError>
// {
// Ok(((self.a.clone(), 0), (self.a.clone(), num_inputs)))
// }
// fn get_b_g1(
// &mut self,
// num_inputs: usize,
// _: usize
// ) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError>
// {
// Ok(((self.b_g1.clone(), 0), (self.b_g1.clone(), num_inputs)))
// }
// fn get_b_g2(
// &mut self,
// num_inputs: usize,
// _: usize
// ) -> Result<(Self::G2Builder, Self::G2Builder), SynthesisError>
// {
// Ok(((self.b_g2.clone(), 0), (self.b_g2.clone(), num_inputs)))
// }
// }
// #[cfg(test)]
// mod test_with_bls12_381 {
// use super::*;
// use {Circuit, SynthesisError, ConstraintSystem};
// use rand::{Rand, thread_rng};
// use pairing::ff::{Field};
// use pairing::bls12_381::{Bls12, Fr};
// #[test]
// fn serialization() {
// struct MySillyCircuit<E: Engine> {
// a: Option<E::Fr>,
// b: Option<E::Fr>
// }
// impl<E: Engine> Circuit<E> for MySillyCircuit<E> {
// fn synthesize<CS: ConstraintSystem<E>>(
// self,
// cs: &mut CS
// ) -> Result<(), SynthesisError>
// {
// let a = cs.alloc(|| "a", || self.a.ok_or(SynthesisError::AssignmentMissing))?;
// let b = cs.alloc(|| "b", || self.b.ok_or(SynthesisError::AssignmentMissing))?;
// let c = cs.alloc_input(|| "c", || {
// let mut a = self.a.ok_or(SynthesisError::AssignmentMissing)?;
// let b = self.b.ok_or(SynthesisError::AssignmentMissing)?;
// a.mul_assign(&b);
// Ok(a)
// })?;
// cs.enforce(
// || "a*b=c",
// |lc| lc + a,
// |lc| lc + b,
// |lc| lc + c
// );
// Ok(())
// }
// }
// let rng = &mut thread_rng();
// let params = generate_random_parameters::<Bls12, _, _>(
// MySillyCircuit { a: None, b: None },
// rng
// ).unwrap();
// {
// let mut v = vec![];
// params.write(&mut v).unwrap();
// assert_eq!(v.len(), 2136);
// let de_params = Parameters::read(&v[..], true).unwrap();
// assert!(params == de_params);
// let de_params = Parameters::read(&v[..], false).unwrap();
// assert!(params == de_params);
// }
// let pvk = prepare_verifying_key::<Bls12>(&params.vk);
// for _ in 0..100 {
// let a = Fr::rand(rng);
// let b = Fr::rand(rng);
// let mut c = a;
// c.mul_assign(&b);
// let proof = create_random_proof(
// MySillyCircuit {
// a: Some(a),
// b: Some(b)
// },
// &params,
// rng
// ).unwrap();
// let mut v = vec![];
// proof.write(&mut v).unwrap();
// assert_eq!(v.len(), 192);
// let de_proof = Proof::read(&v[..]).unwrap();
// assert!(proof == de_proof);
// assert!(verify_proof(&pvk, &proof, &[c]).unwrap());
// assert!(!verify_proof(&pvk, &proof, &[a]).unwrap());
// }
// }
// }

329
src/gm17/tests/mod.rs Normal file

@ -0,0 +1,329 @@
use pairing::{
Engine
};
use pairing::ff::{
Field,
PrimeField,
};
use super::super::tests::dummy_engine::*;
use super::super::tests::XORDemo;
use std::marker::PhantomData;
use crate::{
Circuit,
ConstraintSystem,
SynthesisError
};
use super::{
generate_parameters,
// prepare_verifying_key,
// create_proof,
// verify_proof
};
#[test]
fn test_gm17_xordemo() {
let g1 = Fr::one();
let g2 = Fr::one();
let alpha = Fr::from_str("48577").unwrap();
let beta = Fr::from_str("22580").unwrap();
let gamma = Fr::from_str("53332").unwrap();
// let delta = Fr::from_str("5481").unwrap();
let tau = Fr::from_str("3673").unwrap();
let params = {
let c = XORDemo::<DummyEngine> {
a: None,
b: None,
_marker: PhantomData
};
generate_parameters(
c,
g1,
g2,
alpha,
beta,
gamma,
tau
).unwrap()
};
// // This will synthesize the constraint system:
// //
// // public inputs: a_0 = 1, a_1 = c
// // aux inputs: a_2 = a, a_3 = b
// // constraints:
// // (a_0 - a_2) * (a_2) = 0
// // (a_0 - a_3) * (a_3) = 0
// // (a_2 + a_2) * (a_3) = (a_2 + a_3 - a_1)
// // (a_0) * 0 = 0
// // (a_1) * 0 = 0
// // The evaluation domain is 8. The H query should
// // have 7 elements (it's a quotient polynomial)
// assert_eq!(7, params.h.len());
// let mut root_of_unity = Fr::root_of_unity();
// // We expect this to be a 2^10 root of unity
// assert_eq!(Fr::one(), root_of_unity.pow(&[1 << 10]));
// // Let's turn it into a 2^3 root of unity.
// root_of_unity = root_of_unity.pow(&[1 << 7]);
// assert_eq!(Fr::one(), root_of_unity.pow(&[1 << 3]));
// assert_eq!(Fr::from_str("20201").unwrap(), root_of_unity);
// // Let's compute all the points in our evaluation domain.
// let mut points = Vec::with_capacity(8);
// for i in 0..8 {
// points.push(root_of_unity.pow(&[i]));
// }
// // Let's compute t(tau) = (tau - p_0)(tau - p_1)...
// // = tau^8 - 1
// let mut t_at_tau = tau.pow(&[8]);
// t_at_tau.sub_assign(&Fr::one());
// {
// let mut tmp = Fr::one();
// for p in &points {
// let mut term = tau;
// term.sub_assign(p);
// tmp.mul_assign(&term);
// }
// assert_eq!(tmp, t_at_tau);
// }
// // We expect our H query to be 7 elements of the form...
// // {tau^i t(tau) / delta}
// let delta_inverse = delta.inverse().unwrap();
// let gamma_inverse = gamma.inverse().unwrap();
// {
// let mut coeff = delta_inverse;
// coeff.mul_assign(&t_at_tau);
// let mut cur = Fr::one();
// for h in params.h.iter() {
// let mut tmp = cur;
// tmp.mul_assign(&coeff);
// assert_eq!(*h, tmp);
// cur.mul_assign(&tau);
// }
// }
// // The density of the IC query is 2 (2 inputs)
// assert_eq!(2, params.vk.ic.len());
// // The density of the L query is 2 (2 aux variables)
// assert_eq!(2, params.l.len());
// // The density of the A query is 4 (each variable is in at least one A term)
// assert_eq!(4, params.a.len());
// // The density of the B query is 2 (two variables are in at least one B term)
// assert_eq!(2, params.b_g1.len());
// assert_eq!(2, params.b_g2.len());
// /*
// Lagrange interpolation polynomials in our evaluation domain:
// ,-------------------------------. ,-------------------------------. ,-------------------------------.
// | A TERM | | B TERM | | C TERM |
// `-------------------------------. `-------------------------------' `-------------------------------'
// | a_0 | a_1 | a_2 | a_3 | | a_0 | a_1 | a_2 | a_3 | | a_0 | a_1 | a_2 | a_3 |
// | 1 | 0 | 64512 | 0 | | 0 | 0 | 1 | 0 | | 0 | 0 | 0 | 0 |
// | 1 | 0 | 0 | 64512 | | 0 | 0 | 0 | 1 | | 0 | 0 | 0 | 0 |
// | 0 | 0 | 2 | 0 | | 0 | 0 | 0 | 1 | | 0 | 64512 | 1 | 1 |
// | 1 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 |
// | 0 | 1 | 0 | 0 | | 0 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 |
// `-------'-------'-------'-------' `-------'-------'-------'-------' `-------'-------'-------'-------'
// Example for u_0:
// sage: r = 64513
// sage: Fr = GF(r)
// sage: omega = (Fr(5)^63)^(2^7)
// sage: tau = Fr(3673)
// sage: R.<x> = PolynomialRing(Fr, 'x')
// sage: def eval(tau, c0, c1, c2, c3, c4):
// ....: p = R.lagrange_polynomial([(omega^0, c0), (omega^1, c1), (omega^2, c2), (omega^3, c3), (omega^4, c4), (omega^5, 0), (omega^6, 0), (omega^7, 0)])
// ....: return p.substitute(tau)
// sage: eval(tau, 1, 1, 0, 1, 0)
// 59158
// */
// let u_i = [59158, 48317, 21767, 10402].iter().map(|e| {
// Fr::from_str(&format!("{}", e)).unwrap()
// }).collect::<Vec<Fr>>();
// let v_i = [0, 0, 60619, 30791].iter().map(|e| {
// Fr::from_str(&format!("{}", e)).unwrap()
// }).collect::<Vec<Fr>>();
// let w_i = [0, 23320, 41193, 41193].iter().map(|e| {
// Fr::from_str(&format!("{}", e)).unwrap()
// }).collect::<Vec<Fr>>();
// for (u, a) in u_i.iter()
// .zip(&params.a[..])
// {
// assert_eq!(u, a);
// }
// for (v, b) in v_i.iter()
// .filter(|&&e| e != Fr::zero())
// .zip(&params.b_g1[..])
// {
// assert_eq!(v, b);
// }
// for (v, b) in v_i.iter()
// .filter(|&&e| e != Fr::zero())
// .zip(&params.b_g2[..])
// {
// assert_eq!(v, b);
// }
// for i in 0..4 {
// let mut tmp1 = beta;
// tmp1.mul_assign(&u_i[i]);
// let mut tmp2 = alpha;
// tmp2.mul_assign(&v_i[i]);
// tmp1.add_assign(&tmp2);
// tmp1.add_assign(&w_i[i]);
// if i < 2 {
// // Check the correctness of the IC query elements
// tmp1.mul_assign(&gamma_inverse);
// assert_eq!(tmp1, params.vk.ic[i]);
// } else {
// // Check the correctness of the L query elements
// tmp1.mul_assign(&delta_inverse);
// assert_eq!(tmp1, params.l[i - 2]);
// }
// }
// // Check consistency of the other elements
// assert_eq!(alpha, params.vk.alpha_g1);
// assert_eq!(beta, params.vk.beta_g1);
// assert_eq!(beta, params.vk.beta_g2);
// assert_eq!(gamma, params.vk.gamma_g2);
// assert_eq!(delta, params.vk.delta_g1);
// assert_eq!(delta, params.vk.delta_g2);
// let pvk = prepare_verifying_key(&params.vk);
// let r = Fr::from_str("27134").unwrap();
// let s = Fr::from_str("17146").unwrap();
// let proof = {
// let c = XORDemo {
// a: Some(true),
// b: Some(false),
// _marker: PhantomData
// };
// create_proof(
// c,
// &params,
// r,
// s
// ).unwrap()
// };
// // A(x) =
// // a_0 * (44865*x^7 + 56449*x^6 + 44865*x^5 + 8064*x^4 + 3520*x^3 + 56449*x^2 + 3520*x + 40321) +
// // a_1 * (8064*x^7 + 56449*x^6 + 8064*x^5 + 56449*x^4 + 8064*x^3 + 56449*x^2 + 8064*x + 56449) +
// // a_2 * (16983*x^7 + 24192*x^6 + 63658*x^5 + 56449*x^4 + 16983*x^3 + 24192*x^2 + 63658*x + 56449) +
// // a_3 * (5539*x^7 + 27797*x^6 + 6045*x^5 + 56449*x^4 + 58974*x^3 + 36716*x^2 + 58468*x + 8064) +
// {
// // proof A = alpha + A(tau) + delta * r
// let mut expected_a = delta;
// expected_a.mul_assign(&r);
// expected_a.add_assign(&alpha);
// expected_a.add_assign(&u_i[0]); // a_0 = 1
// expected_a.add_assign(&u_i[1]); // a_1 = 1
// expected_a.add_assign(&u_i[2]); // a_2 = 1
// // a_3 = 0
// assert_eq!(proof.a, expected_a);
// }
// // B(x) =
// // a_0 * (0) +
// // a_1 * (0) +
// // a_2 * (56449*x^7 + 56449*x^6 + 56449*x^5 + 56449*x^4 + 56449*x^3 + 56449*x^2 + 56449*x + 56449) +
// // a_3 * (31177*x^7 + 44780*x^6 + 21752*x^5 + 42255*x^3 + 35861*x^2 + 33842*x + 48385)
// {
// // proof B = beta + B(tau) + delta * s
// let mut expected_b = delta;
// expected_b.mul_assign(&s);
// expected_b.add_assign(&beta);
// expected_b.add_assign(&v_i[0]); // a_0 = 1
// expected_b.add_assign(&v_i[1]); // a_1 = 1
// expected_b.add_assign(&v_i[2]); // a_2 = 1
// // a_3 = 0
// assert_eq!(proof.b, expected_b);
// }
// // C(x) =
// // a_0 * (0) +
// // a_1 * (27797*x^7 + 56449*x^6 + 36716*x^5 + 8064*x^4 + 27797*x^3 + 56449*x^2 + 36716*x + 8064) +
// // a_2 * (36716*x^7 + 8064*x^6 + 27797*x^5 + 56449*x^4 + 36716*x^3 + 8064*x^2 + 27797*x + 56449) +
// // a_3 * (36716*x^7 + 8064*x^6 + 27797*x^5 + 56449*x^4 + 36716*x^3 + 8064*x^2 + 27797*x + 56449)
// //
// // If A * B = C at each point in the domain, then the following polynomial...
// // P(x) = A(x) * B(x) - C(x)
// // = 49752*x^14 + 13914*x^13 + 29243*x^12 + 27227*x^11 + 62362*x^10 + 35703*x^9 + 4032*x^8 + 14761*x^6 + 50599*x^5 + 35270*x^4 + 37286*x^3 + 2151*x^2 + 28810*x + 60481
// //
// // ... should be divisible by t(x), producing the quotient polynomial:
// // h(x) = P(x) / t(x)
// // = 49752*x^6 + 13914*x^5 + 29243*x^4 + 27227*x^3 + 62362*x^2 + 35703*x + 4032
// {
// let mut expected_c = Fr::zero();
// // A * s
// let mut tmp = proof.a;
// tmp.mul_assign(&s);
// expected_c.add_assign(&tmp);
// // B * r
// let mut tmp = proof.b;
// tmp.mul_assign(&r);
// expected_c.add_assign(&tmp);
// // delta * r * s
// let mut tmp = delta;
// tmp.mul_assign(&r);
// tmp.mul_assign(&s);
// expected_c.sub_assign(&tmp);
// // L query answer
// // a_2 = 1, a_3 = 0
// expected_c.add_assign(&params.l[0]);
// // H query answer
// for (i, coeff) in [5040, 11763, 10755, 63633, 128, 9747, 8739].iter().enumerate() {
// let coeff = Fr::from_str(&format!("{}", coeff)).unwrap();
// let mut tmp = params.h[i];
// tmp.mul_assign(&coeff);
// expected_c.add_assign(&tmp);
// }
// assert_eq!(expected_c, proof.c);
// }
// assert!(verify_proof(
// &pvk,
// &proof,
// &[Fr::one()]
// ).unwrap());
}

@ -11,7 +11,7 @@ use pairing::{
CurveAffine
};
use ff::{
use pairing::ff::{
PrimeField,
Field
};
@ -21,7 +21,7 @@ use super::{
VerifyingKey
};
use ::{
use crate::{
SynthesisError,
Circuit,
ConstraintSystem,
@ -30,12 +30,12 @@ use ::{
Index
};
use ::domain::{
use crate::domain::{
EvaluationDomain,
Scalar
};
use ::multicore::{
use crate::worker::{
Worker
};
@ -259,7 +259,7 @@ pub fn generate_parameters<E, C>(
worker.scope(powers_of_tau.len(), |scope, chunk| {
for (i, powers_of_tau) in powers_of_tau.chunks_mut(chunk).enumerate()
{
scope.spawn(move || {
scope.spawn(move |_| {
let mut current_tau_power = tau.pow(&[(i*chunk) as u64]);
for p in powers_of_tau {
@ -285,7 +285,7 @@ pub fn generate_parameters<E, C>(
for (h, p) in h.chunks_mut(chunk).zip(powers_of_tau.as_ref().chunks(chunk))
{
let mut g1_wnaf = g1_wnaf.shared();
scope.spawn(move || {
scope.spawn(move |_| {
// Set values of the H query to g1^{(tau^i * t(tau)) / delta}
for (h, p) in h.iter_mut().zip(p.iter())
{
@ -376,7 +376,7 @@ pub fn generate_parameters<E, C>(
let mut g1_wnaf = g1_wnaf.shared();
let mut g2_wnaf = g2_wnaf.shared();
scope.spawn(move || {
scope.spawn(move |_| {
for ((((((a, b_g1), b_g2), ext), at), bt), ct) in a.iter_mut()
.zip(b_g1.iter_mut())
.zip(b_g2.iter_mut())

@ -4,11 +4,11 @@ use pairing::{
EncodedPoint
};
use ::{
use crate::{
SynthesisError
};
use multiexp::SourceBuilder;
use crate::source::SourceBuilder;
use std::io::{self, Read, Write};
use std::sync::Arc;
use byteorder::{BigEndian, WriteBytesExt, ReadBytesExt};
@ -484,10 +484,10 @@ impl<'a, E: Engine> ParameterSource<E> for &'a Parameters<E> {
#[cfg(test)]
mod test_with_bls12_381 {
use super::*;
use {Circuit, SynthesisError, ConstraintSystem};
use crate::{Circuit, SynthesisError, ConstraintSystem};
use rand::{Rand, thread_rng};
use ff::{Field};
use pairing::ff::{Field};
use pairing::bls12_381::{Bls12, Fr};
#[test]

@ -12,7 +12,7 @@ use pairing::{
CurveAffine
};
use ff::{
use pairing::ff::{
PrimeField,
Field
};
@ -22,7 +22,7 @@ use super::{
Proof
};
use ::{
use crate::{
SynthesisError,
Circuit,
ConstraintSystem,
@ -31,18 +31,19 @@ use ::{
Index
};
use ::domain::{
use crate::domain::{
EvaluationDomain,
Scalar
};
use ::multiexp::{
use crate::source::{
DensityTracker,
FullDensity,
multiexp
FullDensity
};
use ::multicore::{
use crate::multiexp::*;
use crate::worker::{
Worker
};
@ -176,6 +177,8 @@ impl<E:Engine> PreparedProver<E> {
let mut a = EvaluationDomain::from_coeffs(prover.a)?;
let mut b = EvaluationDomain::from_coeffs(prover.b)?;
let mut c = EvaluationDomain::from_coeffs(prover.c)?;
if verbose {eprintln!("H query domain size is {}", a.as_ref().len())};
// here a coset is a domain where denominator (z) does not vanish
// inverse FFT is an interpolation
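// (For context, a hedged summary of the standard Groth16 flow this code follows:
// the quotient h(x) = (A(x)*B(x) - C(x)) / Z(x) is obtained by interpolating
// A, B, C via the inverse FFT, re-evaluating them on a coset where the vanishing
// polynomial Z(x) is nonzero, combining pointwise, dividing by Z on that coset,
// and interpolating back.)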
a.ifft(&worker);
@ -206,18 +209,23 @@ impl<E:Engine> PreparedProver<E> {
multiexp(&worker, params.get_h(a.len())?, FullDensity, a)
};
if verbose {eprintln!("{} seconds for prover for H evaluation", start.elapsed().as_secs())};
if verbose {eprintln!("{} seconds for prover for H evaluation (mostly FFT)", start.elapsed().as_millis() as f64 / 1000.0)};
let start = std::time::Instant::now();
// TODO: Check that difference in operations for different chunks is small
// TODO: parallelize if it's even helpful
// TODO: in large settings it may be worth parallelizing
let input_assignment = Arc::new(prover.input_assignment.into_iter().map(|s| s.into_repr()).collect::<Vec<_>>());
let aux_assignment = Arc::new(prover.aux_assignment.into_iter().map(|s| s.into_repr()).collect::<Vec<_>>());
let input_len = input_assignment.len();
let aux_len = aux_assignment.len();
if verbose {eprintln!("H query is dense in G1,\nOther queries are {} elements in G1 and {} elements in G2",
2*(input_len + aux_len) + aux_len, input_len + aux_len)
};
// Run a dedicated process for dense vector
let l = multiexp(&worker, params.get_l(aux_assignment.len())?, FullDensity, aux_assignment.clone());
@ -279,7 +287,7 @@ impl<E:Engine> PreparedProver<E> {
g_c.add_assign(&h.wait()?);
g_c.add_assign(&l.wait()?);
if verbose {eprintln!("{} seconds for prover for point multiplication", start.elapsed().as_secs())};
if verbose {eprintln!("{} seconds for prover for point multiplication", start.elapsed().as_millis() as f64 / 1000.0)};
Ok(Proof {
a: g_a.into_affine(),
@ -438,6 +446,7 @@ pub fn create_proof<E, C, P: ParameterSource<E>>(
let mut a = EvaluationDomain::from_coeffs(prover.a)?;
let mut b = EvaluationDomain::from_coeffs(prover.b)?;
let mut c = EvaluationDomain::from_coeffs(prover.c)?;
if verbose {eprintln!("H query domain size is {}", a.as_ref().len())};
// here a coset is a domain where denominator (z) does not vanish
// inverse FFT is an interpolation
a.ifft(&worker);
@ -468,13 +477,12 @@ pub fn create_proof<E, C, P: ParameterSource<E>>(
multiexp(&worker, params.get_h(a.len())?, FullDensity, a)
};
if verbose {eprintln!("{} seconds for prover for H evaluation", start.elapsed().as_secs())};
if verbose {eprintln!("{} seconds for prover for H evaluation (mostly FFT)", start.elapsed().as_millis() as f64 / 1000.0)};
let start = std::time::Instant::now();
// TODO: Check that difference in operations for different chunks is small
// TODO: parallelize if it's even helpful
// TODO: in large settings it may be worth parallelizing
let input_assignment = Arc::new(prover.input_assignment.into_iter().map(|s| s.into_repr()).collect::<Vec<_>>());
@ -541,7 +549,7 @@ pub fn create_proof<E, C, P: ParameterSource<E>>(
g_c.add_assign(&h.wait()?);
g_c.add_assign(&l.wait()?);
if verbose {eprintln!("{} seconds for prover for point multiplication", start.elapsed().as_secs())};
if verbose {eprintln!("{} seconds for prover for point multiplication", start.elapsed().as_millis() as f64 / 1000.0)};
Ok(Proof {
a: g_a.into_affine(),

@ -2,17 +2,17 @@ use pairing::{
Engine
};
use ff:: {
use pairing::ff:: {
Field,
PrimeField,
};
mod dummy_engine;
use self::dummy_engine::*;
use super::super::tests::dummy_engine::*;
use super::super::tests::XORDemo;
use std::marker::PhantomData;
use ::{
use crate::{
Circuit,
ConstraintSystem,
SynthesisError
@ -25,79 +25,6 @@ use super::{
verify_proof
};
struct XORDemo<E: Engine> {
a: Option<bool>,
b: Option<bool>,
_marker: PhantomData<E>
}
impl<E: Engine> Circuit<E> for XORDemo<E> {
fn synthesize<CS: ConstraintSystem<E>>(
self,
cs: &mut CS
) -> Result<(), SynthesisError>
{
let a_var = cs.alloc(|| "a", || {
if self.a.is_some() {
if self.a.unwrap() {
Ok(E::Fr::one())
} else {
Ok(E::Fr::zero())
}
} else {
Err(SynthesisError::AssignmentMissing)
}
})?;
cs.enforce(
|| "a_boolean_constraint",
|lc| lc + CS::one() - a_var,
|lc| lc + a_var,
|lc| lc
);
let b_var = cs.alloc(|| "b", || {
if self.b.is_some() {
if self.b.unwrap() {
Ok(E::Fr::one())
} else {
Ok(E::Fr::zero())
}
} else {
Err(SynthesisError::AssignmentMissing)
}
})?;
cs.enforce(
|| "b_boolean_constraint",
|lc| lc + CS::one() - b_var,
|lc| lc + b_var,
|lc| lc
);
let c_var = cs.alloc_input(|| "c", || {
if self.a.is_some() && self.b.is_some() {
if self.a.unwrap() ^ self.b.unwrap() {
Ok(E::Fr::one())
} else {
Ok(E::Fr::zero())
}
} else {
Err(SynthesisError::AssignmentMissing)
}
})?;
cs.enforce(
|| "c_xor_constraint",
|lc| lc + a_var + a_var,
|lc| lc + b_var,
|lc| lc + a_var + b_var - c_var
);
Ok(())
}
}
#[test]
fn test_xordemo() {
let g1 = Fr::one();

@ -4,7 +4,7 @@ use pairing::{
CurveAffine
};
use ff::{PrimeField};
use pairing::ff::{PrimeField};
use super::{
Proof,
@ -12,7 +12,7 @@ use super::{
PreparedVerifyingKey
};
use ::{
use crate::{
SynthesisError
};

82
src/group.rs Normal file

@ -0,0 +1,82 @@
use pairing::{
Engine,
CurveProjective
};
use pairing::ff::{
Field,
PrimeField
};
use super::{
SynthesisError
};
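/// A minimal abstraction over additive-group values: both curve points (`Point`)
/// and field scalars (`Scalar`) below implement it, so code can be generic over
/// either representation using only these group operations.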
pub trait Group<E: Engine>: Sized + Copy + Clone + Send + Sync {
fn group_zero() -> Self;
fn group_mul_assign(&mut self, by: &E::Fr);
fn group_add_assign(&mut self, other: &Self);
fn group_sub_assign(&mut self, other: &Self);
}
pub struct Point<G: CurveProjective>(pub G);
impl<G: CurveProjective> PartialEq for Point<G> {
fn eq(&self, other: &Point<G>) -> bool {
self.0 == other.0
}
}
impl<G: CurveProjective> Copy for Point<G> { }
impl<G: CurveProjective> Clone for Point<G> {
fn clone(&self) -> Point<G> {
*self
}
}
impl<G: CurveProjective> Group<G::Engine> for Point<G> {
fn group_zero() -> Self {
Point(G::zero())
}
fn group_mul_assign(&mut self, by: &G::Scalar) {
self.0.mul_assign(by.into_repr());
}
fn group_add_assign(&mut self, other: &Self) {
self.0.add_assign(&other.0);
}
fn group_sub_assign(&mut self, other: &Self) {
self.0.sub_assign(&other.0);
}
}
pub struct Scalar<E: Engine>(pub E::Fr);
impl<E: Engine> PartialEq for Scalar<E> {
fn eq(&self, other: &Scalar<E>) -> bool {
self.0 == other.0
}
}
impl<E: Engine> Copy for Scalar<E> { }
impl<E: Engine> Clone for Scalar<E> {
fn clone(&self) -> Scalar<E> {
*self
}
}
impl<E: Engine> Group<E> for Scalar<E> {
fn group_zero() -> Self {
Scalar(E::Fr::zero())
}
fn group_mul_assign(&mut self, by: &E::Fr) {
self.0.mul_assign(by);
}
fn group_add_assign(&mut self, other: &Self) {
self.0.add_assign(&other.0);
}
fn group_sub_assign(&mut self, other: &Self) {
self.0.sub_assign(&other.0);
}
}

@ -1,431 +1,45 @@
#![allow(unused_imports)]
extern crate pairing;
extern crate pairing as pairing_import;
extern crate rand;
extern crate num_cpus;
extern crate futures;
extern crate futures_cpupool;
extern crate bit_vec;
extern crate crossbeam;
extern crate byteorder;
extern crate ff;
pub mod multicore;
mod multiexp;
pub mod domain;
pub mod groth16;
use pairing::{Engine};
use ff::Field;
#[cfg(feature = "gm17")]
pub mod gm17;
#[cfg(feature = "sonic")]
pub mod sonic;
use std::ops::{Add, Sub};
use std::fmt;
use std::error::Error;
use std::io;
use std::marker::PhantomData;
mod group;
mod source;
mod multiexp;
/// Computations are expressed in terms of arithmetic circuits, in particular
/// rank-1 quadratic constraint systems. The `Circuit` trait represents a
/// circuit that can be synthesized. The `synthesize` method is called during
/// CRS generation and during proving.
pub trait Circuit<E: Engine> {
/// Synthesize the circuit into a rank-1 quadratic constraint system
fn synthesize<CS: ConstraintSystem<E>>(
self,
cs: &mut CS
) -> Result<(), SynthesisError>;
#[cfg(test)]
mod tests;
#[cfg(feature = "multicore")]
mod multicore;
#[cfg(feature = "singlecore")]
mod singlecore;
mod worker {
#[cfg(feature = "multicore")]
pub use crate::multicore::*;
#[cfg(feature = "singlecore")]
pub use crate::singlecore::*;
}
/// Represents a variable in our constraint system.
#[derive(Copy, Clone, Debug)]
pub struct Variable(Index);
impl Variable {
/// This constructs a variable with an arbitrary index.
/// Circuit implementations are not recommended to use this.
pub fn new_unchecked(idx: Index) -> Variable {
Variable(idx)
}
/// This returns the index underlying the variable.
/// Circuit implementations are not recommended to use this.
pub fn get_unchecked(&self) -> Index {
self.0
}
pub mod pairing {
pub use pairing_import::*;
}
/// Represents the index of either an input variable or
/// auxillary variable.
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum Index {
Input(usize),
Aux(usize)
}
/// This represents a linear combination of some variables, with coefficients
/// in the scalar field of a pairing-friendly elliptic curve group.
#[derive(Clone)]
pub struct LinearCombination<E: Engine>(Vec<(Variable, E::Fr)>);
impl<E: Engine> AsRef<[(Variable, E::Fr)]> for LinearCombination<E> {
fn as_ref(&self) -> &[(Variable, E::Fr)] {
&self.0
}
}
impl<E: Engine> LinearCombination<E> {
pub fn zero() -> LinearCombination<E> {
LinearCombination(vec![])
}
}
impl<E: Engine> Add<(E::Fr, Variable)> for LinearCombination<E> {
type Output = LinearCombination<E>;
fn add(mut self, (coeff, var): (E::Fr, Variable)) -> LinearCombination<E> {
self.0.push((var, coeff));
self
}
}
impl<E: Engine> Sub<(E::Fr, Variable)> for LinearCombination<E> {
type Output = LinearCombination<E>;
fn sub(self, (mut coeff, var): (E::Fr, Variable)) -> LinearCombination<E> {
coeff.negate();
self + (coeff, var)
}
}
impl<E: Engine> Add<Variable> for LinearCombination<E> {
type Output = LinearCombination<E>;
fn add(self, other: Variable) -> LinearCombination<E> {
self + (E::Fr::one(), other)
}
}
impl<E: Engine> Sub<Variable> for LinearCombination<E> {
type Output = LinearCombination<E>;
fn sub(self, other: Variable) -> LinearCombination<E> {
self - (E::Fr::one(), other)
}
}
impl<'a, E: Engine> Add<&'a LinearCombination<E>> for LinearCombination<E> {
type Output = LinearCombination<E>;
fn add(mut self, other: &'a LinearCombination<E>) -> LinearCombination<E> {
for s in &other.0 {
self = self + (s.1, s.0);
}
self
}
}
impl<'a, E: Engine> Sub<&'a LinearCombination<E>> for LinearCombination<E> {
type Output = LinearCombination<E>;
fn sub(mut self, other: &'a LinearCombination<E>) -> LinearCombination<E> {
for s in &other.0 {
self = self - (s.1, s.0);
}
self
}
}
impl<'a, E: Engine> Add<(E::Fr, &'a LinearCombination<E>)> for LinearCombination<E> {
type Output = LinearCombination<E>;
fn add(mut self, (coeff, other): (E::Fr, &'a LinearCombination<E>)) -> LinearCombination<E> {
for s in &other.0 {
let mut tmp = s.1;
tmp.mul_assign(&coeff);
self = self + (tmp, s.0);
}
self
}
}
impl<'a, E: Engine> Sub<(E::Fr, &'a LinearCombination<E>)> for LinearCombination<E> {
type Output = LinearCombination<E>;
fn sub(mut self, (coeff, other): (E::Fr, &'a LinearCombination<E>)) -> LinearCombination<E> {
for s in &other.0 {
let mut tmp = s.1;
tmp.mul_assign(&coeff);
self = self - (tmp, s.0);
}
self
}
}
/// This is an error that could occur during circuit synthesis contexts,
/// such as CRS generation, proving or verification.
#[derive(Debug)]
pub enum SynthesisError {
/// During synthesis, we lacked knowledge of a variable assignment.
AssignmentMissing,
/// During synthesis, we divided by zero.
DivisionByZero,
/// During synthesis, we constructed an unsatisfiable constraint system.
Unsatisfiable,
/// During synthesis, our polynomials ended up being too high of degree
PolynomialDegreeTooLarge,
/// During proof generation, we encountered an identity in the CRS
UnexpectedIdentity,
/// During proof generation, we encountered an I/O error with the CRS
IoError(io::Error),
/// During verification, our verifying key was malformed.
MalformedVerifyingKey,
/// During CRS generation, we observed an unconstrained auxiliary variable
UnconstrainedVariable
}
impl From<io::Error> for SynthesisError {
fn from(e: io::Error) -> SynthesisError {
SynthesisError::IoError(e)
}
}
impl Error for SynthesisError {
fn description(&self) -> &str {
match *self {
SynthesisError::AssignmentMissing => "an assignment for a variable could not be computed",
SynthesisError::DivisionByZero => "division by zero",
SynthesisError::Unsatisfiable => "unsatisfiable constraint system",
SynthesisError::PolynomialDegreeTooLarge => "polynomial degree is too large",
SynthesisError::UnexpectedIdentity => "encountered an identity element in the CRS",
SynthesisError::IoError(_) => "encountered an I/O error",
SynthesisError::MalformedVerifyingKey => "malformed verifying key",
SynthesisError::UnconstrainedVariable => "auxiliary variable was unconstrained"
}
}
}
impl fmt::Display for SynthesisError {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
if let &SynthesisError::IoError(ref e) = self {
write!(f, "I/O error: ")?;
e.fmt(f)
} else {
write!(f, "{}", self.description())
}
}
}
/// Represents a constraint system which can have new variables
/// allocated and constraints formed between them.
pub trait ConstraintSystem<E: Engine>: Sized {
/// Represents the type of the "root" of this constraint system
/// so that nested namespaces can minimize indirection.
type Root: ConstraintSystem<E>;
/// Return the "one" input variable
fn one() -> Variable {
Variable::new_unchecked(Index::Input(0))
}
/// Allocate a private variable in the constraint system. The provided function is used to
/// determine the assignment of the variable. The given `annotation` function is invoked
/// in testing contexts in order to derive a unique name for this variable in the current
/// namespace.
fn alloc<F, A, AR>(
&mut self,
annotation: A,
f: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>;
/// Allocate a public variable in the constraint system. The provided function is used to
/// determine the assignment of the variable.
fn alloc_input<F, A, AR>(
&mut self,
annotation: A,
f: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>;
/// Enforce that `A` * `B` = `C`. The `annotation` function is invoked in testing contexts
/// in order to derive a unique name for the constraint in the current namespace.
fn enforce<A, AR, LA, LB, LC>(
&mut self,
annotation: A,
a: LA,
b: LB,
c: LC
)
where A: FnOnce() -> AR, AR: Into<String>,
LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>;
/// Create a new (sub)namespace and enter into it. Not intended
/// for downstream use; use `namespace` instead.
fn push_namespace<NR, N>(&mut self, name_fn: N)
where NR: Into<String>, N: FnOnce() -> NR;
/// Exit out of the existing namespace. Not intended for
/// downstream use; use `namespace` instead.
fn pop_namespace(&mut self);
/// Gets the "root" constraint system, bypassing the namespacing.
/// Not intended for downstream use; use `namespace` instead.
fn get_root(&mut self) -> &mut Self::Root;
/// Begin a namespace for this constraint system.
fn namespace<'a, NR, N>(
&'a mut self,
name_fn: N
) -> Namespace<'a, E, Self::Root>
where NR: Into<String>, N: FnOnce() -> NR
{
self.get_root().push_namespace(name_fn);
Namespace(self.get_root(), PhantomData)
}
}
/// This is a "namespaced" constraint system which borrows a constraint system (pushing
/// a namespace context) and, when dropped, pops out of the namespace context.
pub struct Namespace<'a, E: Engine, CS: ConstraintSystem<E> + 'a>(&'a mut CS, PhantomData<E>);
impl<'cs, E: Engine, CS: ConstraintSystem<E>> ConstraintSystem<E> for Namespace<'cs, E, CS> {
type Root = CS::Root;
fn one() -> Variable {
CS::one()
}
fn alloc<F, A, AR>(
&mut self,
annotation: A,
f: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
{
self.0.alloc(annotation, f)
}
fn alloc_input<F, A, AR>(
&mut self,
annotation: A,
f: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
{
self.0.alloc_input(annotation, f)
}
fn enforce<A, AR, LA, LB, LC>(
&mut self,
annotation: A,
a: LA,
b: LB,
c: LC
)
where A: FnOnce() -> AR, AR: Into<String>,
LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>
{
self.0.enforce(annotation, a, b, c)
}
// Downstream users who use `namespace` will never interact with these
// functions and they will never be invoked because the namespace is
// never a root constraint system.
fn push_namespace<NR, N>(&mut self, _: N)
where NR: Into<String>, N: FnOnce() -> NR
{
panic!("only the root's push_namespace should be called");
}
fn pop_namespace(&mut self)
{
panic!("only the root's pop_namespace should be called");
}
fn get_root(&mut self) -> &mut Self::Root
{
self.0.get_root()
}
}
impl<'a, E: Engine, CS: ConstraintSystem<E>> Drop for Namespace<'a, E, CS> {
fn drop(&mut self) {
self.get_root().pop_namespace()
}
}
/// Convenience implementation of ConstraintSystem<E> for mutable references to
/// constraint systems.
impl<'cs, E: Engine, CS: ConstraintSystem<E>> ConstraintSystem<E> for &'cs mut CS {
type Root = CS::Root;
fn one() -> Variable {
CS::one()
}
fn alloc<F, A, AR>(
&mut self,
annotation: A,
f: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
{
(**self).alloc(annotation, f)
}
fn alloc_input<F, A, AR>(
&mut self,
annotation: A,
f: F
) -> Result<Variable, SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>
{
(**self).alloc_input(annotation, f)
}
fn enforce<A, AR, LA, LB, LC>(
&mut self,
annotation: A,
a: LA,
b: LB,
c: LC
)
where A: FnOnce() -> AR, AR: Into<String>,
LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>
{
(**self).enforce(annotation, a, b, c)
}
fn push_namespace<NR, N>(&mut self, name_fn: N)
where NR: Into<String>, N: FnOnce() -> NR
{
(**self).push_namespace(name_fn)
}
fn pop_namespace(&mut self)
{
(**self).pop_namespace()
}
fn get_root(&mut self) -> &mut Self::Root
{
(**self).get_root()
}
}
mod cs;
pub use self::cs::*;
static mut VERBOSE_SWITCH: i8 = -1;

@ -4,10 +4,14 @@
//! crossbeam but may be extended in the future to
//! allow for various parallelism strategies.
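//! Typical usage elsewhere in this crate is
//! `worker.scope(n, |scope, chunk| { ... scope.spawn(move |_| { ... }) ... })`,
//! where `chunk` is the per-thread slice length chosen by the worker.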
use num_cpus;
use futures::{Future, IntoFuture, Poll};
use futures_cpupool::{CpuPool, CpuFuture};
use crossbeam::{self, Scope};
extern crate num_cpus;
extern crate futures;
extern crate futures_cpupool;
extern crate crossbeam;
use self::futures::{Future, IntoFuture, Poll};
use self::futures_cpupool::{CpuPool, CpuFuture};
use self::crossbeam::thread::{Scope};
#[derive(Clone)]
pub struct Worker {
@ -63,7 +67,7 @@ impl Worker {
crossbeam::scope(|scope| {
f(scope, chunk_size)
})
}).expect("must run")
}
}

@ -4,144 +4,19 @@ use pairing::{
Engine
};
use ff::{
use pairing::ff::{
PrimeField,
Field,
PrimeFieldRepr,
ScalarEngine};
use std::sync::Arc;
use std::io;
use bit_vec::{self, BitVec};
use std::iter;
use super::source::*;
use futures::{Future};
use super::multicore::Worker;
use super::worker::Worker;
use super::SynthesisError;
/// An object that builds a source of bases.
pub trait SourceBuilder<G: CurveAffine>: Send + Sync + 'static + Clone {
type Source: Source<G>;
fn new(self) -> Self::Source;
}
/// A source of bases, like an iterator.
pub trait Source<G: CurveAffine> {
/// Parses the element from the source. Fails if the point is at infinity.
fn add_assign_mixed(&mut self, to: &mut <G as CurveAffine>::Projective) -> Result<(), SynthesisError>;
/// Skips `amt` elements from the source, avoiding deserialization.
fn skip(&mut self, amt: usize) -> Result<(), SynthesisError>;
}
impl<G: CurveAffine> SourceBuilder<G> for (Arc<Vec<G>>, usize) {
type Source = (Arc<Vec<G>>, usize);
fn new(self) -> (Arc<Vec<G>>, usize) {
(self.0.clone(), self.1)
}
}
impl<G: CurveAffine> Source<G> for (Arc<Vec<G>>, usize) {
fn add_assign_mixed(&mut self, to: &mut <G as CurveAffine>::Projective) -> Result<(), SynthesisError> {
if self.0.len() <= self.1 {
return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "expected more bases when adding from source").into());
}
if self.0[self.1].is_zero() {
return Err(SynthesisError::UnexpectedIdentity)
}
to.add_assign_mixed(&self.0[self.1]);
self.1 += 1;
Ok(())
}
fn skip(&mut self, amt: usize) -> Result<(), SynthesisError> {
if self.0.len() <= self.1 {
return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "expected more bases skipping from source").into());
}
self.1 += amt;
Ok(())
}
}
pub trait QueryDensity {
/// Returns whether the base exists.
type Iter: Iterator<Item=bool>;
fn iter(self) -> Self::Iter;
fn get_query_size(self) -> Option<usize>;
}
#[derive(Clone)]
pub struct FullDensity;
impl AsRef<FullDensity> for FullDensity {
fn as_ref(&self) -> &FullDensity {
self
}
}
impl<'a> QueryDensity for &'a FullDensity {
type Iter = iter::Repeat<bool>;
fn iter(self) -> Self::Iter {
iter::repeat(true)
}
fn get_query_size(self) -> Option<usize> {
None
}
}
#[derive(Clone)]
pub struct DensityTracker {
bv: BitVec,
total_density: usize
}
impl<'a> QueryDensity for &'a DensityTracker {
type Iter = bit_vec::Iter<'a>;
fn iter(self) -> Self::Iter {
self.bv.iter()
}
fn get_query_size(self) -> Option<usize> {
Some(self.bv.len())
}
}
impl DensityTracker {
pub fn new() -> DensityTracker {
DensityTracker {
bv: BitVec::new(),
total_density: 0
}
}
pub fn add_element(&mut self) {
self.bv.push(false);
}
pub fn inc(&mut self, idx: usize) {
if !self.bv.get(idx).unwrap() {
self.bv.set(idx, true);
self.total_density += 1;
}
}
pub fn get_total_density(&self) -> usize {
self.total_density
}
}
/// This genius piece of code works in the following way:
/// - choose `c` - the bit length of the region that one thread works on
/// - make `2^c - 1` buckets and initialize them with `G = infinity` (that's the equivalent of zero)
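// A self-contained toy sketch of the bucket strategy described above, using u64
// "scalars" and wrapping u128 addition in place of real group operations. The
// function name and the toy group are illustrative assumptions only, not part of
// this crate's API.
#[allow(dead_code)]
fn toy_bucket_multiexp(bases: &[u128], scalars: &[u64], c: u32) -> u128 {
    assert!(bases.len() == scalars.len() && c >= 1 && c < 64);
    let windows = (64 + c - 1) / c;          // number of c-bit regions per scalar
    let mask: u64 = (1u64 << c) - 1;
    let mut acc: u128 = 0;
    for w in (0..windows).rev() {            // highest region first
        for _ in 0..c {
            acc = acc.wrapping_add(acc);     // "double" c times between regions
        }
        // 2^c - 1 buckets, one per non-zero digit value
        let mut buckets = vec![0u128; (1usize << c) - 1];
        for (&base, &scalar) in bases.iter().zip(scalars.iter()) {
            let digit = ((scalar >> (w * c)) & mask) as usize;
            if digit != 0 {
                buckets[digit - 1] = buckets[digit - 1].wrapping_add(base);
            }
        }
        // Running-sum trick: the bucket for digit d ends up added d times.
        let mut running: u128 = 0;
        for bucket in buckets.into_iter().rev() {
            running = running.wrapping_add(bucket);
            acc = acc.wrapping_add(running);
        }
    }
    acc
}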
@ -307,6 +182,128 @@ pub fn multiexp<Q, D, G, S>(
multiexp_inner(pool, bases, density_map, exponents, 0, c, true)
}
/// Perform multi-exponentiation. The caller is responsible for ensuring that
/// the number of bases is the same as the number of exponents.
#[allow(dead_code)]
pub fn dense_multiexp<G: CurveAffine>(
pool: &Worker,
bases: & [G],
exponents: & [<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr]
) -> Result<<G as CurveAffine>::Projective, SynthesisError>
{
if exponents.len() != bases.len() {
return Err(SynthesisError::AssignmentMissing);
}
let c = if exponents.len() < 32 {
3u32
} else {
(f64::from(exponents.len() as u32)).ln().ceil() as u32
};
dense_multiexp_inner(pool, bases, exponents, 0, c, true)
}
fn dense_multiexp_inner<G: CurveAffine>(
pool: &Worker,
bases: & [G],
exponents: & [<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr],
mut skip: u32,
c: u32,
handle_trivial: bool
) -> Result<<G as CurveAffine>::Projective, SynthesisError>
{
use std::sync::{Mutex};
// Perform this region of the multiexp. We use a different strategy here: process one
// c-bit region at a time over all bases in parallel chunks, then move on to the next
// region, accumulating the partial sums under a shared Mutex.
let this = {
// let mask = (1u64 << c) - 1u64;
let this_region = Mutex::new(<G as CurveAffine>::Projective::zero());
let arc = Arc::new(this_region);
pool.scope(bases.len(), |scope, chunk| {
for (base, exp) in bases.chunks(chunk).zip(exponents.chunks(chunk)) {
let this_region_rwlock = arc.clone();
// let handle =
scope.spawn(move |_| {
let mut buckets = vec![<G as CurveAffine>::Projective::zero(); (1 << c) - 1];
// Accumulate the result
let mut acc = G::Projective::zero();
let zero = <G::Engine as ScalarEngine>::Fr::zero().into_repr();
let one = <G::Engine as ScalarEngine>::Fr::one().into_repr();
for (base, &exp) in base.iter().zip(exp.iter()) {
// let index = (exp.as_ref()[0] & mask) as usize;
// if index != 0 {
// buckets[index - 1].add_assign_mixed(base);
// }
// exp.shr(c as u32);
if exp != zero {
if exp == one {
if handle_trivial {
acc.add_assign_mixed(base);
}
} else {
let mut exp = exp;
exp.shr(skip);
let exp = exp.as_ref()[0] % (1 << c);
if exp != 0 {
buckets[(exp - 1) as usize].add_assign_mixed(base);
}
}
}
}
// buckets are filled with the corresponding accumulated value, now sum
let mut running_sum = G::Projective::zero();
for exp in buckets.into_iter().rev() {
running_sum.add_assign(&exp);
acc.add_assign(&running_sum);
}
let mut guard = match this_region_rwlock.lock() {
Ok(guard) => guard,
Err(_) => {
panic!("poisoned!");
// poisoned.into_inner()
}
};
(*guard).add_assign(&acc);
});
}
});
let this_region = Arc::try_unwrap(arc).unwrap();
let this_region = this_region.into_inner().unwrap();
this_region
};
skip += c;
if skip >= <G::Engine as ScalarEngine>::Fr::NUM_BITS {
// There isn't another region, and this will be the highest region
return Ok(this);
} else {
// next region is actually higher than this one, so double it enough times
let mut next_region = dense_multiexp_inner(
pool, bases, exponents, skip, c, false).unwrap();
for _ in 0..c {
next_region.double();
}
next_region.add_assign(&this);
return Ok(next_region);
}
}
#[test]
fn test_with_bls12() {
fn naive_multiexp<G: CurveAffine>(
@ -378,3 +375,44 @@ fn test_speed_with_bn256() {
let time_per_sample = duration_ns/(SAMPLES as f64);
println!("Tested on {} samples on {} CPUs with {} ns per multiplication", SAMPLES, cpus, time_per_sample);
}
#[test]
fn test_dense_multiexp() {
use rand::{XorShiftRng, SeedableRng, Rand, Rng};
use pairing::bn256::Bn256;
use num_cpus;
// const SAMPLES: usize = 1 << 22;
const SAMPLES: usize = 1 << 16;
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v = (0..SAMPLES).map(|_| <Bn256 as ScalarEngine>::Fr::rand(rng).into_repr()).collect::<Vec<_>>();
let g = (0..SAMPLES).map(|_| <Bn256 as Engine>::G1::rand(rng).into_affine()).collect::<Vec<_>>();
println!("Done generating test points and scalars");
let pool = Worker::new();
let start = std::time::Instant::now();
let dense = dense_multiexp(
&pool, &g, &v.clone()).unwrap();
let duration_ns = start.elapsed().as_nanos() as f64;
println!("{} ns for dense for {} samples", duration_ns, SAMPLES);
let start = std::time::Instant::now();
let sparse = multiexp(
&pool,
(Arc::new(g), 0),
FullDensity,
Arc::new(v)
).wait().unwrap();
let duration_ns = start.elapsed().as_nanos() as f64;
println!("{} ns for sparse for {} samples", duration_ns, SAMPLES);
assert_eq!(dense, sparse);
}

93
src/singlecore.rs Normal file

@ -0,0 +1,93 @@
//! This is a dummy interface to substitute multicore worker
//! in environments like WASM
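//! All work here runs synchronously on the calling thread: `scope` invokes its
//! closure directly with the full element count as the chunk size, `spawn` simply
//! calls the given closure, and `compute` resolves the future immediately via `wait()`.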
extern crate futures;
use std::marker::PhantomData;
use self::futures::{Future, IntoFuture, Poll};
use self::futures::future::{result, FutureResult};
#[derive(Clone)]
pub struct Worker {
cpus: usize,
}
impl Worker {
// We don't expose this outside the library so that
// all `Worker` instances have the same number of
// CPUs configured.
pub(crate) fn new_with_cpus(_cpus: usize) -> Worker {
// The single-core worker ignores the requested CPU count and always
// behaves as if exactly one CPU were available.
Worker {
cpus: 1,
}
}
pub fn new() -> Worker {
Self::new_with_cpus(1)
}
pub fn log_num_cpus(&self) -> u32 {
0u32
}
pub fn compute<F, R>(
&self, f: F
) -> WorkerFuture<R::Item, R::Error>
where F: FnOnce() -> R + Send + 'static,
R: IntoFuture + 'static,
R::Future: Send + 'static,
R::Item: Send + 'static,
R::Error: Send + 'static
{
let future = f().into_future();
WorkerFuture {
future: result(future.wait())
}
}
pub fn scope<'a, F, R>(
&self,
elements: usize,
f: F
) -> R
where F: FnOnce(&Scope<'a>, usize) -> R
{
let chunk_size = elements;
let scope = Scope{
_marker: PhantomData
};
f(&scope, chunk_size)
}
}
#[derive(Clone)]
pub struct Scope<'a> {
_marker: PhantomData<&'a usize>
}
impl<'a> Scope<'a> {
pub fn spawn<F, R>(
&self,
f: F
) -> R
where F: FnOnce(&Scope<'a>) -> R
{
f(&self)
}
}
pub struct WorkerFuture<T, E> {
future: FutureResult<T, E>
}
impl<T: Send + 'static, E: Send + 'static> Future for WorkerFuture<T, E> {
type Item = T;
type Error = E;
fn poll(&mut self) -> Poll<Self::Item, Self::Error>
{
self.future.poll()
}
}

27
src/sonic/README.md Normal file

@ -0,0 +1,27 @@
# Description
Initial SONIC proof system integration using the code from the [original implementation](https://github.com/zknuckles/sonic.git). It's here for experimental purposes, to evaluate the following:
- How applicable the "helped" procedure is to the Ethereum setting
- What the final verification cost is for the "helped" and "unhelped" procedures
- Prover efficiency in both cases
- Implementation of a memory-constrained prover and helper
- Smart-contract implementation of verifiers
- Code cleanup
- Migration to smart-contract compatible transcripts
## TODO Plan
- [x] Test with public inputs
- [x] Test on BN256
- [x] Parallelize using existing primitives
- [x] Implement polynomial parallelized evaluation
- [x] Make a custom transcript implementation that is easy to port to a smart contract
- [x] Basic Ethereum smart-contract
- [x] Add blinding factors
- [ ] Implement unhelped version
- [x] Implement a part of S poly precomputation (S2)
- [x] Implement a "well formed" argument
- [x] Implement a coefficients product argument
- [ ] Implement a permutation argument
- [ ] Implement a synthesizer for the proper form of the S polynomial

139
src/sonic/cs/lc.rs Normal file

@ -0,0 +1,139 @@
use pairing::ff::{Field};
use pairing::{Engine};
use std::ops::{Add, Sub, Neg};
/// This represents a linear combination of some variables, with coefficients
/// in the scalar field of a pairing-friendly elliptic curve group.
#[derive(Clone)]
pub struct LinearCombination<E: Engine>(Vec<(Variable, Coeff<E>)>);
impl<E: Engine> From<Variable> for LinearCombination<E> {
fn from(var: Variable) -> LinearCombination<E> {
LinearCombination::<E>::zero() + var
}
}
impl<E: Engine> AsRef<[(Variable, Coeff<E>)]> for LinearCombination<E> {
fn as_ref(&self) -> &[(Variable, Coeff<E>)] {
&self.0
}
}
impl<E: Engine> LinearCombination<E> {
pub fn zero() -> LinearCombination<E> {
LinearCombination(vec![])
}
}
impl<E: Engine> Add<(Coeff<E>, Variable)> for LinearCombination<E> {
type Output = LinearCombination<E>;
fn add(mut self, (coeff, var): (Coeff<E>, Variable)) -> LinearCombination<E> {
self.0.push((var, coeff));
self
}
}
impl<E: Engine> Sub<(Coeff<E>, Variable)> for LinearCombination<E> {
type Output = LinearCombination<E>;
fn sub(self, (coeff, var): (Coeff<E>, Variable)) -> LinearCombination<E> {
self + (-coeff, var)
}
}
impl<E: Engine> Add<Variable> for LinearCombination<E> {
type Output = LinearCombination<E>;
fn add(self, other: Variable) -> LinearCombination<E> {
self + (Coeff::One, other)
}
}
impl<E: Engine> Sub<Variable> for LinearCombination<E> {
type Output = LinearCombination<E>;
fn sub(self, other: Variable) -> LinearCombination<E> {
self - (Coeff::One, other)
}
}
impl<'a, E: Engine> Add<&'a LinearCombination<E>> for LinearCombination<E> {
type Output = LinearCombination<E>;
fn add(mut self, other: &'a LinearCombination<E>) -> LinearCombination<E> {
for s in &other.0 {
self = self + (s.1, s.0);
}
self
}
}
impl<'a, E: Engine> Sub<&'a LinearCombination<E>> for LinearCombination<E> {
type Output = LinearCombination<E>;
fn sub(mut self, other: &'a LinearCombination<E>) -> LinearCombination<E> {
for s in &other.0 {
self = self - (s.1, s.0);
}
self
}
}
#[derive(Copy, Clone, Debug)]
pub enum Variable {
A(usize),
B(usize),
C(usize),
}
#[derive(Debug)]
pub enum Coeff<E: Engine> {
Zero,
One,
NegativeOne,
Full(E::Fr),
}
impl<E: Engine> Coeff<E> {
pub fn multiply(&self, with: &mut E::Fr) {
match self {
Coeff::Zero => {
*with = E::Fr::zero();
},
Coeff::One => {},
Coeff::NegativeOne => {
with.negate();
},
Coeff::Full(val) => {
with.mul_assign(val);
}
}
}
}
impl<E: Engine> Copy for Coeff<E> {}
impl<E: Engine> Clone for Coeff<E> {
fn clone(&self) -> Self {
*self
}
}
impl<E: Engine> Neg for Coeff<E> {
type Output = Coeff<E>;
fn neg(self) -> Self {
match self {
Coeff::Zero => Coeff::Zero,
Coeff::One => Coeff::NegativeOne,
Coeff::NegativeOne => Coeff::One,
Coeff::Full(mut a) => {
a.negate();
Coeff::Full(a)
}
}
}
}

366
src/sonic/cs/mod.rs Normal file

@ -0,0 +1,366 @@
extern crate pairing;
use pairing::ff::{Field};
use pairing::{Engine};
use crate::{SynthesisError};
use std::marker::PhantomData;
mod lc;
pub use self::lc::{Coeff, Variable, LinearCombination};
pub trait Circuit<E: Engine> {
fn synthesize<CS: ConstraintSystem<E>>(&self, cs: &mut CS) -> Result<(), SynthesisError>;
}
pub trait ConstraintSystem<E: Engine> {
const ONE: Variable;
fn alloc<F>(&mut self, value: F) -> Result<Variable, SynthesisError>
where
F: FnOnce() -> Result<E::Fr, SynthesisError>;
fn alloc_input<F>(&mut self, value: F) -> Result<Variable, SynthesisError>
where
F: FnOnce() -> Result<E::Fr, SynthesisError>;
fn enforce_zero(&mut self, lc: LinearCombination<E>);
fn multiply<F>(&mut self, values: F) -> Result<(Variable, Variable, Variable), SynthesisError>
where
F: FnOnce() -> Result<(E::Fr, E::Fr, E::Fr), SynthesisError>;
// TODO: get rid of this
fn get_value(&self, _var: Variable) -> Result<E::Fr, ()> {
Err(())
}
}
/// This is a backend for the `SynthesisDriver` to relay information about
/// the concrete circuit. One backend might just collect basic information
/// about the circuit for verification, while another actually constructs
/// a witness.
pub trait Backend<E: Engine> {
/// Get the value of a variable. Can return None if we don't know.
fn get_var(&self, _variable: Variable) -> Option<E::Fr> { None }
/// Set the value of a variable. Might error if this backend expects to know it.
fn set_var<F>(&mut self, _variable: Variable, _value: F) -> Result<(), SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError> { Ok(()) }
/// Create a new multiplication gate.
fn new_multiplication_gate(&mut self) { }
/// Create a new linear constraint.
fn new_linear_constraint(&mut self) { }
/// Insert a term into a linear constraint. TODO: bad name of function
fn insert_coefficient(&mut self, _var: Variable, _coeff: Coeff<E>) { }
/// Mark y^{_index} as the power of y corresponding to the public input
/// coefficient for the next public input, in the k(Y) polynomial.
fn new_k_power(&mut self, _index: usize) { }
}
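// Example (illustrative only, not part of the public API): the smallest useful
// backend just counts multiplication gates and leaves every other callback as
// its default no-op; the same pattern is used later in this change to recover
// `n` for an already-written circuit.
#[allow(dead_code)]
struct GateCounter {
    n: usize,
}

impl<'a, E: Engine> Backend<E> for &'a mut GateCounter {
    fn new_multiplication_gate(&mut self) {
        self.n += 1;
    }
}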
/// This is an abstraction which synthesizes circuits.
pub trait SynthesisDriver {
fn synthesize<E: Engine, C: Circuit<E>, B: Backend<E>>(backend: B, circuit: &C) -> Result<(), SynthesisError>;
}
pub struct Basic;
impl SynthesisDriver for Basic {
fn synthesize<E: Engine, C: Circuit<E>, B: Backend<E>>(backend: B, circuit: &C) -> Result<(), SynthesisError> {
struct Synthesizer<E: Engine, B: Backend<E>> {
backend: B,
current_variable: Option<usize>,
_marker: PhantomData<E>,
q: usize,
n: usize,
}
impl<E: Engine, B: Backend<E>> ConstraintSystem<E> for Synthesizer<E, B> {
const ONE: Variable = Variable::A(1);
fn alloc<F>(&mut self, value: F) -> Result<Variable, SynthesisError>
where
F: FnOnce() -> Result<E::Fr, SynthesisError>
{
match self.current_variable.take() {
Some(index) => {
let var_a = Variable::A(index);
let var_b = Variable::B(index);
let var_c = Variable::C(index);
let mut product = None;
let value_a = self.backend.get_var(var_a);
self.backend.set_var(var_b, || {
let value_b = value()?;
product = Some(value_a.ok_or(SynthesisError::AssignmentMissing)?);
product.as_mut().map(|product| product.mul_assign(&value_b));
Ok(value_b)
})?;
self.backend.set_var(var_c, || {
product.ok_or(SynthesisError::AssignmentMissing)
})?;
self.current_variable = None;
Ok(var_b)
},
None => {
self.n += 1;
let index = self.n;
self.backend.new_multiplication_gate();
let var_a = Variable::A(index);
self.backend.set_var(var_a, value)?;
self.current_variable = Some(index);
Ok(var_a)
}
}
}
fn alloc_input<F>(&mut self, value: F) -> Result<Variable, SynthesisError>
where
F: FnOnce() -> Result<E::Fr, SynthesisError>
{
let input_var = self.alloc(value)?;
self.enforce_zero(LinearCombination::zero() + input_var);
self.backend.new_k_power(self.q);
Ok(input_var)
}
fn enforce_zero(&mut self, lc: LinearCombination<E>)
{
self.q += 1;
self.backend.new_linear_constraint();
for (var, coeff) in lc.as_ref() {
self.backend.insert_coefficient(*var, *coeff);
}
}
fn multiply<F>(&mut self, values: F) -> Result<(Variable, Variable, Variable), SynthesisError>
where
F: FnOnce() -> Result<(E::Fr, E::Fr, E::Fr), SynthesisError>
{
self.n += 1;
let index = self.n;
self.backend.new_multiplication_gate();
let a = Variable::A(index);
let b = Variable::B(index);
let c = Variable::C(index);
let mut b_val = None;
let mut c_val = None;
self.backend.set_var(a, || {
let (a, b, c) = values()?;
b_val = Some(b);
c_val = Some(c);
Ok(a)
})?;
self.backend.set_var(b, || {
b_val.ok_or(SynthesisError::AssignmentMissing)
})?;
self.backend.set_var(c, || {
c_val.ok_or(SynthesisError::AssignmentMissing)
})?;
Ok((a, b, c))
}
fn get_value(&self, var: Variable) -> Result<E::Fr, ()> {
self.backend.get_var(var).ok_or(())
}
}
let mut tmp: Synthesizer<E, B> = Synthesizer {
backend: backend,
current_variable: None,
_marker: PhantomData,
q: 0,
n: 0,
};
let one = tmp.alloc_input(|| Ok(E::Fr::one())).expect("should have no issues");
match (one, <Synthesizer<E, B> as ConstraintSystem<E>>::ONE) {
(Variable::A(1), Variable::A(1)) => {},
_ => panic!("one variable is incorrect")
}
circuit.synthesize(&mut tmp)?;
// let blindings_to_add = 6;
// for i in 0..blindings_to_add {
// match tmp.current_variable.take() {
// Some(index) => {
// let var_a = Variable::A(index);
// let var_b = Variable::B(index);
// let var_c = Variable::C(index);
// let mut product = None;
// let value_a = tmp.backend.get_var(var_a);
// tmp.backend.set_var(var_b, || {
// let value_b = E::Fr::one();
// product = Some(value_a.ok_or(SynthesisError::AssignmentMissing)?);
// product.as_mut().map(|product| product.mul_assign(&value_b));
// Ok(value_b)
// })?;
// tmp.backend.set_var(var_c, || {
// product.ok_or(SynthesisError::AssignmentMissing)
// })?;
// tmp.current_variable = None;
// },
// None => {
// self.n += 1;
// let index = self.n ;
// tmp.backend.new_multiplication_gate();
// let var_a = Variable::A(index);
// tmp.backend.set_var(var_a, value)?;
// tmp.current_variable = Some(index);
// }
// }
// }
// TODO: add blinding factors so we actually get zero-knowledge
Ok(())
}
}
pub struct Nonassigning;
impl SynthesisDriver for Nonassigning {
fn synthesize<E: Engine, C: Circuit<E>, B: Backend<E>>(backend: B, circuit: &C) -> Result<(), SynthesisError> {
struct NonassigningSynthesizer<E: Engine, B: Backend<E>> {
backend: B,
current_variable: Option<usize>,
_marker: PhantomData<E>,
q: usize,
n: usize,
}
impl<E: Engine, B: Backend<E>> ConstraintSystem<E> for NonassigningSynthesizer<E, B> {
const ONE: Variable = Variable::A(1);
fn alloc<F>(&mut self, _value: F) -> Result<Variable, SynthesisError>
where
F: FnOnce() -> Result<E::Fr, SynthesisError>
{
match self.current_variable.take() {
Some(index) => {
let var_b = Variable::B(index);
self.current_variable = None;
Ok(var_b)
},
None => {
self.n += 1;
let index = self.n;
self.backend.new_multiplication_gate();
let var_a = Variable::A(index);
self.current_variable = Some(index);
Ok(var_a)
}
}
}
fn alloc_input<F>(&mut self, value: F) -> Result<Variable, SynthesisError>
where
F: FnOnce() -> Result<E::Fr, SynthesisError>
{
let input_var = self.alloc(value)?;
self.enforce_zero(LinearCombination::zero() + input_var);
self.backend.new_k_power(self.q);
Ok(input_var)
}
fn enforce_zero(&mut self, lc: LinearCombination<E>)
{
self.q += 1;
self.backend.new_linear_constraint();
for (var, coeff) in lc.as_ref() {
self.backend.insert_coefficient(*var, *coeff);
}
}
fn multiply<F>(&mut self, _values: F) -> Result<(Variable, Variable, Variable), SynthesisError>
where
F: FnOnce() -> Result<(E::Fr, E::Fr, E::Fr), SynthesisError>
{
self.n += 1;
let index = self.n;
self.backend.new_multiplication_gate();
let a = Variable::A(index);
let b = Variable::B(index);
let c = Variable::C(index);
Ok((a, b, c))
}
fn get_value(&self, var: Variable) -> Result<E::Fr, ()> {
self.backend.get_var(var).ok_or(())
}
}
let mut tmp: NonassigningSynthesizer<E, B> = NonassigningSynthesizer {
backend: backend,
current_variable: None,
_marker: PhantomData,
q: 0,
n: 0,
};
let one = tmp.alloc_input(|| Ok(E::Fr::one())).expect("should have no issues");
match (one, <NonassigningSynthesizer<E, B> as ConstraintSystem<E>>::ONE) {
(Variable::A(1), Variable::A(1)) => {},
_ => panic!("one variable is incorrect")
}
circuit.synthesize(&mut tmp)?;
// TODO: add blinding factors so we actually get zero-knowledge
// println!("n = {}", tmp.n);
Ok(())
}
}

@ -0,0 +1,33 @@
use pairing::ff::{Field};
use pairing::{Engine, CurveProjective};
use std::marker::PhantomData;
use rand::{Rand, Rng};
use super::{Proof, SxyAdvice};
use super::batch::Batch;
use super::poly::{SxEval, SyEval};
use super::parameters::{Parameters};
use super::helper::{Aggregate};
use crate::SynthesisError;
use crate::sonic::transcript::{Transcript, TranscriptProtocol};
use crate::sonic::util::*;
use crate::sonic::cs::{Backend, SynthesisDriver};
use crate::{Circuit};
use crate::sonic::sonic::AdaptorCircuit;
use crate::sonic::srs::SRS;
use crate::sonic::cs::Nonassigning;
use super::helper::create_aggregate as create_aggregate_sonic_circuit;
pub fn create_aggregate<E: Engine, C: Circuit<E> + Clone>(
circuit: C,
inputs: &[(Proof<E>, SxyAdvice<E>)],
params: &Parameters<E>,
) -> Aggregate<E>
{
let adapted_circuit = AdaptorCircuit(circuit);
create_aggregate_sonic_circuit::<_, _, Nonassigning>(&adapted_circuit, inputs, params)
}

@ -0,0 +1,146 @@
use pairing::ff::{Field};
use pairing::{Engine, CurveProjective};
use std::marker::PhantomData;
use super::{Proof, SxyAdvice};
use super::batch::Batch;
use super::poly::{SxEval, SyEval};
use super::parameters::{Parameters};
use crate::SynthesisError;
use crate::sonic::transcript::{Transcript, TranscriptProtocol};
use crate::sonic::util::*;
use crate::sonic::cs::{Backend, SynthesisDriver};
use crate::{Circuit};
use crate::sonic::sonic::AdaptorCircuit;
use crate::sonic::srs::SRS;
use crate::sonic::cs::Basic;
use super::prover::create_advice as create_advice_sonic_circuit;
use super::prover::create_advice_on_information_and_srs as create_advice_on_information_and_srs_sonic_circuit;
use super::prover::create_proof_on_srs as create_proof_on_srs_sonic_circuit;
// pub fn create_advice_on_information_and_srs<E: Engine, C: Circuit<E> + Clone, S: SynthesisDriver>(
pub fn create_advice_on_information_and_srs<E: Engine, C: Circuit<E> + Clone>(
circuit: C,
proof: &Proof<E>,
srs: &SRS<E>,
n: usize
) -> Result<SxyAdvice<E>, SynthesisError>
{
let adapted_circuit = AdaptorCircuit(circuit);
create_advice_on_information_and_srs_sonic_circuit::<_, _, Basic>(&adapted_circuit, proof, srs, n)
}
// pub fn create_advice<E: Engine, C: Circuit<E> + Clone, S: SynthesisDriver>(
pub fn create_advice<E: Engine, C: Circuit<E> + Clone>(
circuit: C,
proof: &Proof<E>,
parameters: &Parameters<E>,
) -> Result<SxyAdvice<E>, SynthesisError>
{
let n = parameters.vk.n;
create_advice_on_information_and_srs::<E, C>(circuit, proof, &parameters.srs, n)
}
// pub fn create_advice_on_srs<E: Engine, C: Circuit<E> + Clone, S: SynthesisDriver>(
pub fn create_advice_on_srs<E: Engine, C: Circuit<E> + Clone>(
circuit: C,
proof: &Proof<E>,
srs: &SRS<E>
) -> Result<SxyAdvice<E>, SynthesisError>
{
use crate::sonic::cs::Nonassigning;
let adapted_circuit = AdaptorCircuit(circuit.clone());
// annoying, but we need n to compute s(z, y), and this isn't
// precomputed anywhere yet
let n = {
struct CountN {
n: usize
}
impl<'a, E: Engine> Backend<E> for &'a mut CountN {
fn new_multiplication_gate(&mut self) {
self.n += 1;
}
}
let mut tmp = CountN{n:0};
Nonassigning::synthesize(&mut tmp, &adapted_circuit)?;
tmp.n
};
create_advice_on_information_and_srs::<E, C>(circuit, proof, srs, n)
}
// pub fn create_proof<E: Engine, C: Circuit<E> + Clone, S: SynthesisDriver>(
pub fn create_proof<E: Engine, C: Circuit<E> + Clone>(
circuit: C,
parameters: &Parameters<E>
) -> Result<Proof<E>, SynthesisError> {
create_proof_on_srs::<E, C>(circuit, &parameters.srs)
}
// pub fn create_proof_on_srs<E: Engine, C: Circuit<E> + Clone, S: SynthesisDriver>(
pub fn create_proof_on_srs<E: Engine, C: Circuit<E> + Clone>(
circuit: C,
srs: &SRS<E>
) -> Result<Proof<E>, SynthesisError>
{
let adapted_circuit = AdaptorCircuit(circuit);
create_proof_on_srs_sonic_circuit::<_, _, Basic>(&adapted_circuit, srs)
}
// #[test]
// fn my_fun_circuit_test() {
// use pairing::ff::PrimeField;
// use pairing::bls12_381::{Bls12, Fr};
// use super::*;
// use crate::sonic::cs::{Basic, ConstraintSystem, LinearCombination};
// struct MyCircuit;
// impl<E: Engine> Circuit<E> for MyCircuit {
// fn synthesize<CS: ConstraintSystem<E>>(&self, cs: &mut CS) -> Result<(), SynthesisError> {
// let (a, b, _) = cs.multiply(|| {
// Ok((
// E::Fr::from_str("10").unwrap(),
// E::Fr::from_str("20").unwrap(),
// E::Fr::from_str("200").unwrap(),
// ))
// })?;
// cs.enforce_zero(LinearCombination::from(a) + a - b);
// //let multiplier = cs.alloc_input(|| Ok(E::Fr::from_str("20").unwrap()))?;
// //cs.enforce_zero(LinearCombination::from(b) - multiplier);
// Ok(())
// }
// }
// let srs = SRS::<Bls12>::new(
// 20,
// Fr::from_str("22222").unwrap(),
// Fr::from_str("33333333").unwrap(),
// );
// let proof = create_proof_on_srs::<Bls12, _, Basic>(&MyCircuit, &srs).unwrap();
// use std::time::{Instant};
// let start = Instant::now();
// let mut batch = MultiVerifier::<Bls12, _, Basic>::new(MyCircuit, &srs).unwrap();
// for _ in 0..1 {
// batch.add_proof(&proof, &[/*Fr::from_str("20").unwrap()*/], |_, _| None);
// }
// assert!(batch.check_all());
// let elapsed = start.elapsed();
// println!("time to verify: {:?}", elapsed);
// }

@ -0,0 +1,102 @@
use pairing::ff::{Field};
use pairing::{Engine, CurveProjective};
use std::marker::PhantomData;
use rand::{Rand, Rng};
use super::{Proof, SxyAdvice};
use super::batch::Batch;
use super::poly::{SxEval, SyEval};
use super::parameters::{Parameters};
use super::helper::{Aggregate};
use crate::SynthesisError;
use crate::sonic::transcript::{Transcript, TranscriptProtocol};
use crate::sonic::util::*;
use crate::sonic::cs::{Backend, SynthesisDriver};
use crate::{Circuit};
use crate::sonic::sonic::AdaptorCircuit;
use crate::sonic::srs::SRS;
use crate::sonic::cs::Nonassigning;
use super::verifier::verify_aggregate_on_srs as verify_aggregate_on_srs_sonic_circuit;
use super::verifier::verify_proofs_on_srs as verify_proofs_on_srs_sonic_circuit;
pub fn verify_proofs<E: Engine, C: Circuit<E> + Clone, R: Rng>(
proofs: &[Proof<E>],
inputs: &[Vec<E::Fr>],
circuit: C,
rng: R,
params: &Parameters<E>,
) -> Result<bool, SynthesisError>
{
let adapted_circuit = AdaptorCircuit(circuit);
verify_proofs_on_srs_sonic_circuit::<_, _, Nonassigning, _>(proofs, inputs, adapted_circuit, rng, &params.srs)
}
/// Check multiple proofs with aggregation. The verifier's work is
/// not succinct due to the `S(X, Y)` evaluation
pub fn verify_aggregate<E: Engine, C: Circuit<E> + Clone, R: Rng>(
proofs: &[(Proof<E>, SxyAdvice<E>)],
aggregate: &Aggregate<E>,
inputs: &[Vec<E::Fr>],
circuit: C,
rng: R,
params: &Parameters<E>,
) -> Result<bool, SynthesisError> {
let adapted_circuit = AdaptorCircuit(circuit);
verify_aggregate_on_srs_sonic_circuit::<_, _, Nonassigning, _>(proofs, aggregate, inputs, adapted_circuit, rng, &params.srs)
}
// #[test]
// fn my_fun_circuit_test() {
// use pairing::ff::PrimeField;
// use pairing::bls12_381::{Bls12, Fr};
// use super::*;
// use crate::sonic::cs::{Basic, ConstraintSystem, LinearCombination};
// struct MyCircuit;
// impl<E: Engine> Circuit<E> for MyCircuit {
// fn synthesize<CS: ConstraintSystem<E>>(&self, cs: &mut CS) -> Result<(), SynthesisError> {
// let (a, b, _) = cs.multiply(|| {
// Ok((
// E::Fr::from_str("10").unwrap(),
// E::Fr::from_str("20").unwrap(),
// E::Fr::from_str("200").unwrap(),
// ))
// })?;
// cs.enforce_zero(LinearCombination::from(a) + a - b);
// //let multiplier = cs.alloc_input(|| Ok(E::Fr::from_str("20").unwrap()))?;
// //cs.enforce_zero(LinearCombination::from(b) - multiplier);
// Ok(())
// }
// }
// let srs = SRS::<Bls12>::new(
// 20,
// Fr::from_str("22222").unwrap(),
// Fr::from_str("33333333").unwrap(),
// );
// let proof = create_proof_on_srs::<Bls12, _, Basic>(&MyCircuit, &srs).unwrap();
// use std::time::{Instant};
// let start = Instant::now();
// let mut batch = MultiVerifier::<Bls12, _, Basic>::new(MyCircuit, &srs).unwrap();
// for _ in 0..1 {
// batch.add_proof(&proof, &[/*Fr::from_str("20").unwrap()*/], |_, _| None);
// }
// assert!(batch.check_all());
// let elapsed = start.elapsed();
// println!("time to verify: {:?}", elapsed);
// }

165
src/sonic/helped/batch.rs Normal file

@ -0,0 +1,165 @@
//! Our protocol allows the verification of multiple proofs and even
//! of individual proofs to batch the pairing operations such that
//! only a smaller, fixed number of pairings must occur for an entire
//! batch of proofs. This is possible because G2 elements are fixed
//! in our protocol and never appear in proofs; everything can be
//! combined probabilistically.
//!
//! This submodule contains the `Batch` abstraction for creating a
//! context for batch verification.
use pairing::ff::{Field};
use pairing::{Engine, CurveAffine, CurveProjective};
use crate::SynthesisError;
use crate::sonic::cs::{Backend, SynthesisDriver};
use crate::sonic::cs::{Circuit};
use super::parameters::VerifyingKey;
use crate::sonic::srs::SRS;
use crate::sonic::util::multiexp;
use std::marker::PhantomData;
// One of the primary functions of the `Batch` abstraction is handling
// Kate commitment openings:
//
// e(P', [\alpha(x - z)] H) = e(P, H) e([-v] G, [\alpha] H)
// ==> e(P', [\alpha x] H) e([-z] P', [\alpha] H) = e(P, H) e([-v] G, [\alpha] H)
//
// Many of these can be opened simultaneously by sampling random `r` and
// accumulating...
//
// e([r] P', [\alpha x] H)
// e([-rz] P', [\alpha] H)
// e([r] P, -H)
// e([rv] G, [\alpha] H)
//
// ... and checking that the result is the identity in the target group.
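//
// A rough mapping to the code below (an aid to reading, not new behaviour):
// `add_opening(p, r, z)` contributes the `[r] P'` and `[-rz] P'` terms,
// `add_commitment(p, r)` contributes the `[r] P` term paired with `-H`, and
// `add_opening_value` accumulates the scalar that `check_all` finally pairs as
// `[sum of r*v] G` against `[\alpha] H`.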
pub struct Batch<E: Engine> {
alpha_x: Vec<(E::G1Affine, E::Fr)>,
alpha_x_precomp: <E::G2Affine as CurveAffine>::Prepared,
alpha: Vec<(E::G1Affine, E::Fr)>,
alpha_precomp: <E::G2Affine as CurveAffine>::Prepared,
neg_h: Vec<(E::G1Affine, E::Fr)>,
neg_h_precomp: <E::G2Affine as CurveAffine>::Prepared,
neg_x_n_minus_d: Vec<(E::G1Affine, E::Fr)>,
neg_x_n_minus_d_precomp: <E::G2Affine as CurveAffine>::Prepared,
// The value paired with [\alpha] H, accumulated in the field
// to save group operations.
value: E::Fr,
g: E::G1Affine,
}
impl<E: Engine> Batch<E> {
pub fn new(srs: &SRS<E>, n: usize) -> Self {
Batch {
alpha_x: vec![],
alpha_x_precomp: srs.h_positive_x_alpha[1].prepare(),
alpha: vec![],
alpha_precomp: srs.h_positive_x_alpha[0].prepare(),
neg_h: vec![],
neg_h_precomp: {
let mut tmp = srs.h_negative_x[0];
tmp.negate();
tmp.prepare()
},
neg_x_n_minus_d: vec![],
neg_x_n_minus_d_precomp: {
let mut tmp = srs.h_negative_x[srs.d - n];
tmp.negate();
tmp.prepare()
},
value: E::Fr::zero(),
g: srs.g_positive_x[0],
}
}
pub fn new_from_key(vk: &VerifyingKey<E>) -> Self {
Batch {
alpha_x: vec![],
alpha_x_precomp: vk.alpha_x.prepare(),
alpha: vec![],
alpha_precomp: vk.alpha.prepare(),
neg_h: vec![],
neg_h_precomp: vk.neg_h.prepare(),
neg_x_n_minus_d: vec![],
neg_x_n_minus_d_precomp: vk.neg_x_n_minus_d.prepare(),
value: E::Fr::zero(),
g: E::G1Affine::one(),
}
}
pub fn add_opening(&mut self, p: E::G1Affine, mut r: E::Fr, point: E::Fr) {
self.alpha_x.push((p, r));
r.mul_assign(&point);
r.negate();
self.alpha.push((p, r));
}
pub fn add_commitment(&mut self, p: E::G1Affine, r: E::Fr) {
self.neg_h.push((p, r));
}
pub fn add_commitment_max_n(&mut self, p: E::G1Affine, r: E::Fr) {
self.neg_x_n_minus_d.push((p, r));
}
pub fn add_opening_value(&mut self, mut r: E::Fr, point: E::Fr) {
r.mul_assign(&point);
self.value.add_assign(&r);
}
pub fn check_all(mut self) -> bool {
self.alpha.push((self.g, self.value));
let alpha_x = multiexp(
self.alpha_x.iter().map(|x| &x.0),
self.alpha_x.iter().map(|x| &x.1),
).into_affine();
let alpha_x = alpha_x.prepare();
let alpha = multiexp(
self.alpha.iter().map(|x| &x.0),
self.alpha.iter().map(|x| &x.1),
).into_affine();
let alpha = alpha.prepare();
let neg_h = multiexp(
self.neg_h.iter().map(|x| &x.0),
self.neg_h.iter().map(|x| &x.1),
).into_affine();
let neg_h = neg_h.prepare();
let neg_x_n_minus_d = multiexp(
self.neg_x_n_minus_d.iter().map(|x| &x.0),
self.neg_x_n_minus_d.iter().map(|x| &x.1),
).into_affine();
let neg_x_n_minus_d = neg_x_n_minus_d.prepare();
E::final_exponentiation(&E::miller_loop(&[
(&alpha_x, &self.alpha_x_precomp),
(&alpha, &self.alpha_precomp),
(&neg_h, &self.neg_h_precomp),
(&neg_x_n_minus_d, &self.neg_x_n_minus_d_precomp),
])).unwrap() == E::Fqk::one()
}
}

@ -0,0 +1,693 @@
use rand::Rng;
use std::sync::Arc;
use pairing::{
Engine,
Wnaf,
CurveProjective,
CurveAffine
};
use pairing::ff::{
PrimeField,
Field
};
use super::{
Parameters,
VerifyingKey
};
use crate::{
SynthesisError,
Circuit,
ConstraintSystem,
LinearCombination,
Variable,
Index
};
use crate::domain::{
Scalar
};
use crate::multicore::{
Worker
};
use std::marker::PhantomData;
use crate::sonic::cs::{Backend, Basic, SynthesisDriver};
use crate::sonic::srs::SRS;
use crate::sonic::cs::LinearCombination as SonicLinearCombination;
use crate::sonic::cs::Circuit as SonicCircuit;
use crate::sonic::cs::ConstraintSystem as SonicConstraintSystem;
use crate::sonic::cs::Variable as SonicVariable;
use crate::sonic::cs::Coeff;
use crate::sonic::sonic::{AdaptorCircuit};
use super::parameters::NUM_BLINDINGS;
use crate::verbose_flag;
/// Generates random parameters (a circuit-sized common reference string and
/// verifying key) for a circuit, sampling the trapdoors `alpha` and `x` from `rng`.
pub fn generate_random_parameters<E, C, R>(
circuit: C,
rng: &mut R
) -> Result<Parameters<E>, SynthesisError>
where E: Engine, C: Circuit<E>, R: Rng
{
let alpha = rng.gen();
let x = rng.gen();
generate_parameters::<E, C>(
circuit,
alpha,
x
)
}
/// Circuit size information (inputs, auxiliary variables, constraints) together
/// with the corresponding SONIC parameters `k_map`, `n` and `q`.
#[derive(Clone, Debug)]
pub struct CircuitParameters<E: Engine> {
pub num_inputs: usize,
pub num_aux: usize,
pub num_constraints: usize,
pub k_map: Vec<usize>,
pub n: usize,
pub q: usize,
_marker: PhantomData<E>
}
/// This is the assembly structure that we'll use to synthesize the
/// circuit into.
struct GeneratorAssembly<'a, E: Engine, CS: SonicConstraintSystem<E> + 'a> {
cs: &'a mut CS,
num_inputs: usize,
num_aux: usize,
num_constraints: usize,
_marker: PhantomData<E>
}
impl<'a, E: Engine, CS: SonicConstraintSystem<E> + 'a> crate::ConstraintSystem<E>
for GeneratorAssembly<'a, E, CS>
{
type Root = Self;
// Important: in the SONIC adaptor `ONE` is input index 1 (`Variable::A(1)`), not index 0.
fn one() -> crate::Variable {
crate::Variable::new_unchecked(crate::Index::Input(1))
}
fn alloc<F, A, AR>(&mut self, _: A, f: F) -> Result<crate::Variable, crate::SynthesisError>
where
F: FnOnce() -> Result<E::Fr, crate::SynthesisError>,
A: FnOnce() -> AR,
AR: Into<String>,
{
self.num_aux += 1;
let var = self.cs.alloc(|| {
f().map_err(|_| crate::SynthesisError::AssignmentMissing)
}).map_err(|_| crate::SynthesisError::AssignmentMissing)?;
Ok(match var {
SonicVariable::A(index) => crate::Variable::new_unchecked(crate::Index::Input(index)),
SonicVariable::B(index) => crate::Variable::new_unchecked(crate::Index::Aux(index)),
_ => unreachable!(),
})
}
fn alloc_input<F, A, AR>(
&mut self,
_: A,
f: F,
) -> Result<crate::Variable, crate::SynthesisError>
where
F: FnOnce() -> Result<E::Fr, crate::SynthesisError>,
A: FnOnce() -> AR,
AR: Into<String>,
{
self.num_inputs += 1;
let var = self.cs.alloc_input(|| {
f().map_err(|_| crate::SynthesisError::AssignmentMissing)
}).map_err(|_| crate::SynthesisError::AssignmentMissing)?;
Ok(match var {
SonicVariable::A(index) => crate::Variable::new_unchecked(crate::Index::Input(index)),
SonicVariable::B(index) => crate::Variable::new_unchecked(crate::Index::Aux(index)),
_ => unreachable!(),
})
}
fn enforce<A, AR, LA, LB, LC>(&mut self, _: A, a: LA, b: LB, c: LC)
where
A: FnOnce() -> AR,
AR: Into<String>,
LA: FnOnce(crate::LinearCombination<E>) -> crate::LinearCombination<E>,
LB: FnOnce(crate::LinearCombination<E>) -> crate::LinearCombination<E>,
LC: FnOnce(crate::LinearCombination<E>) -> crate::LinearCombination<E>,
{
fn convert<E: Engine>(lc: crate::LinearCombination<E>) -> SonicLinearCombination<E> {
let mut ret = SonicLinearCombination::zero();
for &(v, coeff) in lc.as_ref().iter() {
let var = match v.get_unchecked() {
crate::Index::Input(i) => SonicVariable::A(i),
crate::Index::Aux(i) => SonicVariable::B(i),
};
ret = ret + (Coeff::Full(coeff), var);
}
ret
}
fn eval<E: Engine, CS: SonicConstraintSystem<E>>(
lc: &SonicLinearCombination<E>,
cs: &CS,
) -> Option<E::Fr> {
let mut ret = E::Fr::zero();
for &(v, coeff) in lc.as_ref().iter() {
let mut tmp = match cs.get_value(v) {
Ok(tmp) => tmp,
Err(_) => return None,
};
coeff.multiply(&mut tmp);
ret.add_assign(&tmp);
}
Some(ret)
}
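// Each R1CS constraint a * b = c becomes one SONIC multiplication gate plus
// three linear constraints tying the gate wires back to the original
// linear combinations.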
self.num_constraints += 1;
let a_lc = convert(a(crate::LinearCombination::zero()));
let a_value = eval(&a_lc, &*self.cs);
let b_lc = convert(b(crate::LinearCombination::zero()));
let b_value = eval(&b_lc, &*self.cs);
let c_lc = convert(c(crate::LinearCombination::zero()));
let c_value = eval(&c_lc, &*self.cs);
let (a, b, c) = self
.cs
.multiply(|| Ok((a_value.unwrap(), b_value.unwrap(), c_value.unwrap())))
.unwrap();
self.cs.enforce_zero(a_lc - a);
self.cs.enforce_zero(b_lc - b);
self.cs.enforce_zero(c_lc - c);
}
fn push_namespace<NR, N>(&mut self, _: N)
where
NR: Into<String>,
N: FnOnce() -> NR,
{
// Do nothing; we don't care about namespaces in this context.
}
fn pop_namespace(&mut self) {
// Do nothing; we don't care about namespaces in this context.
}
fn get_root(&mut self) -> &mut Self::Root {
self
}
}
/// Get circuit information such as the number of inputs, auxiliary variables and
/// constraints, along with the corresponding SONIC parameters
/// `k_map`, `n` and `q`.
pub fn get_circuit_parameters<E, C>(
circuit: C,
) -> Result<CircuitParameters<E>, SynthesisError>
where E: Engine, C: Circuit<E>
{
struct NonassigningSynthesizer<E: Engine, B: Backend<E>> {
backend: B,
current_variable: Option<usize>,
_marker: PhantomData<E>,
q: usize,
n: usize,
}
impl<E: Engine, B: Backend<E>> SonicConstraintSystem<E> for NonassigningSynthesizer<E, B> {
const ONE: SonicVariable = SonicVariable::A(1);
fn alloc<F>(&mut self, _value: F) -> Result<SonicVariable, SynthesisError>
where
F: FnOnce() -> Result<E::Fr, SynthesisError>
{
match self.current_variable.take() {
Some(index) => {
let var_b = SonicVariable::B(index);
self.current_variable = None;
Ok(var_b)
},
None => {
self.n += 1;
let index = self.n;
self.backend.new_multiplication_gate();
let var_a = SonicVariable::A(index);
self.current_variable = Some(index);
Ok(var_a)
}
}
}
fn alloc_input<F>(&mut self, value: F) -> Result<SonicVariable, SynthesisError>
where
F: FnOnce() -> Result<E::Fr, SynthesisError>
{
let input_var = self.alloc(value)?;
self.enforce_zero(SonicLinearCombination::zero() + input_var);
self.backend.new_k_power(self.q);
Ok(input_var)
}
fn enforce_zero(&mut self, lc: SonicLinearCombination<E>)
{
self.q += 1;
self.backend.new_linear_constraint();
for (var, coeff) in lc.as_ref() {
self.backend.insert_coefficient(*var, *coeff);
}
}
fn multiply<F>(&mut self, _values: F) -> Result<(SonicVariable, SonicVariable, SonicVariable), SynthesisError>
where
F: FnOnce() -> Result<(E::Fr, E::Fr, E::Fr), SynthesisError>
{
self.n += 1;
let index = self.n;
self.backend.new_multiplication_gate();
let a = SonicVariable::A(index);
let b = SonicVariable::B(index);
let c = SonicVariable::C(index);
Ok((a, b, c))
}
fn get_value(&self, var: SonicVariable) -> Result<E::Fr, ()> {
self.backend.get_var(var).ok_or(())
}
}
struct Preprocess<E: Engine> {
k_map: Vec<usize>,
n: usize,
q: usize,
_marker: PhantomData<E>
}
impl<'a, E: Engine> Backend<E> for &'a mut Preprocess<E> {
fn new_k_power(&mut self, index: usize) {
self.k_map.push(index);
}
fn new_multiplication_gate(&mut self) {
self.n += 1;
}
fn new_linear_constraint(&mut self) {
self.q += 1;
}
}
let mut preprocess = Preprocess { k_map: vec![], n: 0, q: 0, _marker: PhantomData };
let (num_inputs, num_aux, num_constraints) = {
let mut cs: NonassigningSynthesizer<E, &'_ mut Preprocess<E>> = NonassigningSynthesizer {
backend: &mut preprocess,
current_variable: None,
_marker: PhantomData,
q: 0,
n: 0,
};
let one = cs.alloc_input(|| Ok(E::Fr::one())).expect("should have no issues");
match (one, <NonassigningSynthesizer<E, &'_ mut Preprocess<E>> as SonicConstraintSystem<E>>::ONE) {
(SonicVariable::A(1), SonicVariable::A(1)) => {},
_ => return Err(SynthesisError::UnconstrainedVariable)
}
let mut assembly = GeneratorAssembly::<'_, E, _> {
cs: &mut cs,
num_inputs: 0,
num_aux: 0,
num_constraints: 0,
_marker: PhantomData
};
circuit.synthesize(&mut assembly)?;
(assembly.num_inputs, assembly.num_aux, assembly.num_constraints)
};
Ok(CircuitParameters {
num_inputs: num_inputs,
num_aux: num_aux,
num_constraints: num_constraints,
k_map: preprocess.k_map,
n: preprocess.n,
q: preprocess.q,
_marker: PhantomData
})
}
pub fn generate_parameters<E, C>(
circuit: C,
alpha: E::Fr,
x: E::Fr
) -> Result<Parameters<E>, SynthesisError>
where E: Engine, C: Circuit<E>
{
let circuit_parameters = get_circuit_parameters::<E, C>(circuit)?;
let min_d = circuit_parameters.n * 4 + 2*NUM_BLINDINGS;
println!{"Mid d = {}", min_d};
let srs = generate_srs(alpha, x, min_d)?;
let parameters = generate_parameters_on_srs_and_information::<E>(&srs, circuit_parameters)?;
Ok(parameters)
}
pub fn generate_parameters_on_srs<E, C>(
circuit: C,
srs: &SRS<E>,
) -> Result<Parameters<E>, SynthesisError>
where E: Engine, C: Circuit<E>
{
let circuit_parameters = get_circuit_parameters::<E, C>(circuit)?;
let parameters = generate_parameters_on_srs_and_information(&srs, circuit_parameters)?;
Ok(parameters)
}
pub fn generate_parameters_on_srs_and_information<E: Engine>(
srs: &SRS<E>,
information: CircuitParameters<E>
) -> Result<Parameters<E>, SynthesisError>
{
assert!(srs.d >= information.n * 4 + 2*NUM_BLINDINGS);
let min_d = information.n * 4 + 2*NUM_BLINDINGS;
let trimmed_srs: SRS<E> = SRS {
d: min_d,
g_negative_x: srs.g_negative_x[0..min_d+1].to_vec(),
g_positive_x: srs.g_positive_x[0..min_d+1].to_vec().clone(),
h_negative_x: srs.h_negative_x[0..min_d+1].to_vec(),
h_positive_x: srs.h_positive_x[0..min_d+1].to_vec(),
g_negative_x_alpha: srs.g_negative_x_alpha[0..min_d].to_vec(),
g_positive_x_alpha: srs.g_positive_x_alpha[0..min_d].to_vec(),
h_negative_x_alpha: srs.h_negative_x_alpha[0..min_d+1].to_vec(),
h_positive_x_alpha: srs.h_positive_x_alpha[0..min_d+1].to_vec(),
};
let vk = VerifyingKey {
alpha_x: trimmed_srs.h_positive_x_alpha[1],
alpha: trimmed_srs.h_positive_x_alpha[0],
neg_h: {
let mut tmp = trimmed_srs.h_negative_x[0];
tmp.negate();
tmp
},
neg_x_n_minus_d: {
let mut tmp = trimmed_srs.h_negative_x[trimmed_srs.d - information.n];
tmp.negate();
tmp
},
k_map: information.k_map,
n: information.n,
q: information.q
};
Ok(Parameters{
vk: vk,
srs: trimmed_srs
})
}
pub fn generate_srs<E: Engine>(
alpha: E::Fr,
x: E::Fr,
d: usize
) -> Result<SRS<E>, SynthesisError> {
let verbose = verbose_flag();
let g1 = E::G1Affine::one().into_projective();
let g2 = E::G2Affine::one().into_projective();
// Compute G1 window table
let mut g1_wnaf = Wnaf::new();
let g1_wnaf = g1_wnaf.base(g1, 4*d);
// Compute G2 window table
let mut g2_wnaf = Wnaf::new();
let g2_wnaf = g2_wnaf.base(g2, 4*d);
let x_inverse = x.inverse().ok_or(SynthesisError::UnexpectedIdentity)?;
let worker = Worker::new();
let mut x_powers_positive = vec![Scalar::<E>(E::Fr::zero()); d];
let mut x_powers_negative = vec![Scalar::<E>(E::Fr::zero()); d];
{
// Compute powers of x
if verbose {eprintln!("computing powers of x...")};
let start = std::time::Instant::now();
{
worker.scope(d, |scope, chunk| {
for (i, x_powers) in x_powers_positive.chunks_mut(chunk).enumerate()
{
scope.spawn(move |_| {
let mut current_power = x.pow(&[(i*chunk + 1) as u64]);
for p in x_powers {
p.0 = current_power;
current_power.mul_assign(&x);
}
});
}
});
}
{
worker.scope(d, |scope, chunk| {
for (i, x_powers) in x_powers_negative.chunks_mut(chunk).enumerate()
{
scope.spawn(move |_| {
let mut current_power = x_inverse.pow(&[(i*chunk + 1) as u64]);
for p in x_powers {
p.0 = current_power;
current_power.mul_assign(&x_inverse);
}
});
}
});
}
if verbose {eprintln!("powers of x done in {} s", start.elapsed().as_millis() as f64 / 1000.0);};
}
// the zeroth powers will be prepended to g_x, h_x and h_x_alpha later
let mut g_negative_x = vec![E::G1::one(); d];
let mut g_positive_x = vec![E::G1::one(); d];
let mut h_negative_x = vec![E::G2::one(); d];
let mut h_positive_x = vec![E::G2::one(); d];
let mut g_negative_x_alpha = vec![E::G1::one(); d];
let mut g_positive_x_alpha = vec![E::G1::one(); d];
let mut h_negative_x_alpha = vec![E::G2::one(); d];
let mut h_positive_x_alpha = vec![E::G2::one(); d];
fn eval<E: Engine>(
// wNAF window tables
g1_wnaf: &Wnaf<usize, &[E::G1], &mut Vec<i64>>,
g2_wnaf: &Wnaf<usize, &[E::G2], &mut Vec<i64>>,
powers_of_x: &[Scalar<E>],
g_x: &mut [E::G1],
g_x_alpha: &mut [E::G1],
h_x: &mut [E::G2],
h_x_alpha: &mut [E::G2],
// Trapdoors
alpha: &E::Fr,
// Worker
worker: &Worker
)
{
// Sanity check
assert_eq!(g_x.len(), powers_of_x.len());
assert_eq!(g_x.len(), g_x_alpha.len());
assert_eq!(g_x.len(), h_x.len());
assert_eq!(g_x.len(), h_x_alpha.len());
// Evaluate polynomials in multiple threads
worker.scope(g_x.len(), |scope, chunk| {
for ((((x, g_x), g_x_alpha), h_x), h_x_alpha) in powers_of_x.chunks(chunk)
.zip(g_x.chunks_mut(chunk))
.zip(g_x_alpha.chunks_mut(chunk))
.zip(h_x.chunks_mut(chunk))
.zip(h_x_alpha.chunks_mut(chunk))
{
let mut g1_wnaf = g1_wnaf.shared();
let mut g2_wnaf = g2_wnaf.shared();
scope.spawn(move |_| {
for ((((x, g_x), g_x_alpha), h_x), h_x_alpha) in x.iter()
.zip(g_x.iter_mut())
.zip(g_x_alpha.iter_mut())
.zip(h_x.iter_mut())
.zip(h_x_alpha.iter_mut())
{
let mut x_alpha = x.0;
x_alpha.mul_assign(&alpha);
*g_x = g1_wnaf.scalar(x.0.into_repr());
*h_x = g2_wnaf.scalar(x.0.into_repr());
*g_x_alpha = g1_wnaf.scalar(x_alpha.into_repr());
*h_x_alpha = g2_wnaf.scalar(x_alpha.into_repr());
}
// Batch normalize
E::G1::batch_normalization(g_x);
E::G1::batch_normalization(g_x_alpha);
E::G2::batch_normalization(h_x);
E::G2::batch_normalization(h_x_alpha);
});
};
});
}
let start = std::time::Instant::now();
// Evaluate for positive powers.
eval(
&g1_wnaf,
&g2_wnaf,
&x_powers_positive,
&mut g_positive_x[..],
&mut g_positive_x_alpha[..],
&mut h_positive_x[..],
&mut h_positive_x_alpha[..],
&alpha,
&worker
);
// Evaluate for negative powers
eval(
&g1_wnaf,
&g2_wnaf,
&x_powers_negative,
&mut g_negative_x[..],
&mut g_negative_x_alpha[..],
&mut h_negative_x[..],
&mut h_negative_x_alpha[..],
&alpha,
&worker
);
if verbose {eprintln!("evaluating points done in {} s", start.elapsed().as_millis() as f64 / 1000.0);};
let g1 = g1.into_affine();
let g2 = g2.into_affine();
let h_alpha = g2.mul(alpha.into_repr()).into_affine();
let g_negative_x = {
let mut tmp = vec![g1];
tmp.extend(g_negative_x.into_iter().map(|e| e.into_affine()));
tmp
};
let g_positive_x = {
let mut tmp = vec![g1];
tmp.extend(g_positive_x.into_iter().map(|e| e.into_affine()));
tmp
};
let h_negative_x = {
let mut tmp = vec![g2];
tmp.extend(h_negative_x.into_iter().map(|e| e.into_affine()));
tmp
};
let h_positive_x = {
let mut tmp = vec![g2];
tmp.extend(h_positive_x.into_iter().map(|e| e.into_affine()));
tmp
};
let g_negative_x_alpha = g_negative_x_alpha.into_iter().map(|e| e.into_affine()).collect();
let g_positive_x_alpha = g_positive_x_alpha.into_iter().map(|e| e.into_affine()).collect();
let h_negative_x_alpha = {
let mut tmp = vec![h_alpha];
tmp.extend(h_negative_x_alpha.into_iter().map(|e| e.into_affine()));
tmp
};
let h_positive_x_alpha = {
let mut tmp = vec![h_alpha];
tmp.extend(h_positive_x_alpha.into_iter().map(|e| e.into_affine()));
tmp
};
Ok(SRS {
d: d,
g_negative_x: g_negative_x,
g_positive_x: g_positive_x,
h_negative_x: h_negative_x,
h_positive_x: h_positive_x,
g_negative_x_alpha: g_negative_x_alpha,
g_positive_x_alpha: g_positive_x_alpha,
h_negative_x_alpha: h_negative_x_alpha,
h_positive_x_alpha: h_positive_x_alpha,
}
)
}

252
src/sonic/helped/helper.rs Normal file

@ -0,0 +1,252 @@
use pairing::ff::{Field};
use pairing::{Engine, CurveProjective};
use std::marker::PhantomData;
use super::{Proof, SxyAdvice};
use super::batch::Batch;
use super::poly::{SxEval, SyEval};
use super::Parameters;
use crate::SynthesisError;
use crate::sonic::transcript::{Transcript, TranscriptProtocol};
use crate::sonic::util::*;
use crate::sonic::cs::{Backend, SynthesisDriver};
use crate::sonic::cs::{Circuit, Variable, Coeff};
use crate::sonic::srs::SRS;
#[derive(Clone)]
pub struct Aggregate<E: Engine> {
// Commitment to s(z, Y)
pub c: E::G1Affine,
// We have to open each of the S commitments to a random point `z`
pub s_opening: E::G1Affine,
// We have to open C to each constituent `y`
pub c_openings: Vec<(E::G1Affine, E::Fr)>,
// Then we have to finally open C
pub opening: E::G1Affine,
}
pub fn create_aggregate<E: Engine, C: Circuit<E>, S: SynthesisDriver>(
circuit: &C,
inputs: &[(Proof<E>, SxyAdvice<E>)],
params: &Parameters<E>,
) -> Aggregate<E>
{
let n = params.vk.n;
let q = params.vk.q;
create_aggregate_on_srs_using_information::<E, C, S>(circuit, inputs, &params.srs, n, q)
}
pub fn create_aggregate_on_srs<E: Engine, C: Circuit<E>, S: SynthesisDriver>(
circuit: &C,
inputs: &[(Proof<E>, SxyAdvice<E>)],
srs: &SRS<E>,
) -> Aggregate<E>
{
// TODO: precompute this?
let (n, q) = {
struct CountN {
n: usize,
q: usize
}
impl<'a, E: Engine> Backend<E> for &'a mut CountN {
fn new_multiplication_gate(&mut self) {
self.n += 1;
}
fn new_linear_constraint(&mut self) {
self.q += 1;
}
}
let mut tmp = CountN{n:0,q:0};
S::synthesize(&mut tmp, circuit).unwrap(); // TODO
(tmp.n, tmp.q)
};
create_aggregate_on_srs_using_information::<E, C, S>(circuit, inputs, srs, n, q)
}
pub fn create_aggregate_on_srs_using_information<E: Engine, C: Circuit<E>, S: SynthesisDriver>(
circuit: &C,
inputs: &[(Proof<E>, SxyAdvice<E>)],
srs: &SRS<E>,
n: usize,
q: usize,
) -> Aggregate<E>
{
let mut transcript = Transcript::new(&[]);
let mut y_values: Vec<E::Fr> = Vec::with_capacity(inputs.len());
for &(ref proof, ref sxyadvice) in inputs {
{
let mut transcript = Transcript::new(&[]);
transcript.commit_point(&proof.r);
y_values.push(transcript.get_challenge_scalar());
}
transcript.commit_point(&sxyadvice.s);
}
let z: E::Fr = transcript.get_challenge_scalar();
// Compute s(z, Y)
let (s_poly_negative, s_poly_positive) = {
let mut tmp = SyEval::new(z, n, q);
S::synthesize(&mut tmp, circuit).unwrap(); // TODO
tmp.poly()
};
// Compute C = g^{s(z, x)}
let c = multiexp(
srs.g_positive_x_alpha[0..(n + q)]
.iter()
.chain_ext(srs.g_negative_x_alpha[0..n].iter()),
s_poly_positive.iter().chain_ext(s_poly_negative.iter())
).into_affine();
transcript.commit_point(&c);
// Open C at w
let w: E::Fr = transcript.get_challenge_scalar();
let value = compute_value::<E>(&w, &s_poly_positive, &s_poly_negative);
let opening = {
let mut value = value;
value.negate();
let poly = kate_divison(
s_poly_negative.iter().rev().chain_ext(Some(value).iter()).chain_ext(s_poly_positive.iter()),
w,
);
let negative_poly = poly[0..n].iter().rev();
let positive_poly = poly[n..].iter();
multiexp(
srs.g_negative_x[1..(negative_poly.len() + 1)].iter().chain_ext(
srs.g_positive_x[0..positive_poly.len()].iter()
),
negative_poly.chain_ext(positive_poly)
).into_affine()
};
// TODO: parallelize
// Let's open up C to every y.
fn compute_value<E: Engine>(y: &E::Fr, poly_positive: &[E::Fr], poly_negative: &[E::Fr]) -> E::Fr {
let mut value = E::Fr::zero();
let yinv = y.inverse().unwrap(); // TODO
let mut tmp = yinv;
for &coeff in poly_negative {
let mut coeff = coeff;
coeff.mul_assign(&tmp);
value.add_assign(&coeff);
tmp.mul_assign(&yinv);
}
let mut tmp = *y;
for &coeff in poly_positive {
let mut coeff = coeff;
coeff.mul_assign(&tmp);
value.add_assign(&coeff);
tmp.mul_assign(&y);
}
value
}
let mut c_openings = vec![];
for y in &y_values {
let value = compute_value::<E>(y, &s_poly_positive, &s_poly_negative);
let opening = {
let mut value = value;
value.negate();
let poly = kate_divison(
s_poly_negative.iter().rev().chain_ext(Some(value).iter()).chain_ext(s_poly_positive.iter()),
*y,
);
let negative_poly = poly[0..n].iter().rev();
let positive_poly = poly[n..].iter();
multiexp(
srs.g_negative_x[1..(negative_poly.len() + 1)].iter().chain_ext(
srs.g_positive_x[0..positive_poly.len()].iter()
),
negative_poly.chain_ext(positive_poly)
).into_affine()
};
c_openings.push((opening, value));
}
// Okay, great. Now we need to open each S_i at the same point z (to the value obtained
// by opening C at the corresponding y_i). Since all the S's are opened at the same point,
// we draw random challenges and open a single random linear combination instead.
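// Concretely, with challenges r_i we accumulate
//     \sum_i r_i * s(X, y_i)
// into `poly_negative`/`poly_positive` below, open it at z, and expect the value
//     \sum_i r_i * s(z, y_i)
// which is tracked in `expected_value`.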
let mut poly_negative = vec![E::Fr::zero(); n];
let mut poly_positive = vec![E::Fr::zero(); 2*n];
let mut expected_value = E::Fr::zero();
for (y, c_opening) in y_values.iter().zip(c_openings.iter()) {
// Compute s(X, y_i)
let (s_poly_negative, s_poly_positive) = {
let mut tmp = SxEval::new(*y, n);
S::synthesize(&mut tmp, circuit).unwrap(); // TODO
tmp.poly()
};
let mut value = c_opening.1;
let r: E::Fr = transcript.get_challenge_scalar();
value.mul_assign(&r);
expected_value.add_assign(&value);
for (mut coeff, target) in s_poly_negative.into_iter().zip(poly_negative.iter_mut()) {
coeff.mul_assign(&r);
target.add_assign(&coeff);
}
for (mut coeff, target) in s_poly_positive.into_iter().zip(poly_positive.iter_mut()) {
coeff.mul_assign(&r);
target.add_assign(&coeff);
}
}
// TODO: parallelize
let s_opening = {
let mut value = expected_value;
value.negate();
let poly = kate_divison(
poly_negative.iter().rev().chain_ext(Some(value).iter()).chain_ext(poly_positive.iter()),
z,
);
let negative_poly = poly[0..n].iter().rev();
let positive_poly = poly[n..].iter();
multiexp(
srs.g_negative_x[1..(negative_poly.len() + 1)].iter().chain_ext(
srs.g_positive_x[0..positive_poly.len()].iter()
),
negative_poly.chain_ext(positive_poly)
).into_affine()
};
Aggregate {
// Commitment to s(z, Y)
c,
// We have to open each of the S commitments to a random point `z`
s_opening,
// We have to open C to each constituent `y`
c_openings,
// Then we have to finally open C
opening,
}
}

52
src/sonic/helped/mod.rs Normal file

@ -0,0 +1,52 @@
extern crate pairing;
use pairing::ff::{Field};
use pairing::{Engine, CurveProjective};
use std::marker::PhantomData;
mod batch;
mod poly;
pub mod prover;
pub mod verifier;
pub mod helper;
mod parameters;
mod generator;
mod adapted_prover;
mod adapted_verifier;
mod adapted_helper;
pub use self::batch::{Batch};
pub use self::verifier::{MultiVerifier};
pub use self::generator::{
CircuitParameters,
generate_parameters,
generate_parameters_on_srs,
generate_parameters_on_srs_and_information,
generate_random_parameters,
generate_srs,
get_circuit_parameters
};
pub use self::parameters::{
Proof,
SxyAdvice,
Parameters,
VerifyingKey,
PreparedVerifyingKey
};
pub use self::adapted_prover::{
create_advice,
create_advice_on_srs,
create_advice_on_information_and_srs,
create_proof,
create_proof_on_srs,
};
pub use self::adapted_verifier::{
verify_proofs,
verify_aggregate
};
pub use self::adapted_helper::{
create_aggregate
};
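// A rough end-to-end sketch using the re-exports above (`MyCircuit` and `rng` are
// placeholders; the verification call is elided since its exact arguments live in
// `adapted_verifier`):
//
//     let params = generate_random_parameters(MyCircuit, rng)?;
//     let proof = create_proof(MyCircuit, &params)?;
//     // ... then check it with `verify_proofs` against the public inputs.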

@ -0,0 +1,488 @@
use pairing::ff::{
Field,
PrimeField,
PrimeFieldRepr
};
use pairing::{
Engine,
CurveAffine,
EncodedPoint
};
use crate::{
SynthesisError
};
use crate::source::SourceBuilder;
use std::io::{self, Read, Write};
use std::sync::Arc;
use byteorder::{BigEndian, WriteBytesExt, ReadBytesExt};
pub const NUM_BLINDINGS: usize = 6;
// pub const NUM_BLINDINGS: usize = 0;
#[derive(Clone, Debug, Eq)]
pub struct SxyAdvice<E: Engine> {
pub s: E::G1Affine,
pub opening: E::G1Affine,
pub szy: E::Fr,
}
impl<E: Engine> PartialEq for SxyAdvice<E> {
fn eq(&self, other: &SxyAdvice<E>) -> bool {
self.s == other.s &&
self.opening == other.opening &&
self.szy == other.szy
}
}
#[derive(Clone, Debug, Eq)]
pub struct Proof<E: Engine> {
pub r: E::G1Affine,
pub t: E::G1Affine,
pub rz: E::Fr,
pub rzy: E::Fr,
pub z_opening: E::G1Affine,
pub zy_opening: E::G1Affine
}
impl<E: Engine> PartialEq for Proof<E> {
fn eq(&self, other: &Proof<E>) -> bool {
self.r == other.r &&
self.t == other.t &&
self.rz == other.rz &&
self.rzy == other.rzy &&
self.z_opening == other.z_opening &&
self.zy_opening == other.zy_opening
}
}
impl<E: Engine> Proof<E> {
pub fn write<W: Write>(
&self,
mut writer: W
) -> io::Result<()>
{
use pairing::ff::{PrimeField, PrimeFieldRepr};
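// Serialized layout: r || t || rz || rzy || z_opening || zy_opening, with G1
// points compressed and field elements written as big-endian representations.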
writer.write_all(self.r.into_compressed().as_ref())?;
writer.write_all(self.t.into_compressed().as_ref())?;
let mut buffer = vec![];
self.rz.into_repr().write_be(&mut buffer)?;
writer.write_all(&buffer[..])?;
let mut buffer = vec![];
self.rzy.into_repr().write_be(&mut buffer)?;
writer.write_all(&buffer[..])?;
writer.write_all(self.z_opening.into_compressed().as_ref())?;
writer.write_all(self.zy_opening.into_compressed().as_ref())?;
Ok(())
}
pub fn read<R: Read>(
mut reader: R
) -> io::Result<Self>
{
let mut g1_repr = <E::G1Affine as CurveAffine>::Compressed::empty();
let mut fr_repr = E::Fr::zero().into_repr();
reader.read_exact(g1_repr.as_mut())?;
let r = g1_repr
.into_affine()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
.and_then(|e| if e.is_zero() {
Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
} else {
Ok(e)
})?;
reader.read_exact(g1_repr.as_mut())?;
let t = g1_repr
.into_affine()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
.and_then(|e| if e.is_zero() {
Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
} else {
Ok(e)
})?;
fr_repr.read_be(&mut reader)?;
let rz = E::Fr::from_repr(fr_repr)
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
.and_then(|e| if e.is_zero() {
Err(io::Error::new(io::ErrorKind::InvalidData, "field element is zero"))
} else {
Ok(e)
})?;
fr_repr.read_be(&mut reader)?;
let rzy = E::Fr::from_repr(fr_repr)
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
.and_then(|e| if e.is_zero() {
Err(io::Error::new(io::ErrorKind::InvalidData, "field element is zero"))
} else {
Ok(e)
})?;
reader.read_exact(g1_repr.as_mut())?;
let z_opening = g1_repr
.into_affine()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
.and_then(|e| if e.is_zero() {
Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
} else {
Ok(e)
})?;
reader.read_exact(g1_repr.as_mut())?;
let zy_opening = g1_repr
.into_affine()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
.and_then(|e| if e.is_zero() {
Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
} else {
Ok(e)
})?;
Ok(Proof {
r: r,
t: t,
rz: rz,
rzy: rzy,
z_opening: z_opening,
zy_opening: zy_opening
})
}
}
#[derive(Clone, Debug, Eq)]
pub struct VerifyingKey<E: Engine> {
pub alpha_x: E::G2Affine,
pub alpha: E::G2Affine,
pub neg_h: E::G2Affine,
pub neg_x_n_minus_d: E::G2Affine,
pub k_map: Vec<usize>,
pub n: usize,
pub q: usize
}
impl<E: Engine> PartialEq for VerifyingKey<E> {
fn eq(&self, other: &VerifyingKey<E>) -> bool {
self.alpha_x == other.alpha_x &&
self.alpha == other.alpha &&
self.neg_h == other.neg_h &&
self.neg_x_n_minus_d == other.neg_x_n_minus_d &&
self.k_map == other.k_map &&
self.n == other.n &&
self.q == other.q
}
}
impl<E: Engine> VerifyingKey<E> {
pub fn write<W: Write>(
&self,
mut writer: W
) -> io::Result<()>
{
writer.write_all(self.alpha_x.into_uncompressed().as_ref())?;
writer.write_all(self.alpha.into_uncompressed().as_ref())?;
writer.write_all(self.neg_h.into_uncompressed().as_ref())?;
writer.write_all(self.neg_x_n_minus_d.into_uncompressed().as_ref())?;
writer.write_u32::<BigEndian>(self.k_map.len() as u32)?;
for k in &self.k_map {
writer.write_u32::<BigEndian>(*k as u32)?;
}
writer.write_u32::<BigEndian>(self.n as u32)?;
writer.write_u32::<BigEndian>(self.q as u32)?;
Ok(())
}
pub fn read<R: Read>(
mut reader: R
) -> io::Result<Self>
{
let mut g2_repr = <E::G2Affine as CurveAffine>::Uncompressed::empty();
reader.read_exact(g2_repr.as_mut())?;
let alpha_x = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
reader.read_exact(g2_repr.as_mut())?;
let alpha = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
reader.read_exact(g2_repr.as_mut())?;
let neg_h = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
reader.read_exact(g2_repr.as_mut())?;
let neg_x_n_minus_d = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
let k_map_len = reader.read_u32::<BigEndian>()? as usize;
let mut k_map = vec![];
for _ in 0..k_map_len {
let k = reader.read_u32::<BigEndian>()? as usize;
k_map.push(k);
}
let n = reader.read_u32::<BigEndian>()? as usize;
let q = reader.read_u32::<BigEndian>()? as usize;
Ok(VerifyingKey {
alpha_x: alpha_x,
alpha: alpha,
neg_h: neg_h,
neg_x_n_minus_d: neg_x_n_minus_d,
k_map: k_map,
n: n,
q: q
})
}
}
use crate::sonic::cs::{Backend, Basic, SynthesisDriver};
use crate::sonic::srs::SRS;
use crate::sonic::cs::Circuit as SonicCircuit;
use std::marker::PhantomData;
impl<E: Engine> VerifyingKey<E> {
pub fn new<C: SonicCircuit<E>, S: SynthesisDriver>(circuit: C, srs: &SRS<E>) -> Result<Self, SynthesisError> {
struct Preprocess<E: Engine> {
k_map: Vec<usize>,
n: usize,
q: usize,
_marker: PhantomData<E>
}
impl<'a, E: Engine> Backend<E> for &'a mut Preprocess<E> {
fn new_k_power(&mut self, index: usize) {
self.k_map.push(index);
}
fn new_multiplication_gate(&mut self) {
self.n += 1;
}
fn new_linear_constraint(&mut self) {
self.q += 1;
}
}
let mut preprocess = Preprocess { k_map: vec![], n: 0, q: 0, _marker: PhantomData };
S::synthesize(&mut preprocess, &circuit)?;
Ok(Self {
alpha_x: srs.h_positive_x_alpha[1],
alpha: srs.h_positive_x_alpha[0],
neg_h: {
let mut tmp = srs.h_negative_x[0];
tmp.negate();
tmp
},
neg_x_n_minus_d: {
let mut tmp = srs.h_negative_x[srs.d - preprocess.n];
tmp.negate();
tmp
},
k_map: preprocess.k_map,
n: preprocess.n,
q: preprocess.q
})
}
}
pub struct PreparedVerifyingKey<E: Engine> {
alpha_x: <E::G2Affine as CurveAffine>::Prepared,
alpha: <E::G2Affine as CurveAffine>::Prepared,
neg_h: <E::G2Affine as CurveAffine>::Prepared,
neg_x_n_minus_d: <E::G2Affine as CurveAffine>::Prepared,
k_map: Vec<usize>,
n: usize,
q: usize
}
#[derive(Clone, Eq)]
pub struct Parameters<E: Engine> {
pub vk: VerifyingKey<E>,
pub srs: SRS<E>,
// pub d: usize,
// // g^{x^0}, g^{x^{-1}}, g^{x^{-2}}, ..., g^{x^{-d}}
// pub g_negative_x: Arc<Vec<E::G1Affine>>,
// // g^{x^0}, g^{x^{1}}, g^{x^{2}}, ..., g^{x^{d}}
// pub g_positive_x: Arc<Vec<E::G1Affine>>,
// // g^{x^0}, g^{x^{-1}}, g^{x^{-2}}, ..., g^{x^{-d}}
// pub h_negative_x: Arc<Vec<E::G2Affine>>,
// // g^{x^0}, g^{x^{1}}, g^{x^{2}}, ..., g^{x^{d}}
// pub h_positive_x: Arc<Vec<E::G2Affine>>,
// // alpha*(g^{x^{-1}}, g^{x^{-2}}, ..., g^{x^{-d}})
// pub g_negative_x_alpha: Arc<Vec<E::G1Affine>>,
// // alpha*(g^{x^{1}}, g^{x^{2}}, ..., g^{x^{d}})
// pub g_positive_x_alpha: Arc<Vec<E::G1Affine>>,
// // alpha*(h^{x^0}, h^{x^{-1}}, g^{x^{-2}}, ..., g^{x^{-d}})
// pub h_negative_x_alpha: Arc<Vec<E::G2Affine>>,
// // alpha*(h^{x^0}, g^{x^{1}}, g^{x^{2}}, ..., g^{x^{d}})
// pub h_positive_x_alpha: Arc<Vec<E::G2Affine>>,
}
impl<E: Engine> PartialEq for Parameters<E> {
fn eq(&self, other: &Parameters<E>) -> bool {
self.vk == other.vk &&
self.srs == other.srs
}
}
impl<E: Engine> Parameters<E> {
pub fn write<W: Write>(
&self,
mut writer: W
) -> io::Result<()>
{
self.vk.write(&mut writer)?;
self.srs.write(&mut writer)?;
Ok(())
}
pub fn read<R: Read>(
mut reader: R,
checked: bool
) -> io::Result<Self>
{
let vk = VerifyingKey::<E>::read(&mut reader)?;
let srs = SRS::<E>::read(&mut reader, checked)?;
Ok(Parameters {
vk: vk,
srs: srs
})
}
}
#[test]
fn parameters_generation() {
use crate::{ConstraintSystem, Circuit};
use pairing::bls12_381::{Bls12, Fr};
#[derive(Clone)]
struct MySillyCircuit<E: Engine> {
a: Option<E::Fr>,
b: Option<E::Fr>
}
impl<E: Engine> Circuit<E> for MySillyCircuit<E> {
fn synthesize<CS: ConstraintSystem<E>>(
self,
cs: &mut CS
) -> Result<(), SynthesisError>
{
let a = cs.alloc(|| "a", || self.a.ok_or(SynthesisError::AssignmentMissing))?;
let b = cs.alloc(|| "b", || self.b.ok_or(SynthesisError::AssignmentMissing))?;
let c = cs.alloc_input(|| "c", || {
let mut a = self.a.ok_or(SynthesisError::AssignmentMissing)?;
let b = self.b.ok_or(SynthesisError::AssignmentMissing)?;
a.mul_assign(&b);
Ok(a)
})?;
cs.enforce(
|| "a*b=c",
|lc| lc + a,
|lc| lc + b,
|lc| lc + c
);
Ok(())
}
}
use rand::{Rng, Rand, thread_rng};
use super::{generate_parameters, get_circuit_parameters, generate_srs, generate_parameters_on_srs_and_information};
use super::adapted_prover::create_proof;
let info = get_circuit_parameters::<Bls12, _>(MySillyCircuit { a: None, b: None }).expect("Must get circuit info");
println!("{:?}", info);
let rng = &mut thread_rng();
let x: Fr = rng.gen();
let alpha: Fr = rng.gen();
let params = generate_parameters::<Bls12, _>(MySillyCircuit { a: None, b: None }, alpha, x).unwrap();
let srs = generate_srs::<Bls12>(alpha, x, info.n * 100).unwrap();
let naive_srs = SRS::<Bls12>::new(
info.n * 100,
x,
alpha,
);
assert!(srs == naive_srs);
let params_on_srs = generate_parameters_on_srs_and_information::<Bls12>(&srs, info.clone()).unwrap();
assert!(params == params_on_srs);
{
let mut v = vec![];
params.write(&mut v).unwrap();
let de_params = Parameters::read(&v[..], true).unwrap();
assert!(params == de_params);
let de_params = Parameters::read(&v[..], false).unwrap();
assert!(params == de_params);
}
for _ in 0..100 {
let a = Fr::rand(rng);
let b = Fr::rand(rng);
let mut c = a;
c.mul_assign(&b);
let proof = create_proof(
MySillyCircuit {
a: Some(a),
b: Some(b)
},
&params,
).unwrap();
let mut v = vec![];
proof.write(&mut v).unwrap();
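// 4 compressed G1 points (48 bytes each on BLS12-381) + 2 field elements
// (32 bytes each) = 192 + 64 = 256 bytes.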
assert_eq!(v.len(), 256);
let de_proof = Proof::read(&v[..]).unwrap();
assert!(proof == de_proof);
// assert!(verify_proof(&pvk, &proof, &[c]).unwrap());
// assert!(!verify_proof(&pvk, &proof, &[a]).unwrap());
}
}

264
src/sonic/helped/poly.rs Normal file

@ -0,0 +1,264 @@
use pairing::ff::{Field};
use pairing::{Engine, CurveProjective};
use std::marker::PhantomData;
use crate::sonic::cs::{Backend};
use crate::sonic::cs::{Coeff, Variable, LinearCombination};
/*
s(X, Y) = \sum\limits_{i=1}^N u_i(Y) X^{-i}
+ \sum\limits_{i=1}^N v_i(Y) X^{i}
+ \sum\limits_{i=1}^N w_i(Y) X^{i+N}
where
u_i(Y) = \sum\limits_{q=1}^Q Y^{q+N} u_{i,q}
v_i(Y) = \sum\limits_{q=1}^Q Y^{q+N} v_{i,q}
w_i(Y) = -Y^{i} - Y^{-i} + \sum\limits_{q=1}^Q Y^{q+N} w_{i,q}
*/
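// `SxEval` evaluates s(X, y) for a fixed y: every new linear constraint advances
// `yqn` from y^{N} towards y^{q+N}, and `insert_coefficient` adds coeff * y^{q+N}
// into the u/v/w slot matching the variable's power of X.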
#[derive(Clone)]
pub struct SxEval<E: Engine> {
y: E::Fr,
// current value of y^{q+N}
yqn: E::Fr,
// x^{-i} (\sum\limits_{q=1}^Q y^{q+N} u_{q,i})
u: Vec<E::Fr>,
// x^{i} (\sum\limits_{q=1}^Q y^{q+N} v_{q,i})
v: Vec<E::Fr>,
// x^{i+N} (-y^i -y^{-i} + \sum\limits_{q=1}^Q y^{q+N} w_{q,i})
w: Vec<E::Fr>,
}
impl<E: Engine> SxEval<E> {
pub fn new(y: E::Fr, n: usize) -> Self {
let y_inv = y.inverse().unwrap(); // TODO
let yqn = y.pow(&[n as u64]);
let u = vec![E::Fr::zero(); n];
let v = vec![E::Fr::zero(); n];
let mut w = vec![E::Fr::zero(); n];
let mut tmp1 = y;
let mut tmp2 = y_inv;
for w in &mut w {
let mut new = tmp1;
new.add_assign(&tmp2);
new.negate();
*w = new;
tmp1.mul_assign(&y);
tmp2.mul_assign(&y_inv);
}
SxEval {
y,
yqn,
u,
v,
w,
}
}
pub fn poly(mut self) -> (Vec<E::Fr>, Vec<E::Fr>) {
self.v.extend(self.w);
(self.u, self.v)
}
pub fn finalize(self, x: E::Fr) -> E::Fr {
let x_inv = x.inverse().unwrap(); // TODO
let mut tmp = x_inv;
let mut acc = E::Fr::zero();
for mut u in self.u {
u.mul_assign(&tmp);
acc.add_assign(&u);
tmp.mul_assign(&x_inv);
}
let mut tmp = x;
for mut v in self.v {
v.mul_assign(&tmp);
acc.add_assign(&v);
tmp.mul_assign(&x);
}
for mut w in self.w {
w.mul_assign(&tmp);
acc.add_assign(&w);
tmp.mul_assign(&x);
}
acc
}
}
impl<'a, E: Engine> Backend<E> for &'a mut SxEval<E> {
fn new_linear_constraint(&mut self) {
self.yqn.mul_assign(&self.y);
}
fn insert_coefficient(&mut self, var: Variable, coeff: Coeff<E>) {
let acc = match var {
Variable::A(index) => {
&mut self.u[index - 1]
}
Variable::B(index) => {
&mut self.v[index - 1]
}
Variable::C(index) => {
&mut self.w[index - 1]
}
};
match coeff {
Coeff::Zero => { },
Coeff::One => {
acc.add_assign(&self.yqn);
},
Coeff::NegativeOne => {
acc.sub_assign(&self.yqn);
},
Coeff::Full(mut val) => {
val.mul_assign(&self.yqn);
acc.add_assign(&val);
}
}
}
}
/*
s(X, Y) = \sum\limits_{i=1}^N \sum\limits_{q=1}^Q Y^{q+N} u_{i,q} X^{-i}
+ \sum\limits_{i=1}^N \sum\limits_{q=1}^Q Y^{q+N} v_{i,q} X^{i}
+ \sum\limits_{i=1}^N \sum\limits_{q=1}^Q Y^{q+N} w_{i,q} X^{i+N}
- \sum\limits_{i=1}^N Y^i X^{i+N}
- \sum\limits_{i=1}^N Y^{-i} X^{i+N}
*/
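// `SyEval` fixes X = x and collects s(x, Y) as a polynomial in Y: constraint q
// contributes coeff * x^{\pm i} (or x^{i+N}) to the Y^{q+N} coefficient, while the
// -Y^{i} and -Y^{-i} terms (each scaled by x^{i+N}) are seeded in `new`.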
pub struct SyEval<E: Engine> {
max_n: usize,
current_q: usize,
// x^{-1}, ..., x^{-N}
a: Vec<E::Fr>,
// x^1, ..., x^{N}
b: Vec<E::Fr>,
// x^{N+1}, ..., x^{2*N}
c: Vec<E::Fr>,
// coeffs for y^1, ..., y^{N+Q}
positive_coeffs: Vec<E::Fr>,
// coeffs for y^{-1}, y^{-2}, ..., y^{-N}
negative_coeffs: Vec<E::Fr>,
}
impl<E: Engine> SyEval<E> {
pub fn new(x: E::Fr, n: usize, q: usize) -> Self {
let xinv = x.inverse().unwrap();
let mut tmp = E::Fr::one();
let mut a = vec![E::Fr::zero(); n];
for a in &mut a {
tmp.mul_assign(&xinv); // tmp = x^{-i}
*a = tmp;
}
let mut tmp = E::Fr::one();
let mut b = vec![E::Fr::zero(); n];
for b in &mut b {
tmp.mul_assign(&x); // tmp = x^{i}
*b = tmp;
}
let mut positive_coeffs = vec![E::Fr::zero(); n + q];
let mut negative_coeffs = vec![E::Fr::zero(); n];
let mut c = vec![E::Fr::zero(); n];
for ((c, positive_coeff), negative_coeff) in c.iter_mut().zip(&mut positive_coeffs).zip(&mut negative_coeffs) {
tmp.mul_assign(&x); // tmp = x^{i+N}
*c = tmp;
// - \sum\limits_{i=1}^N Y^i X^{i+N}
let mut tmp = tmp;
tmp.negate();
*positive_coeff = tmp;
// - \sum\limits_{i=1}^N Y^{-i} X^{i+N}
*negative_coeff = tmp;
}
SyEval {
a,
b,
c,
positive_coeffs,
negative_coeffs,
current_q: 0,
max_n: n,
}
}
pub fn poly(self) -> (Vec<E::Fr>, Vec<E::Fr>) {
(self.negative_coeffs, self.positive_coeffs)
}
pub fn finalize(self, y: E::Fr) -> E::Fr {
let mut acc = E::Fr::zero();
let mut tmp = y;
for mut coeff in self.positive_coeffs {
coeff.mul_assign(&tmp);
acc.add_assign(&coeff);
tmp.mul_assign(&y);
}
let yinv = y.inverse().unwrap(); // TODO
let mut tmp = yinv;
for mut coeff in self.negative_coeffs {
coeff.mul_assign(&tmp);
acc.add_assign(&coeff);
tmp.mul_assign(&yinv);
}
acc
}
}
impl<'a, E: Engine> Backend<E> for &'a mut SyEval<E> {
fn new_linear_constraint(&mut self) {
self.current_q += 1;
}
fn insert_coefficient(&mut self, var: Variable, coeff: Coeff<E>) {
match var {
Variable::A(index) => {
let index = index - 1;
// Y^{q+N} += X^{-i} * coeff
let mut tmp = self.a[index];
coeff.multiply(&mut tmp);
let yindex = self.current_q + self.max_n;
self.positive_coeffs[yindex - 1].add_assign(&tmp);
}
Variable::B(index) => {
let index = index - 1;
// Y^{q+N} += X^{i} * coeff
let mut tmp = self.b[index];
coeff.multiply(&mut tmp);
let yindex = self.current_q + self.max_n;
self.positive_coeffs[yindex - 1].add_assign(&tmp);
}
Variable::C(index) => {
let index = index - 1;
// Y^{q+N} += X^{i+N} * coeff
let mut tmp = self.c[index];
coeff.multiply(&mut tmp);
let yindex = self.current_q + self.max_n;
self.positive_coeffs[yindex - 1].add_assign(&tmp);
}
};
}
}

505
src/sonic/helped/prover.rs Normal file

@ -0,0 +1,505 @@
use pairing::ff::{Field};
use pairing::{Engine, CurveProjective};
use std::marker::PhantomData;
use super::{Proof, SxyAdvice};
use super::batch::Batch;
use super::poly::{SxEval, SyEval};
use super::parameters::{Parameters, NUM_BLINDINGS};
use crate::SynthesisError;
use crate::sonic::transcript::{Transcript, TranscriptProtocol};
use crate::sonic::util::*;
use crate::sonic::cs::{Backend, SynthesisDriver};
use crate::sonic::cs::{Circuit, Variable, Coeff};
use crate::sonic::srs::SRS;
pub fn create_advice_on_information_and_srs<E: Engine, C: Circuit<E>, S: SynthesisDriver>(
circuit: &C,
proof: &Proof<E>,
srs: &SRS<E>,
n: usize
) -> Result<SxyAdvice<E>, SynthesisError>
{
let z: E::Fr;
let y: E::Fr;
{
let mut transcript = Transcript::new(&[]);
transcript.commit_point(&proof.r);
y = transcript.get_challenge_scalar();
transcript.commit_point(&proof.t);
z = transcript.get_challenge_scalar();
}
let z_inv = z.inverse().ok_or(SynthesisError::DivisionByZero)?;
let (s_poly_negative, s_poly_positive) = {
let mut tmp = SxEval::new(y, n);
S::synthesize(&mut tmp, circuit)?;
tmp.poly()
};
// Compute S commitment
let s = multiexp(
srs.g_positive_x_alpha[0..(2 * n)]
.iter()
.chain_ext(srs.g_negative_x_alpha[0..(n)].iter()),
s_poly_positive.iter().chain_ext(s_poly_negative.iter())
).into_affine();
// Compute s(z, y)
let mut szy = E::Fr::zero();
{
let mut tmp = z;
for &p in &s_poly_positive {
let mut p = p;
p.mul_assign(&tmp);
szy.add_assign(&p);
tmp.mul_assign(&z);
}
let mut tmp = z_inv;
for &p in &s_poly_negative {
let mut p = p;
p.mul_assign(&tmp);
szy.add_assign(&p);
tmp.mul_assign(&z_inv);
}
}
// Compute kate opening
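// i.e. commit to the quotient (s(X, y) - s(z, y)) / (X - z)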
let opening = {
let mut open = szy;
open.negate();
let poly = kate_divison(
s_poly_negative.iter().rev().chain_ext(Some(open).iter()).chain_ext(s_poly_positive.iter()),
z,
);
let negative_poly = poly[0..n].iter().rev();
let positive_poly = poly[n..].iter();
multiexp(
srs.g_negative_x[1..(negative_poly.len() + 1)].iter().chain_ext(
srs.g_positive_x[0..positive_poly.len()].iter()
),
negative_poly.chain_ext(positive_poly)
).into_affine()
};
Ok(SxyAdvice {
s,
szy,
opening
})
}
pub fn create_advice<E: Engine, C: Circuit<E>, S: SynthesisDriver>(
circuit: &C,
proof: &Proof<E>,
parameters: &Parameters<E>,
) -> Result<SxyAdvice<E>, SynthesisError>
{
let n = parameters.vk.n;
create_advice_on_information_and_srs::<E, C, S>(circuit, proof, &parameters.srs, n)
}
pub fn create_advice_on_srs<E: Engine, C: Circuit<E>, S: SynthesisDriver>(
circuit: &C,
proof: &Proof<E>,
srs: &SRS<E>
) -> Result<SxyAdvice<E>, SynthesisError>
{
// annoying, but we need n to compute s(z, y), and this isn't
// precomputed anywhere yet
let n = {
struct CountN {
n: usize
}
impl<'a, E: Engine> Backend<E> for &'a mut CountN {
fn new_multiplication_gate(&mut self) {
self.n += 1;
}
}
let mut tmp = CountN{n:0};
S::synthesize(&mut tmp, circuit)?;
tmp.n
};
create_advice_on_information_and_srs::<E, C, S>(circuit, proof, srs, n)
}
pub fn create_proof<E: Engine, C: Circuit<E>, S: SynthesisDriver>(
circuit: &C,
parameters: &Parameters<E>
) -> Result<Proof<E>, SynthesisError> {
create_proof_on_srs::<E, C, S>(circuit, &parameters.srs)
}
extern crate rand;
use self::rand::{Rand, Rng, thread_rng};
pub fn create_proof_on_srs<E: Engine, C: Circuit<E>, S: SynthesisDriver>(
circuit: &C,
srs: &SRS<E>
) -> Result<Proof<E>, SynthesisError>
{
struct Wires<E: Engine> {
a: Vec<E::Fr>,
b: Vec<E::Fr>,
c: Vec<E::Fr>
}
impl<'a, E: Engine> Backend<E> for &'a mut Wires<E> {
fn new_multiplication_gate(&mut self) {
self.a.push(E::Fr::zero());
self.b.push(E::Fr::zero());
self.c.push(E::Fr::zero());
}
fn get_var(&self, variable: Variable) -> Option<E::Fr> {
Some(match variable {
Variable::A(index) => {
self.a[index - 1]
},
Variable::B(index) => {
self.b[index - 1]
},
Variable::C(index) => {
self.c[index - 1]
}
})
}
fn set_var<F>(&mut self, variable: Variable, value: F) -> Result<(), SynthesisError>
where F: FnOnce() -> Result<E::Fr, SynthesisError>
{
let value = value()?;
match variable {
Variable::A(index) => {
self.a[index - 1] = value;
},
Variable::B(index) => {
self.b[index - 1] = value;
},
Variable::C(index) => {
self.c[index - 1] = value;
}
}
Ok(())
}
}
let mut wires = Wires {
a: vec![],
b: vec![],
c: vec![],
};
S::synthesize(&mut wires, circuit)?;
let n = wires.a.len();
let mut transcript = Transcript::new(&[]);
let rng = &mut thread_rng();
// blinding factors c_{n+1}, ..., c_{n+NUM_BLINDINGS}
let blindings: Vec<E::Fr> = (0..NUM_BLINDINGS).into_iter().map(|_| E::Fr::rand(rng)).collect();
// r is a commitment to r(X, 1)
let r = polynomial_commitment::<E, _>(
n,
2*n + NUM_BLINDINGS,
n,
&srs,
blindings.iter().rev()
.chain_ext(wires.c.iter().rev())
.chain_ext(wires.b.iter().rev())
.chain_ext(Some(E::Fr::zero()).iter())
.chain_ext(wires.a.iter()),
);
transcript.commit_point(&r);
let y: E::Fr = transcript.get_challenge_scalar();
// Create r(X, 1); by construction it is just a series of coefficients.
// The representation used covers powers X^{-2n}...X^{-n-1}, X^{-n}...X^{-1}, X^{0}, X^{1}...X^{n}.
// The same representation works for r(X, Y) too, because the powers always match.
// TODO: add blindings c_{n+1}*X^{-2n - 1}, c_{n+2}*X^{-2n - 2}, c_{n+3}*X^{-2n - 3}, c_{n+4}*X^{-2n - 4}
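// Resulting layout of `rx1`, from lowest to highest power of X:
//   indices 0 .. NUM_BLINDINGS-1                   -> blinding factors (reversed), X^{-2n-NUM_BLINDINGS} .. X^{-2n-1}
//   indices NUM_BLINDINGS .. NUM_BLINDINGS+n-1     -> c_n .. c_1, X^{-2n} .. X^{-n-1}
//   indices NUM_BLINDINGS+n .. NUM_BLINDINGS+2n-1  -> b_n .. b_1, X^{-n} .. X^{-1}
//   index NUM_BLINDINGS+2n                         -> 0 (the X^0 coefficient)
//   indices NUM_BLINDINGS+2n+1 .. NUM_BLINDINGS+3n -> a_1 .. a_n, X^{1} .. X^{n}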
let mut rx1 = wires.b;
rx1.extend(wires.c);
rx1.extend(blindings.clone());
rx1.reverse();
rx1.push(E::Fr::zero());
rx1.extend(wires.a);
let mut rxy = rx1.clone();
let y_inv = y.inverse().ok_or(SynthesisError::DivisionByZero)?;
// y^(-2n - num blindings)
let tmp = y_inv.pow(&[(2*n + NUM_BLINDINGS) as u64]);
mut_distribute_consequitive_powers(
&mut rxy,
tmp,
y,
);
// s-polynomial: negative powers [-1, -n], positive powers [1, 2n]
let (s_poly_negative, s_poly_positive) = {
let mut tmp = SxEval::new(y, n);
S::synthesize(&mut tmp, circuit)?;
tmp.poly()
};
// TODO: Parallelize
// r'(X, y) = r(X, y) + s(X, y). Note that `y` is fixed: both are already evaluated at that point in Y.
let mut rxy_prime = rxy.clone();
{
// extend to have powers [n+1, 2n]
rxy_prime.resize(4 * n + 1 + NUM_BLINDINGS, E::Fr::zero());
// add coefficients in front of X^{-2n}...X^{-n-1}, X^{-n}...X^{-1}
for (r, s) in rxy_prime[NUM_BLINDINGS..(2 * n + NUM_BLINDINGS)]
.iter_mut()
.rev()
.zip(s_poly_negative)
{
r.add_assign(&s);
}
// add coefficients in front of X^{1}...X^{n}, X^{n+1}...X^{2*n}
for (r, s) in rxy_prime[(2 * n + 1 + NUM_BLINDINGS)..].iter_mut().zip(s_poly_positive) {
r.add_assign(&s);
}
}
// By this point all r-related polynomials are blinded and evaluated in the Y variable.
// t(X, y) = r'(X, y)*r(X, 1) will later be opened at z;
// its powers of X range from -(4n + 2*NUM_BLINDINGS) to 3n, including X^0.
let mut txy = multiply_polynomials::<E>(rx1.clone(), rxy_prime);
txy[4 * n + 2 * NUM_BLINDINGS] = E::Fr::zero(); // -k(y)
// commit to t(X, y) to later open at z
let t = polynomial_commitment(
srs.d,
(4 * n) + 2*NUM_BLINDINGS,
3 * n,
srs,
// skip what would be zero power
txy[0..(4 * n) + 2*NUM_BLINDINGS].iter()
.chain_ext(txy[(4 * n + 2*NUM_BLINDINGS + 1)..].iter()),
);
transcript.commit_point(&t);
let z: E::Fr = transcript.get_challenge_scalar();
let z_inv = z.inverse().ok_or(SynthesisError::DivisionByZero)?;
let rz = {
let tmp = z_inv.pow(&[(2*n + NUM_BLINDINGS) as u64]);
evaluate_at_consequitive_powers(&rx1, tmp, z)
};
// {
// rx1[(2 * n + NUM_BLINDINGS)].sub_assign(&rz);
// let opening = polynomial_commitment_opening(
// 2 * n + NUM_BLINDINGS,
// n,
// rx1.iter(),
// z,
// srs
// );
// let valid_rz_commitment = check_polynomial_commitment(&r, &z, &rz, &opening, n, &srs);
// assert!(valid_rz_commitment);
// rx1[(2 * n + NUM_BLINDINGS)].add_assign(&rz);
// }
// rzy is evaluation of r(X, Y) at z, y
let rzy = {
let tmp = z_inv.pow(&[(2*n + NUM_BLINDINGS) as u64]);
evaluate_at_consequitive_powers(&rxy, tmp, z)
};
transcript.commit_scalar(&rz);
transcript.commit_scalar(&rzy);
let r1: E::Fr = transcript.get_challenge_scalar();
let zy_opening = {
// r(X, 1) - r(z, y)
// subtract constant term from R(X, 1)
rx1[(2 * n + NUM_BLINDINGS)].sub_assign(&rzy);
let mut point = y;
point.mul_assign(&z);
polynomial_commitment_opening(
2 * n + NUM_BLINDINGS,
n,
&rx1,
point,
srs
)
};
assert_eq!(rx1.len(), 3*n + NUM_BLINDINGS + 1);
// it's an opening of t(X, y) at z
let z_opening = {
rx1[(2 * n + NUM_BLINDINGS)].add_assign(&rzy); // restore
// add r1 * r(X, 1) into t(X, y), skipping the powers of t below X^{-(2n + NUM_BLINDINGS)} so the X powers line up
for (t, &r) in txy[(2 * n + NUM_BLINDINGS)..].iter_mut().zip(rx1.iter()) {
let mut r = r;
r.mul_assign(&r1);
t.add_assign(&r);
}
let val = {
let tmp = z_inv.pow(&[(4*n + 2*NUM_BLINDINGS) as u64]);
evaluate_at_consequitive_powers(&txy, tmp, z)
};
txy[(4 * n + 2*NUM_BLINDINGS)].sub_assign(&val);
polynomial_commitment_opening(
4*n + 2*NUM_BLINDINGS,
3*n,
&txy,
z,
srs)
};
// let mut zy = z;
// zy.mul_assign(&y);
// let valid_rzy_commitment = check_polynomial_commitment(&r, &zy, &rzy, &zy_opening, n, &srs);
// assert!(valid_rzy_commitment);
Ok(Proof {
r, rz, rzy, t, z_opening, zy_opening
})
}
#[test]
fn my_fun_circuit_test() {
use pairing::ff::PrimeField;
use pairing::bls12_381::{Bls12, Fr};
use super::*;
use crate::sonic::cs::{Basic, ConstraintSystem, LinearCombination};
use rand::{thread_rng};
struct MyCircuit;
impl<E: Engine> Circuit<E> for MyCircuit {
fn synthesize<CS: ConstraintSystem<E>>(&self, cs: &mut CS) -> Result<(), SynthesisError> {
let (a, b, _) = cs.multiply(|| {
Ok((
E::Fr::from_str("10").unwrap(),
E::Fr::from_str("20").unwrap(),
E::Fr::from_str("200").unwrap(),
))
})?;
cs.enforce_zero(LinearCombination::from(a) + a - b);
//let multiplier = cs.alloc_input(|| Ok(E::Fr::from_str("20").unwrap()))?;
//cs.enforce_zero(LinearCombination::from(b) - multiplier);
Ok(())
}
}
let srs = SRS::<Bls12>::new(
20,
Fr::from_str("22222").unwrap(),
Fr::from_str("33333333").unwrap(),
);
let proof = self::create_proof_on_srs::<Bls12, _, Basic>(&MyCircuit, &srs).unwrap();
use std::time::{Instant};
let start = Instant::now();
let rng = thread_rng();
let mut batch = MultiVerifier::<Bls12, _, Basic, _>::new(MyCircuit, &srs, rng).unwrap();
for _ in 0..1 {
batch.add_proof(&proof, &[/*Fr::from_str("20").unwrap()*/], |_, _| None);
}
assert!(batch.check_all());
let elapsed = start.elapsed();
println!("time to verify: {:?}", elapsed);
}
#[test]
fn polynomial_commitment_test() {
use pairing::ff::PrimeField;
use pairing::ff::PrimeFieldRepr;
use pairing::bls12_381::{Bls12, Fr};
use super::*;
use crate::sonic::cs::{Basic, ConstraintSystem, LinearCombination};
use rand::{thread_rng};
use pairing::{CurveAffine};
let srs = SRS::<Bls12>::new(
20,
Fr::from_str("22222").unwrap(),
Fr::from_str("33333333").unwrap(),
);
let mut rng = thread_rng();
// x^-4 + x^-3 + x^-2 + x^-1 + x + x^2
let mut poly = vec![Fr::one(), Fr::one(), Fr::one(), Fr::one(), Fr::zero(), Fr::one(), Fr::one()];
// make commitment to the poly
let commitment = polynomial_commitment(2, 4, 2, &srs, poly.iter());
let point: Fr = rng.gen();
let mut tmp = point.inverse().unwrap();
tmp.square();
let value = evaluate_at_consequitive_powers(&poly, tmp, point);
// evaluate f(z)
poly[4] = value;
poly[4].negate();
// f(x) - f(z)
let opening = polynomial_commitment_opening(4, 2, poly.iter(), point, &srs);
// e(W, h^{\alpha x}) * e(g^{v} * W^{-z}, h^{\alpha}) = e(F, h^{x^{-d + max}})
let alpha_x_precomp = srs.h_positive_x_alpha[1].prepare();
let alpha_precomp = srs.h_positive_x_alpha[0].prepare();
let mut neg_x_n_minus_d_precomp = srs.h_negative_x[srs.d - 2];
neg_x_n_minus_d_precomp.negate();
let neg_x_n_minus_d_precomp = neg_x_n_minus_d_precomp.prepare();
// let neg_x_n_minus_d_precomp = srs.h_negative_x[0].prepare();
let w = opening.prepare();
let mut gv = srs.g_positive_x[0].mul(value.into_repr());
let mut z_neg = point;
z_neg.negate();
let w_minus_z = opening.mul(z_neg.into_repr());
gv.add_assign(&w_minus_z);
let gv = gv.into_affine().prepare();
assert!(Bls12::final_exponentiation(&Bls12::miller_loop(&[
(&w, &alpha_x_precomp),
(&gv, &alpha_precomp),
(&commitment.prepare(), &neg_x_n_minus_d_precomp),
])).unwrap() == <Bls12 as Engine>::Fqk::one());
}

@ -0,0 +1,333 @@
use pairing::ff::{Field};
use pairing::{Engine, CurveProjective};
use std::marker::PhantomData;
use rand::{Rand, Rng};
use super::{Proof, SxyAdvice};
use super::batch::Batch;
use super::poly::{SxEval, SyEval};
use super::helper::Aggregate;
use super::parameters::{Parameters};
use crate::SynthesisError;
use crate::sonic::transcript::{Transcript, TranscriptProtocol};
use crate::sonic::util::*;
use crate::sonic::cs::{Backend, SynthesisDriver};
use crate::sonic::cs::{Circuit, Variable, Coeff};
use crate::sonic::srs::SRS;
pub struct MultiVerifier<E: Engine, C: Circuit<E>, S: SynthesisDriver, R: Rng> {
circuit: C,
pub(crate) batch: Batch<E>,
k_map: Vec<usize>,
n: usize,
q: usize,
randomness_source: R,
_marker: PhantomData<(E, S)>
}
impl<E: Engine, C: Circuit<E>, S: SynthesisDriver, R: Rng> MultiVerifier<E, C, S, R> {
// This constructor consumes the randomness source because it is used internally later
pub fn new(circuit: C, srs: &SRS<E>, rng: R) -> Result<Self, SynthesisError> {
struct Preprocess<E: Engine> {
k_map: Vec<usize>,
n: usize,
q: usize,
_marker: PhantomData<E>
}
impl<'a, E: Engine> Backend<E> for &'a mut Preprocess<E> {
fn new_k_power(&mut self, index: usize) {
self.k_map.push(index);
}
fn new_multiplication_gate(&mut self) {
self.n += 1;
}
fn new_linear_constraint(&mut self) {
self.q += 1;
}
}
let mut preprocess = Preprocess { k_map: vec![], n: 0, q: 0, _marker: PhantomData };
S::synthesize(&mut preprocess, &circuit)?;
Ok(MultiVerifier {
circuit,
batch: Batch::new(srs, preprocess.n),
k_map: preprocess.k_map,
n: preprocess.n,
q: preprocess.q,
randomness_source: rng,
_marker: PhantomData
})
}
pub fn add_aggregate(
&mut self,
proofs: &[(Proof<E>, SxyAdvice<E>)],
aggregate: &Aggregate<E>,
)
{
let mut transcript = Transcript::new(&[]);
let mut y_values: Vec<E::Fr> = Vec::with_capacity(proofs.len());
for &(ref proof, ref sxyadvice) in proofs {
{
let mut transcript = Transcript::new(&[]);
transcript.commit_point(&proof.r);
y_values.push(transcript.get_challenge_scalar());
}
transcript.commit_point(&sxyadvice.s);
}
let z: E::Fr = transcript.get_challenge_scalar();
transcript.commit_point(&aggregate.c);
let w: E::Fr = transcript.get_challenge_scalar();
let szw = {
let mut tmp = SxEval::new(w, self.n);
S::synthesize(&mut tmp, &self.circuit).unwrap(); // TODO
tmp.finalize(z)
};
{
let random: E::Fr = self.randomness_source.gen();
self.batch.add_opening(aggregate.opening, random, w);
self.batch.add_commitment(aggregate.c, random);
self.batch.add_opening_value(szw, random);
}
for ((opening, value), &y) in aggregate.c_openings.iter().zip(y_values.iter()) {
let random: E::Fr = self.randomness_source.gen();
self.batch.add_opening(*opening, random, y);
self.batch.add_commitment(aggregate.c, random);
self.batch.add_opening_value(*value, random);
}
let random: E::Fr = self.randomness_source.gen();
let mut expected_value = E::Fr::zero();
for ((_, advice), c_opening) in proofs.iter().zip(aggregate.c_openings.iter()) {
let mut r: E::Fr = transcript.get_challenge_scalar();
// expected value of the later opening
{
let mut tmp = c_opening.1;
tmp.mul_assign(&r);
expected_value.add_assign(&tmp);
}
r.mul_assign(&random);
self.batch.add_commitment(advice.s, r);
}
self.batch.add_opening_value(expected_value, random);
self.batch.add_opening(aggregate.s_opening, random, z);
}
/// The caller must make sure to add the aggregate after adding the corresponding proofs
pub fn add_proof_with_advice(
&mut self,
proof: &Proof<E>,
inputs: &[E::Fr],
advice: &SxyAdvice<E>,
)
{
let mut z = None;
self.add_proof(proof, inputs, |_z, _y| {
z = Some(_z);
Some(advice.szy)
});
let z = z.unwrap();
// We need to open up SxyAdvice.s at z using SxyAdvice.opening
let mut transcript = Transcript::new(&[]);
transcript.commit_point(&advice.opening);
transcript.commit_point(&advice.s);
transcript.commit_scalar(&advice.szy);
let random: E::Fr = self.randomness_source.gen();
self.batch.add_opening(advice.opening, random, z);
self.batch.add_commitment(advice.s, random);
self.batch.add_opening_value(advice.szy, random);
}
pub fn add_proof<F>(
&mut self,
proof: &Proof<E>,
inputs: &[E::Fr],
sxy: F
)
where F: FnOnce(E::Fr, E::Fr) -> Option<E::Fr>
{
let mut transcript = Transcript::new(&[]);
transcript.commit_point(&proof.r);
let y: E::Fr = transcript.get_challenge_scalar();
transcript.commit_point(&proof.t);
let z: E::Fr = transcript.get_challenge_scalar();
transcript.commit_scalar(&proof.rz);
transcript.commit_scalar(&proof.rzy);
let r1: E::Fr = transcript.get_challenge_scalar();
transcript.commit_point(&proof.z_opening);
transcript.commit_point(&proof.zy_opening);
// First, the easy one. Let's open up proof.r at zy, using proof.zy_opening
// as the evidence and proof.rzy as the opening.
{
let random: E::Fr = self.randomness_source.gen();
let mut zy = z;
zy.mul_assign(&y);
self.batch.add_opening(proof.zy_opening, random, zy);
self.batch.add_commitment_max_n(proof.r, random);
self.batch.add_opening_value(proof.rzy, random);
}
// Now we need to compute t(z, y) with what we have. Let's compute k(y).
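// k(y) = \sum_j y^{k_map[j] + n} * input_j, where input_0 = 1 is the constant ONE input.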
let mut ky = E::Fr::zero();
for (exp, input) in self.k_map.iter().zip(Some(E::Fr::one()).iter().chain(inputs.iter())) {
let mut term = y.pow(&[(*exp + self.n) as u64]);
term.mul_assign(input);
ky.add_assign(&term);
}
// Compute s(z, y)
let szy = sxy(z, y).unwrap_or_else(|| {
let mut tmp = SxEval::new(y, self.n);
S::synthesize(&mut tmp, &self.circuit).unwrap(); // TODO
tmp.finalize(z)
// let mut tmp = SyEval::new(z, self.n, self.q);
// S::synthesize(&mut tmp, &self.circuit).unwrap(); // TODO
// tmp.finalize(y)
});
// Finally, compute t(z, y)
// t(z, y) = (r(z, y) + s(z,y))*r(z, 1) - k(y)
let mut tzy = proof.rzy;
tzy.add_assign(&szy);
tzy.mul_assign(&proof.rz);
tzy.sub_assign(&ky);
// We open these both at the same time by keeping their commitments
// linearly independent (using r1).
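// Concretely, z_opening is used to open the combination
//     [random] t + [random * r1] r
// at z to the value random * tzy + random * r1 * rz.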
{
let mut random: E::Fr = self.randomness_source.gen();
self.batch.add_opening(proof.z_opening, random, z);
self.batch.add_opening_value(tzy, random);
self.batch.add_commitment(proof.t, random);
random.mul_assign(&r1);
self.batch.add_opening_value(proof.rz, random);
self.batch.add_commitment_max_n(proof.r, random);
}
}
pub fn get_k_map(&self) -> Vec<usize> {
return self.k_map.clone();
}
pub fn get_n(&self) -> usize {
return self.n;
}
pub fn get_q(&self) -> usize {
return self.q;
}
pub fn check_all(self) -> bool {
self.batch.check_all()
}
}
/// Check multiple proofs without aggregation. The verifier's work is
/// not succinct due to the `S(X, Y)` evaluation.
pub fn verify_proofs<E: Engine, C: Circuit<E>, S: SynthesisDriver, R: Rng>(
proofs: &[Proof<E>],
inputs: &[Vec<E::Fr>],
circuit: C,
rng: R,
params: &Parameters<E>,
) -> Result<bool, SynthesisError> {
verify_proofs_on_srs::<E, C, S, R>(proofs, inputs, circuit, rng, &params.srs)
}
/// Check multiple proofs without aggregation. The verifier's work is
/// not succinct due to the `S(X, Y)` evaluation.
pub fn verify_proofs_on_srs<E: Engine, C: Circuit<E>, S: SynthesisDriver, R: Rng>(
proofs: &[Proof<E>],
inputs: &[Vec<E::Fr>],
circuit: C,
rng: R,
srs: &SRS<E>,
) -> Result<bool, SynthesisError> {
let mut verifier = MultiVerifier::<E, C, S, R>::new(circuit, srs, rng)?;
let expected_inputs_size = verifier.get_k_map().len() - 1;
for (proof, inputs) in proofs.iter().zip(inputs.iter()) {
if inputs.len() != expected_inputs_size {
return Err(SynthesisError::Unsatisfiable);
}
verifier.add_proof(proof, &inputs, |_, _| None);
}
Ok(verifier.check_all())
}
/// Check multiple proofs with aggregation. Verifier's work is
/// not succinct due to `S(X, Y)` evaluation
pub fn verify_aggregate<E: Engine, C: Circuit<E>, S: SynthesisDriver,R: Rng>(
proofs: &[(Proof<E>, SxyAdvice<E>)],
aggregate: &Aggregate<E>,
inputs: &[Vec<E::Fr>],
circuit: C,
rng: R,
params: &Parameters<E>,
) -> Result<bool, SynthesisError> {
verify_aggregate_on_srs::<E, C, S, R>(proofs, aggregate, inputs, circuit, rng, &params.srs)
}
/// Check multiple proofs with aggregation. Verifier's work is
/// not succinct due to `S(X, Y)` evaluation
pub fn verify_aggregate_on_srs<E: Engine, C: Circuit<E>, S: SynthesisDriver, R: Rng>(
proofs: &[(Proof<E>, SxyAdvice<E>)],
aggregate: &Aggregate<E>,
inputs: &[Vec<E::Fr>],
circuit: C,
rng: R,
srs: &SRS<E>,
) -> Result<bool, SynthesisError> {
let mut verifier = MultiVerifier::<E, C, S, R>::new(circuit, srs, rng)?;
let expected_inputs_size = verifier.get_k_map().len() - 1;
for ((proof, advice), inputs) in proofs.iter().zip(inputs.iter()) {
if inputs.len() != expected_inputs_size {
return Err(SynthesisError::Unsatisfiable);
}
verifier.add_proof_with_advice(proof, &inputs, &advice);
}
verifier.add_aggregate(proofs, aggregate);
Ok(verifier.check_all())
}
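At a call site the non-aggregated path is a one-liner (sketch only, using the signature above; `proofs`, `inputs`, `circuit`, `params` and rand's `thread_rng` are assumed to be available):
// Sketch only.
let ok = verify_proofs::<Bls12, _, Basic, _>(&proofs, &inputs, circuit, thread_rng(), &params).unwrap();
assert!(ok);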

17
src/sonic/mod.rs Normal file

@ -0,0 +1,17 @@
extern crate pairing;
pub use crate::{SynthesisError};
pub mod sonic;
pub mod srs;
pub mod util;
pub mod helped;
pub mod cs;
pub mod unhelped;
mod transcript;
#[cfg(test)]
mod tests;

310
src/sonic/paper.rs Normal file

@ -0,0 +1,310 @@
#[test]
fn test_paper_results() {
use pairing::bls12_381::{Bls12, Fr};
use std::time::{Instant};
let srs_x = Fr::from_str("23923").unwrap();
let srs_alpha = Fr::from_str("23728792").unwrap();
println!("making srs");
let start = Instant::now();
let srs = SRS::<Bls12>::dummy(830564, srs_x, srs_alpha);
println!("done in {:?}", start.elapsed());
struct PedersenHashPreimageCircuit<'a, E: sapling_crypto::jubjub::JubjubEngine + 'a> {
preimage: Vec<Option<bool>>,
params: &'a E::Params,
}
impl<'a, E: sapling_crypto::jubjub::JubjubEngine + 'a> Clone for PedersenHashPreimageCircuit<'a, E> {
fn clone(&self) -> Self {
PedersenHashPreimageCircuit {
preimage: self.preimage.clone(),
params: self.params
}
}
}
impl<'a, E: sapling_crypto::jubjub::JubjubEngine> bellman::Circuit<E> for PedersenHashPreimageCircuit<'a, E> {
fn synthesize<CS: bellman::ConstraintSystem<E>>(
self,
cs: &mut CS
) -> Result<(), bellman::SynthesisError>
{
//use bellman::ConstraintSystem;
use sapling_crypto::circuit::boolean::{AllocatedBit, Boolean};
use sapling_crypto::circuit::pedersen_hash;
let mut preimage = vec![];
for &bit in self.preimage.iter() {
preimage.push(Boolean::from(AllocatedBit::alloc(&mut* cs, bit)?));
}
pedersen_hash::pedersen_hash(
&mut* cs, pedersen_hash::Personalization::NoteCommitment, &preimage, self.params)?;
Ok(())
}
}
#[derive(Clone)]
struct SHA256PreimageCircuit {
preimage: Vec<Option<bool>>,
}
impl<E: Engine> bellman::Circuit<E> for SHA256PreimageCircuit {
fn synthesize<CS: bellman::ConstraintSystem<E>>(
self,
cs: &mut CS,
) -> Result<(), bellman::SynthesisError> {
//use bellman::ConstraintSystem;
use sapling_crypto::circuit::boolean::{AllocatedBit, Boolean};
use sapling_crypto::circuit::sha256::sha256_block_no_padding;
let mut preimage = vec![];
for &bit in self.preimage.iter() {
preimage.push(Boolean::from(AllocatedBit::alloc(&mut *cs, bit)?));
}
sha256_block_no_padding(&mut *cs, &preimage)?;
sha256_block_no_padding(&mut *cs, &preimage)?;
sha256_block_no_padding(&mut *cs, &preimage)?;
// sha256_block_no_padding(&mut *cs, &preimage)?;
Ok(())
}
}
{
use pairing::{CurveAffine};
use pairing::bls12_381::{G1Affine, G2Affine};
let a = G1Affine::one();
let b = G2Affine::one();
let c = G1Affine::one();
let alpha = G1Affine::one();
let beta = G2Affine::one();
let iv = G1Affine::one();
let gamma = G2Affine::one().prepare();
let delta = G2Affine::one().prepare();
let alphabeta = <Bls12 as Engine>::pairing(alpha, beta);
println!("verifying an idealized groth16 proof");
let start = Instant::now();
assert!(<Bls12 as Engine>::final_exponentiation(
&<Bls12 as Engine>::miller_loop([
(&a.prepare(), &b.prepare()),
(&iv.prepare(), &gamma),
(&c.prepare(), &delta),
].into_iter())
).unwrap() != alphabeta);
println!("done in {:?}", start.elapsed());
}
{
use sonic::util::multiexp;
use pairing::{CurveAffine};
use pairing::bls12_381::{G1Affine, G2Affine};
// e([\alpha G], [\beta H]) = e(A, B) e(IV, [\gamma] H) e(C, [\delta] H)
let a = G1Affine::one();
let b = G2Affine::one();
let c = vec![G1Affine::one(); 100];
let mut tmp = Fr::one();
tmp.double();
tmp = tmp.inverse().unwrap();
let cscalars = (0..100).map(|_| {tmp.square(); tmp}).collect::<Vec<_>>();
let alpha = G1Affine::one();
let beta = G2Affine::one();
let iv = G1Affine::one();
let gamma = G2Affine::one().prepare();
let delta = G2Affine::one().prepare();
let alphabeta = <Bls12 as Engine>::pairing(alpha, beta);
println!("verifying 100 idealized groth16 proofs");
let start = Instant::now();
let c = multiexp(
c.iter(),
cscalars.iter(),
).into_affine();
assert!(<Bls12 as Engine>::final_exponentiation(
&<Bls12 as Engine>::miller_loop([
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&a.prepare(), &b.prepare()),
(&iv.prepare(), &gamma),
(&c.prepare(), &delta),
].into_iter())
).unwrap() != alphabeta);
println!("done in {:?}", start.elapsed());
}
{
let samples: usize = 100;
const NUM_BITS: usize = 384;
let params = sapling_crypto::jubjub::JubjubBls12::new();
let circuit = PedersenHashPreimageCircuit {
preimage: vec![Some(true); NUM_BITS],
params: &params
};
println!("creating proof");
let start = Instant::now();
let proof = create_proof::<Bls12, _, Basic>(&AdaptorCircuit(circuit.clone()), &srs).unwrap();
println!("done in {:?}", start.elapsed());
println!("creating advice");
let start = Instant::now();
let advice = create_advice::<Bls12, _, Basic>(&AdaptorCircuit(circuit.clone()), &proof, &srs);
println!("done in {:?}", start.elapsed());
println!("creating aggregate for {} proofs", samples);
let start = Instant::now();
let proofs: Vec<_> = (0..samples).map(|_| (proof.clone(), advice.clone())).collect();
let aggregate = create_aggregate::<Bls12, _, Basic>(&AdaptorCircuit(circuit.clone()), &proofs, &srs);
println!("done in {:?}", start.elapsed());
{
let mut verifier = MultiVerifier::<Bls12, _, Basic>::new(AdaptorCircuit(circuit.clone()), &srs).unwrap();
println!("verifying 1 proof without advice");
let start = Instant::now();
{
for _ in 0..1 {
verifier.add_proof(&proof, &[], |_, _| None);
}
assert_eq!(verifier.check_all(), true); // TODO
}
println!("done in {:?}", start.elapsed());
}
{
let mut verifier = MultiVerifier::<Bls12, _, Basic>::new(AdaptorCircuit(circuit.clone()), &srs).unwrap();
println!("verifying {} proofs without advice", samples);
let start = Instant::now();
{
for _ in 0..samples {
verifier.add_proof(&proof, &[], |_, _| None);
}
assert_eq!(verifier.check_all(), true); // TODO
}
println!("done in {:?}", start.elapsed());
}
{
let mut verifier = MultiVerifier::<Bls12, _, Basic>::new(AdaptorCircuit(circuit.clone()), &srs).unwrap();
println!("verifying 100 proofs with advice");
let start = Instant::now();
{
for (ref proof, ref advice) in &proofs {
verifier.add_proof_with_advice(proof, &[], advice);
}
verifier.add_aggregate(&proofs, &aggregate);
assert_eq!(verifier.check_all(), true); // TODO
}
println!("done in {:?}", start.elapsed());
}
}
}

165
src/sonic/sonic/adaptor.rs Normal file

@ -0,0 +1,165 @@
extern crate pairing;
extern crate rand;
use pairing::ff::{Field, PrimeField};
use pairing::{Engine, CurveProjective};
// this one is for all external interfaces
// use crate::{LinearCombination, ConstraintSystem, Circuit, Variable};
use crate::SynthesisError;
use crate::sonic::srs::SRS;
use crate::sonic::cs::LinearCombination as SonicLinearCombination;
use crate::sonic::cs::Circuit as SonicCircuit;
use crate::sonic::cs::ConstraintSystem as SonicConstraintSystem;
use crate::sonic::cs::Variable as SonicVariable;
use crate::sonic::cs::Coeff;
use std::marker::PhantomData;
pub struct Adaptor<'a, E: Engine, CS: SonicConstraintSystem<E> + 'a> {
cs: &'a mut CS,
_marker: PhantomData<E>,
}
impl<'a, E: Engine, CS: SonicConstraintSystem<E> + 'a> crate::ConstraintSystem<E>
for Adaptor<'a, E, CS>
{
type Root = Self;
// this is an important change
fn one() -> crate::Variable {
crate::Variable::new_unchecked(crate::Index::Input(1))
}
fn alloc<F, A, AR>(&mut self, _: A, f: F) -> Result<crate::Variable, crate::SynthesisError>
where
F: FnOnce() -> Result<E::Fr, crate::SynthesisError>,
A: FnOnce() -> AR,
AR: Into<String>,
{
let var = self.cs.alloc(|| {
f().map_err(|_| crate::SynthesisError::AssignmentMissing)
}).map_err(|_| crate::SynthesisError::AssignmentMissing)?;
Ok(match var {
SonicVariable::A(index) => crate::Variable::new_unchecked(crate::Index::Input(index)),
SonicVariable::B(index) => crate::Variable::new_unchecked(crate::Index::Aux(index)),
_ => unreachable!(),
})
}
fn alloc_input<F, A, AR>(
&mut self,
_: A,
f: F,
) -> Result<crate::Variable, crate::SynthesisError>
where
F: FnOnce() -> Result<E::Fr, crate::SynthesisError>,
A: FnOnce() -> AR,
AR: Into<String>,
{
let var = self.cs.alloc_input(|| {
f().map_err(|_| crate::SynthesisError::AssignmentMissing)
}).map_err(|_| crate::SynthesisError::AssignmentMissing)?;
Ok(match var {
SonicVariable::A(index) => crate::Variable::new_unchecked(crate::Index::Input(index)),
SonicVariable::B(index) => crate::Variable::new_unchecked(crate::Index::Aux(index)),
_ => unreachable!(),
})
}
fn enforce<A, AR, LA, LB, LC>(&mut self, _: A, a: LA, b: LB, c: LC)
where
A: FnOnce() -> AR,
AR: Into<String>,
LA: FnOnce(crate::LinearCombination<E>) -> crate::LinearCombination<E>,
LB: FnOnce(crate::LinearCombination<E>) -> crate::LinearCombination<E>,
LC: FnOnce(crate::LinearCombination<E>) -> crate::LinearCombination<E>,
{
fn convert<E: Engine>(lc: crate::LinearCombination<E>) -> SonicLinearCombination<E> {
let mut ret = SonicLinearCombination::zero();
for &(v, coeff) in lc.as_ref().iter() {
let var = match v.get_unchecked() {
crate::Index::Input(i) => SonicVariable::A(i),
crate::Index::Aux(i) => SonicVariable::B(i),
};
ret = ret + (Coeff::Full(coeff), var);
}
ret
}
fn eval<E: Engine, CS: SonicConstraintSystem<E>>(
lc: &SonicLinearCombination<E>,
cs: &CS,
) -> Option<E::Fr> {
let mut ret = E::Fr::zero();
for &(v, coeff) in lc.as_ref().iter() {
let mut tmp = match cs.get_value(v) {
Ok(tmp) => tmp,
Err(_) => return None,
};
coeff.multiply(&mut tmp);
ret.add_assign(&tmp);
}
Some(ret)
}
let a_lc = convert(a(crate::LinearCombination::zero()));
let a_value = eval(&a_lc, &*self.cs);
let b_lc = convert(b(crate::LinearCombination::zero()));
let b_value = eval(&b_lc, &*self.cs);
let c_lc = convert(c(crate::LinearCombination::zero()));
let c_value = eval(&c_lc, &*self.cs);
let (a, b, c) = self
.cs
.multiply(|| Ok((a_value.unwrap(), b_value.unwrap(), c_value.unwrap())))
.unwrap();
self.cs.enforce_zero(a_lc - a);
self.cs.enforce_zero(b_lc - b);
self.cs.enforce_zero(c_lc - c);
}
fn push_namespace<NR, N>(&mut self, _: N)
where
NR: Into<String>,
N: FnOnce() -> NR,
{
// Do nothing; we don't care about namespaces in this context.
}
fn pop_namespace(&mut self) {
// Do nothing; we don't care about namespaces in this context.
}
fn get_root(&mut self) -> &mut Self::Root {
self
}
}
#[derive(Clone)]
pub struct AdaptorCircuit<T>(pub T);
impl<'a, E: Engine, C: crate::Circuit<E> + Clone> SonicCircuit<E> for AdaptorCircuit<C> {
fn synthesize<CS: SonicConstraintSystem<E>>(&self, cs: &mut CS) -> Result<(), SynthesisError> {
let mut adaptor = Adaptor {
cs: cs,
_marker: PhantomData,
};
match self.0.clone().synthesize(&mut adaptor) {
Err(_) => return Err(SynthesisError::AssignmentMissing),
Ok(_) => {}
};
Ok(())
}
}
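A sketch of how the adaptor is meant to be used, grounded in the tests in this commit: wrap any clonable R1CS `bellman::Circuit` in `AdaptorCircuit` and hand it to the sonic prover (`circuit` and `srs` are assumed to exist).
// Sketch only.
use bellman::sonic::cs::Basic;
use bellman::sonic::sonic::AdaptorCircuit;
use bellman::sonic::helped::prover::create_proof_on_srs;
let proof = create_proof_on_srs::<Bls12, _, Basic>(&AdaptorCircuit(circuit.clone()), &srs).unwrap();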

5
src/sonic/sonic/mod.rs Normal file

@ -0,0 +1,5 @@
extern crate pairing;
mod adaptor;
pub use self::adaptor::{Adaptor, AdaptorCircuit};

4
src/sonic/srs/mod.rs Normal file

@ -0,0 +1,4 @@
extern crate pairing;
mod srs;
pub use self::srs::SRS;

274
src/sonic/srs/srs.rs Normal file

@ -0,0 +1,274 @@
use pairing::ff::{Field, PrimeField};
use pairing::{CurveAffine, CurveProjective, Engine, Wnaf};
use std::io::{self, Read, Write};
use std::sync::Arc;
use byteorder::{BigEndian, WriteBytesExt, ReadBytesExt};
#[derive(Clone, Eq)]
pub struct SRS<E: Engine> {
pub d: usize,
// g^{x^0}, g^{x^{-1}}, g^{x^{-2}}, ..., g^{x^{-d}}
pub g_negative_x: Vec<E::G1Affine>,
// g^{x^0}, g^{x^{1}}, g^{x^{2}}, ..., g^{x^{d}}
pub g_positive_x: Vec<E::G1Affine>,
// g^{x^0}, g^{x^{-1}}, g^{x^{-2}}, ..., g^{x^{-d}}
pub h_negative_x: Vec<E::G2Affine>,
// g^{x^0}, g^{x^{1}}, g^{x^{2}}, ..., g^{x^{d}}
pub h_positive_x: Vec<E::G2Affine>,
// alpha*(g^{x^{-1}}, g^{x^{-2}}, ..., g^{x^{-d}})
pub g_negative_x_alpha: Vec<E::G1Affine>,
// alpha*(g^{x^{1}}, g^{x^{2}}, ..., g^{x^{d}})
pub g_positive_x_alpha: Vec<E::G1Affine>,
// alpha*(h^{x^0}, h^{x^{-1}}, g^{x^{-2}}, ..., g^{x^{-d}})
pub h_negative_x_alpha: Vec<E::G2Affine>,
// alpha*(h^{x^0}, g^{x^{1}}, g^{x^{2}}, ..., g^{x^{d}})
pub h_positive_x_alpha: Vec<E::G2Affine>,
}
impl<E: Engine> PartialEq for SRS<E> {
fn eq(&self, other: &SRS<E>) -> bool {
self.d == other.d &&
self.g_negative_x == other.g_negative_x &&
self.g_positive_x == other.g_positive_x &&
self.h_negative_x == other.h_negative_x &&
self.h_positive_x == other.h_positive_x &&
self.g_negative_x_alpha == other.g_negative_x_alpha &&
self.g_positive_x_alpha == other.g_positive_x_alpha &&
self.h_negative_x_alpha == other.h_negative_x_alpha &&
self.h_positive_x_alpha == other.h_positive_x_alpha
}
}
impl<E: Engine> SRS<E> {
pub fn dummy(d: usize, _: E::Fr, _: E::Fr) -> Self {
SRS {
d: d,
g_negative_x: vec![E::G1Affine::one(); d + 1],
g_positive_x: vec![E::G1Affine::one(); d + 1],
h_negative_x: vec![E::G2Affine::one(); d + 1],
h_positive_x: vec![E::G2Affine::one(); d + 1],
g_negative_x_alpha: vec![E::G1Affine::one(); d],
g_positive_x_alpha: vec![E::G1Affine::one(); d],
h_negative_x_alpha: vec![E::G2Affine::one(); d + 1],
h_positive_x_alpha: vec![E::G2Affine::one(); d + 1],
}
}
pub fn new(d: usize, x: E::Fr, alpha: E::Fr) -> Self {
let mut g1 = Wnaf::new();
let mut g1 = g1.base(E::G1::one(), d * 4);
let mut g2 = Wnaf::new();
let mut g2 = g2.base(E::G2::one(), d * 4);
fn table<C: CurveAffine>(
mut cur: C::Scalar,
step: C::Scalar,
num: usize,
table: &mut Wnaf<usize, &[C::Projective], &mut Vec<i64>>,
) -> Vec<C> {
let mut v = vec![];
for _ in 0..num {
v.push(table.scalar(cur.into_repr()));
cur.mul_assign(&step);
}
C::Projective::batch_normalization(&mut v);
let v = v.into_iter().map(|e| e.into_affine()).collect();
v
}
let x_inv = x.inverse().unwrap();
let mut x_alpha = x;
x_alpha.mul_assign(&alpha);
let mut inv_x_alpha = x_inv;
inv_x_alpha.mul_assign(&alpha);
SRS {
d: d,
g_negative_x: table(E::Fr::one(), x_inv, d + 1, &mut g1),
g_positive_x: table(E::Fr::one(), x, d + 1, &mut g1),
h_negative_x: table(E::Fr::one(), x_inv, d + 1, &mut g2),
h_positive_x: table(E::Fr::one(), x, d + 1, &mut g2),
g_negative_x_alpha: table(inv_x_alpha, x_inv, d, &mut g1),
g_positive_x_alpha: table(x_alpha, x, d, &mut g1),
h_negative_x_alpha: table(alpha, x_inv, d + 1, &mut g2),
h_positive_x_alpha: table(alpha, x, d + 1, &mut g2),
}
}
}
impl<E: Engine> SRS<E> {
pub fn write<W: Write>(
&self,
mut writer: W
) -> io::Result<()>
{
assert_eq!(self.d + 1, self.g_negative_x.len());
assert_eq!(self.d + 1, self.g_positive_x.len());
assert_eq!(self.d + 1, self.h_negative_x.len());
assert_eq!(self.d + 1, self.h_positive_x.len());
assert_eq!(self.d, self.g_negative_x_alpha.len());
assert_eq!(self.d, self.g_positive_x_alpha.len());
assert_eq!(self.d + 1, self.h_negative_x_alpha.len());
assert_eq!(self.d + 1, self.h_positive_x_alpha.len());
writer.write_u32::<BigEndian>(self.d as u32)?;
for g in &self.g_negative_x[..] {
writer.write_all(g.into_uncompressed().as_ref())?;
}
for g in &self.g_positive_x[..] {
writer.write_all(g.into_uncompressed().as_ref())?;
}
for g in &self.h_negative_x[..] {
writer.write_all(g.into_uncompressed().as_ref())?;
}
for g in &self.h_positive_x[..] {
writer.write_all(g.into_uncompressed().as_ref())?;
}
for g in &self.g_negative_x_alpha[..] {
writer.write_all(g.into_uncompressed().as_ref())?;
}
for g in &self.g_positive_x_alpha[..] {
writer.write_all(g.into_uncompressed().as_ref())?;
}
for g in &self.h_negative_x_alpha[..] {
writer.write_all(g.into_uncompressed().as_ref())?;
}
for g in &self.h_positive_x_alpha[..] {
writer.write_all(g.into_uncompressed().as_ref())?;
}
Ok(())
}
pub fn read<R: Read>(
mut reader: R,
checked: bool
) -> io::Result<Self>
{
use pairing::EncodedPoint;
let read_g1 = |reader: &mut R| -> io::Result<E::G1Affine> {
let mut repr = <E::G1Affine as CurveAffine>::Uncompressed::empty();
reader.read_exact(repr.as_mut())?;
if checked {
repr
.into_affine()
} else {
repr
.into_affine_unchecked()
}
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
.and_then(|e| if e.is_zero() {
Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
} else {
Ok(e)
})
};
let read_g2 = |reader: &mut R| -> io::Result<E::G2Affine> {
let mut repr = <E::G2Affine as CurveAffine>::Uncompressed::empty();
reader.read_exact(repr.as_mut())?;
if checked {
repr
.into_affine()
} else {
repr
.into_affine_unchecked()
}
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
.and_then(|e| if e.is_zero() {
Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity"))
} else {
Ok(e)
})
};
let mut g_negative_x = vec![];
let mut g_positive_x = vec![];
let mut h_negative_x = vec![];
let mut h_positive_x = vec![];
let mut g_negative_x_alpha = vec![];
let mut g_positive_x_alpha = vec![];
let mut h_negative_x_alpha = vec![];
let mut h_positive_x_alpha = vec![];
let d = reader.read_u32::<BigEndian>()? as usize;
{
for _ in 0..(d+1) {
g_negative_x.push(read_g1(&mut reader)?);
}
for _ in 0..(d+1) {
g_positive_x.push(read_g1(&mut reader)?);
}
}
{
for _ in 0..(d+1) {
h_negative_x.push(read_g2(&mut reader)?);
}
for _ in 0..(d+1) {
h_positive_x.push(read_g2(&mut reader)?);
}
}
{
for _ in 0..d {
g_negative_x_alpha.push(read_g1(&mut reader)?);
}
for _ in 0..d {
g_positive_x_alpha.push(read_g1(&mut reader)?);
}
}
{
for _ in 0..(d+1) {
h_negative_x_alpha.push(read_g2(&mut reader)?);
}
for _ in 0..(d+1) {
h_positive_x_alpha.push(read_g2(&mut reader)?);
}
}
Ok(Self {
d: d,
g_negative_x: g_negative_x,
g_positive_x: g_positive_x,
h_negative_x: h_negative_x,
h_positive_x: h_positive_x,
g_negative_x_alpha: g_negative_x_alpha,
g_positive_x_alpha: g_positive_x_alpha,
h_negative_x_alpha: h_negative_x_alpha,
h_positive_x_alpha: h_positive_x_alpha
})
}
}
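A serialization round trip, as a sketch (the file name, `srs_x` and `srs_alpha` are illustrative, not taken from the source):
// Sketch only.
use std::fs::File;
use std::io::{BufReader, BufWriter};
let srs = SRS::<Bls12>::dummy(1024, srs_x, srs_alpha);
srs.write(BufWriter::new(File::create("srs.key").unwrap())).unwrap();
let restored = SRS::<Bls12>::read(BufReader::new(File::open("srs.key").unwrap()), true).unwrap();
assert_eq!(srs, restored);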

436
src/sonic/tests/sonics.rs Normal file

@ -0,0 +1,436 @@
extern crate bellman;
extern crate pairing;
extern crate rand;
// For randomness (during paramgen and proof generation)
use rand::{thread_rng, Rng};
// For benchmarking
use std::time::{Duration, Instant};
// Bring in some tools for using pairing-friendly curves
use pairing::{
Engine
};
use pairing::ff::{
Field,
};
// We're going to use the BLS12-381 pairing-friendly elliptic curve.
use pairing::bls12_381::{
Bls12
};
use pairing::bn256::{
Bn256
};
// We'll use these interfaces to construct our circuit.
use bellman::{
Circuit,
ConstraintSystem,
SynthesisError
};
// We're going to use the Groth16 proving system.
use bellman::groth16::{
Proof,
generate_random_parameters,
prepare_verifying_key,
create_random_proof,
verify_proof,
};
const MIMC_ROUNDS: usize = 322;
/// This is our demo circuit for proving knowledge of the
/// preimage of a MiMC hash invocation.
#[derive(Clone)]
struct MiMCDemoNoInputs<'a, E: Engine> {
xl: Option<E::Fr>,
xr: Option<E::Fr>,
image: Option<E::Fr>,
constants: &'a [E::Fr]
}
/// Our demo circuit implements this `Circuit` trait which
/// is used during paramgen and proving in order to
/// synthesize the constraint system.
impl<'a, E: Engine> Circuit<E> for MiMCDemoNoInputs<'a, E> {
fn synthesize<CS: ConstraintSystem<E>>(
self,
cs: &mut CS
) -> Result<(), SynthesisError>
{
assert_eq!(self.constants.len(), MIMC_ROUNDS);
// Allocate the first component of the preimage.
let mut xl_value = self.xl;
let mut xl = cs.alloc(|| "preimage xl", || {
xl_value.ok_or(SynthesisError::AssignmentMissing)
})?;
// Allocate the second component of the preimage.
let mut xr_value = self.xr;
let mut xr = cs.alloc(|| "preimage xr", || {
xr_value.ok_or(SynthesisError::AssignmentMissing)
})?;
for i in 0..MIMC_ROUNDS {
// xL, xR := xR + (xL + Ci)^3, xL
let cs = &mut cs.namespace(|| format!("round {}", i));
// tmp = (xL + Ci)^2
let tmp_value = xl_value.map(|mut e| {
e.add_assign(&self.constants[i]);
e.square();
e
});
let tmp = cs.alloc(|| "tmp", || {
tmp_value.ok_or(SynthesisError::AssignmentMissing)
})?;
cs.enforce(
|| "tmp = (xL + Ci)^2",
|lc| lc + xl + (self.constants[i], CS::one()),
|lc| lc + xl + (self.constants[i], CS::one()),
|lc| lc + tmp
);
// new_xL = xR + (xL + Ci)^3
// new_xL = xR + tmp * (xL + Ci)
// new_xL - xR = tmp * (xL + Ci)
let new_xl_value = xl_value.map(|mut e| {
e.add_assign(&self.constants[i]);
e.mul_assign(&tmp_value.unwrap());
e.add_assign(&xr_value.unwrap());
e
});
let new_xl = if i == (MIMC_ROUNDS-1) {
// This is the last round, xL is our image and so
// we use the image
let image_value = self.image;
cs.alloc(|| "image", || {
image_value.ok_or(SynthesisError::AssignmentMissing)
})?
} else {
cs.alloc(|| "new_xl", || {
new_xl_value.ok_or(SynthesisError::AssignmentMissing)
})?
};
cs.enforce(
|| "new_xL = xR + (xL + Ci)^3",
|lc| lc + tmp,
|lc| lc + xl + (self.constants[i], CS::one()),
|lc| lc + new_xl - xr
);
// xR = xL
xr = xl;
xr_value = xl_value;
// xL = new_xL
xl = new_xl;
xl_value = new_xl_value;
}
Ok(())
}
}
#[test]
fn test_sonic_mimc() {
use pairing::ff::{Field, PrimeField};
use pairing::{Engine, CurveAffine, CurveProjective};
use pairing::bls12_381::{Bls12, Fr};
use std::time::{Instant};
use bellman::sonic::srs::SRS;
let srs_x = Fr::from_str("23923").unwrap();
let srs_alpha = Fr::from_str("23728792").unwrap();
println!("making srs");
let start = Instant::now();
let srs = SRS::<Bls12>::dummy(830564, srs_x, srs_alpha);
println!("done in {:?}", start.elapsed());
{
// This may not be cryptographically safe, use
// `OsRng` (for example) in production software.
let rng = &mut thread_rng();
// Generate the MiMC round constants
let constants = (0..MIMC_ROUNDS).map(|_| rng.gen()).collect::<Vec<_>>();
let samples: usize = 100;
let xl = rng.gen();
let xr = rng.gen();
let image = mimc::<Bls12>(xl, xr, &constants);
// Create an instance of our circuit (with the
// witness)
let circuit = MiMCDemoNoInputs {
xl: Some(xl),
xr: Some(xr),
image: Some(image),
constants: &constants
};
use bellman::sonic::cs::Basic;
use bellman::sonic::sonic::AdaptorCircuit;
use bellman::sonic::helped::prover::{create_advice_on_srs, create_proof_on_srs};
use bellman::sonic::helped::{MultiVerifier, get_circuit_parameters};
use bellman::sonic::helped::helper::{create_aggregate_on_srs};
println!("creating proof");
let start = Instant::now();
let proof = create_proof_on_srs::<Bls12, _, Basic>(&AdaptorCircuit(circuit.clone()), &srs).unwrap();
println!("done in {:?}", start.elapsed());
println!("creating advice");
let start = Instant::now();
let advice = create_advice_on_srs::<Bls12, _, Basic>(&AdaptorCircuit(circuit.clone()), &proof, &srs).unwrap();
println!("done in {:?}", start.elapsed());
println!("creating aggregate for {} proofs", samples);
let start = Instant::now();
let proofs: Vec<_> = (0..samples).map(|_| (proof.clone(), advice.clone())).collect();
let aggregate = create_aggregate_on_srs::<Bls12, _, Basic>(&AdaptorCircuit(circuit.clone()), &proofs, &srs);
println!("done in {:?}", start.elapsed());
{
let rng = thread_rng();
let mut verifier = MultiVerifier::<Bls12, _, Basic, _>::new(AdaptorCircuit(circuit.clone()), &srs, rng).unwrap();
println!("verifying 1 proof without advice");
let start = Instant::now();
{
for _ in 0..1 {
verifier.add_proof(&proof, &[], |_, _| None);
}
assert_eq!(verifier.check_all(), true); // TODO
}
println!("done in {:?}", start.elapsed());
}
{
let rng = thread_rng();
let mut verifier = MultiVerifier::<Bls12, _, Basic, _>::new(AdaptorCircuit(circuit.clone()), &srs, rng).unwrap();
println!("verifying {} proofs without advice", samples);
let start = Instant::now();
{
for _ in 0..samples {
verifier.add_proof(&proof, &[], |_, _| None);
}
assert_eq!(verifier.check_all(), true); // TODO
}
println!("done in {:?}", start.elapsed());
}
{
let rng = thread_rng();
let mut verifier = MultiVerifier::<Bls12, _, Basic, _>::new(AdaptorCircuit(circuit.clone()), &srs, rng).unwrap();
println!("verifying 100 proofs with advice");
let start = Instant::now();
{
for (ref proof, ref advice) in &proofs {
verifier.add_proof_with_advice(proof, &[], advice);
}
verifier.add_aggregate(&proofs, &aggregate);
assert_eq!(verifier.check_all(), true); // TODO
}
println!("done in {:?}", start.elapsed());
}
}
}
#[test]
fn test_inputs_into_sonic_mimc() {
use pairing::ff::{Field, PrimeField};
use pairing::{Engine, CurveAffine, CurveProjective};
use pairing::bn256::{Bn256, Fr};
// use pairing::bls12_381::{Bls12, Fr};
use std::time::{Instant};
use bellman::sonic::srs::SRS;
let srs_x = Fr::from_str("23923").unwrap();
let srs_alpha = Fr::from_str("23728792").unwrap();
println!("making srs");
let start = Instant::now();
let srs = SRS::<Bn256>::dummy(830564, srs_x, srs_alpha);
println!("done in {:?}", start.elapsed());
{
// This may not be cryptographically safe, use
// `OsRng` (for example) in production software.
let rng = &mut thread_rng();
// Generate the MiMC round constants
let constants = (0..MIMC_ROUNDS).map(|_| rng.gen()).collect::<Vec<_>>();
let samples: usize = 100;
let xl = rng.gen();
let xr = rng.gen();
let image = mimc::<Bn256>(xl, xr, &constants);
// Create an instance of our circuit (with the
// witness)
let circuit = MiMCDemo {
xl: Some(xl),
xr: Some(xr),
constants: &constants
};
use bellman::sonic::cs::Basic;
use bellman::sonic::sonic::AdaptorCircuit;
use bellman::sonic::helped::prover::{create_advice_on_srs, create_proof_on_srs};
use bellman::sonic::helped::{MultiVerifier, get_circuit_parameters};
use bellman::sonic::helped::helper::{create_aggregate_on_srs};
let info = get_circuit_parameters::<Bn256, _>(circuit.clone()).expect("Must get circuit info");
println!("{:?}", info);
println!("creating proof");
let start = Instant::now();
let proof = create_proof_on_srs::<Bn256, _, Basic>(&AdaptorCircuit(circuit.clone()), &srs).unwrap();
println!("done in {:?}", start.elapsed());
println!("creating advice");
let start = Instant::now();
let advice = create_advice_on_srs::<Bn256, _, Basic>(&AdaptorCircuit(circuit.clone()), &proof, &srs).unwrap();
println!("done in {:?}", start.elapsed());
println!("creating aggregate for {} proofs", samples);
let start = Instant::now();
let proofs: Vec<_> = (0..samples).map(|_| (proof.clone(), advice.clone())).collect();
let aggregate = create_aggregate_on_srs::<Bn256, _, Basic>(&AdaptorCircuit(circuit.clone()), &proofs, &srs);
println!("done in {:?}", start.elapsed());
{
let rng = thread_rng();
let mut verifier = MultiVerifier::<Bn256, _, Basic, _>::new(AdaptorCircuit(circuit.clone()), &srs, rng).unwrap();
println!("verifying 1 proof without advice");
let start = Instant::now();
{
for _ in 0..1 {
verifier.add_proof(&proof, &[image], |_, _| None);
}
assert_eq!(verifier.check_all(), true); // TODO
}
println!("done in {:?}", start.elapsed());
}
{
let rng = thread_rng();
let mut verifier = MultiVerifier::<Bn256, _, Basic, _>::new(AdaptorCircuit(circuit.clone()), &srs, rng).unwrap();
println!("verifying {} proofs without advice", samples);
let start = Instant::now();
{
for _ in 0..samples {
verifier.add_proof(&proof, &[image], |_, _| None);
}
assert_eq!(verifier.check_all(), true); // TODO
}
println!("done in {:?}", start.elapsed());
}
{
let rng = thread_rng();
let mut verifier = MultiVerifier::<Bn256, _, Basic, _>::new(AdaptorCircuit(circuit.clone()), &srs, rng).unwrap();
println!("verifying 100 proofs with advice and aggregate");
let start = Instant::now();
{
for (ref proof, ref advice) in &proofs {
verifier.add_proof_with_advice(proof, &[image], advice);
}
verifier.add_aggregate(&proofs, &aggregate);
assert_eq!(verifier.check_all(), true); // TODO
}
println!("done in {:?}", start.elapsed());
}
}
}
#[test]
fn test_high_level_sonic_api() {
use pairing::bn256::{Bn256};
use std::time::{Instant};
use bellman::sonic::helped::{
generate_random_parameters,
verify_aggregate,
verify_proofs,
create_proof,
create_advice,
create_aggregate,
get_circuit_parameters
};
{
// This may not be cryptographically safe, use
// `OsRng` (for example) in production software.
let mut rng = &mut thread_rng();
// Generate the MiMC round constants
let constants = (0..MIMC_ROUNDS).map(|_| rng.gen()).collect::<Vec<_>>();
let samples: usize = 100;
let xl = rng.gen();
let xr = rng.gen();
let image = mimc::<Bn256>(xl, xr, &constants);
// Create an instance of our circuit (with the
// witness)
let circuit = MiMCDemo {
xl: Some(xl),
xr: Some(xr),
constants: &constants
};
let info = get_circuit_parameters::<Bn256, _>(circuit.clone()).expect("Must get circuit info");
println!("{:?}", info);
let params = generate_random_parameters(circuit.clone(), &mut rng).unwrap();
println!("creating proof");
let start = Instant::now();
let proof = create_proof(circuit.clone(), &params).unwrap();
println!("done in {:?}", start.elapsed());
println!("creating advice");
let start = Instant::now();
let advice = create_advice(circuit.clone(), &proof, &params).unwrap();
println!("done in {:?}", start.elapsed());
println!("creating aggregate for {} proofs", samples);
let start = Instant::now();
let proofs: Vec<_> = (0..samples).map(|_| (proof.clone(), advice.clone())).collect();
let aggregate = create_aggregate::<Bn256, _>(circuit.clone(), &proofs, &params);
println!("done in {:?}", start.elapsed());
{
println!("verifying 1 proof without advice");
let rng = thread_rng();
let start = Instant::now();
assert_eq!(verify_proofs(&vec![proof.clone()], &vec![vec![image.clone()]], circuit.clone(), rng, &params).unwrap(), true);
println!("done in {:?}", start.elapsed());
}
{
println!("verifying {} proofs without advice", samples);
let rng = thread_rng();
let start = Instant::now();
assert_eq!(verify_proofs(&vec![proof.clone(); 100], &vec![vec![image.clone()]; 100], circuit.clone(), rng, &params).unwrap(), true);
println!("done in {:?}", start.elapsed());
}
{
println!("verifying 100 proofs with advice and aggregate");
let rng = thread_rng();
let start = Instant::now();
assert_eq!(verify_aggregate(&vec![(proof.clone(), advice.clone()); 100], &aggregate, &vec![vec![image.clone()]; 100], circuit.clone(), rng, &params).unwrap(), true);
println!("done in {:?}", start.elapsed());
}
}
}

73
src/sonic/transcript/hasher.rs Normal file

@ -0,0 +1,73 @@
extern crate tiny_keccak;
extern crate blake2_rfc;
use self::tiny_keccak::Keccak;
use self::blake2_rfc::blake2s::Blake2s;
pub trait Hasher {
fn new(personalization: &[u8]) -> Self;
fn update(&mut self, data: &[u8]);
fn finalize(&mut self) -> Vec<u8>;
}
#[derive(Clone)]
pub struct BlakeHasher {
h: Blake2s
}
impl Hasher for BlakeHasher {
fn new(personalization: &[u8]) -> Self {
let h = Blake2s::with_params(32, &[], &[], personalization);
Self {
h: h
}
}
fn update(&mut self, data: &[u8]) {
self.h.update(data);
}
fn finalize(&mut self) -> Vec<u8> {
use std::mem;
let new_h = Blake2s::with_params(32, &[], &[], &[]);
let h = mem::replace(&mut self.h, new_h);
let result = h.finalize();
result.as_ref().to_vec()
}
}
#[derive(Clone)]
pub struct Keccak256Hasher {
h: Keccak
}
impl Hasher for Keccak256Hasher {
fn new(personalization: &[u8]) -> Self {
let mut h = Keccak::new_keccak256();
h.update(personalization);
Self {
h: h
}
}
fn update(&mut self, data: &[u8]) {
self.h.update(data);
}
fn finalize(&mut self) -> Vec<u8> {
use std::mem;
let new_h = Keccak::new_keccak256();
let h = mem::replace(&mut self.h, new_h);
let mut res: [u8; 32] = [0; 32];
h.finalize(&mut res);
res[..].to_vec()
}
}
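Both hashers expose the same minimal interface that the transcript below relies on; a short sketch:
// Sketch only.
let mut h = Keccak256Hasher::new(b"personalization");
h.update(b"some bytes");
let digest = h.finalize(); // a 32-byte Vec<u8> for both Keccak-256 and Blake2s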

131
src/sonic/transcript/mod.rs Normal file

@ -0,0 +1,131 @@
extern crate pairing;
use pairing::ff::{Field, PrimeField, PrimeFieldRepr};
use pairing::{CurveAffine, CurveProjective, Engine};
use std::io;
mod hasher;
use self::hasher::{Hasher, Keccak256Hasher, BlakeHasher};
#[derive(Clone)]
pub struct Transcript {
transcriptor: RollingHashTranscript<Keccak256Hasher>
}
impl Transcript {
pub fn new(personalization: &[u8]) -> Self {
Self {
transcriptor: RollingHashTranscript::new(personalization)
}
}
}
impl TranscriptProtocol for Transcript {
fn commit_point<G: CurveAffine>(&mut self, point: &G) {
self.transcriptor.commit_point(point);
}
fn commit_scalar<F: PrimeField>(&mut self, scalar: &F) {
self.transcriptor.commit_scalar(scalar);
}
fn get_challenge_scalar<F: PrimeField>(&mut self) -> F {
self.transcriptor.get_challenge_scalar()
}
}
use std::marker::PhantomData;
#[derive(Clone)]
pub struct RollingHashTranscript<H: Hasher> {
buffer: Vec<u8>,
last_finalized_value: Vec<u8>,
_marker: PhantomData<H>
}
impl<H: Hasher> RollingHashTranscript<H> {
pub fn new(personalization: &[u8]) -> Self {
let mut h = H::new(personalization);
let buffer = h.finalize();
Self {
buffer: buffer,
last_finalized_value: vec![],
_marker: PhantomData
}
}
pub fn commit_bytes(&mut self, personalization: &[u8], bytes: &[u8]) {
let mut h = H::new(&[]);
h.update(&self.buffer);
h.update(personalization);
h.update(bytes);
self.buffer = h.finalize();
}
pub fn get_challenge_bytes(&mut self, nonce: &[u8]) -> Vec<u8> {
let challenge_bytes = &self.buffer;
let mut h = H::new(&[]);
h.update(challenge_bytes);
h.update(nonce);
let challenge_bytes = h.finalize();
challenge_bytes
}
}
pub trait TranscriptProtocol {
fn commit_point<G: CurveAffine>(&mut self, point: &G);
fn commit_scalar<F: PrimeField>(&mut self, scalar: &F);
fn get_challenge_scalar<F: PrimeField>(&mut self) -> F;
}
impl<H:Hasher> TranscriptProtocol for RollingHashTranscript<H> {
fn commit_point<G: CurveAffine>(&mut self, point: &G) {
self.commit_bytes(b"point", point.into_uncompressed().as_ref());
// self.commit_bytes(b"point", point.into_compressed().as_ref());
}
fn commit_scalar<F: PrimeField>(&mut self, scalar: &F) {
let mut v = vec![];
scalar.into_repr().write_be(&mut v).unwrap();
// scalar.into_repr().write_le(&mut v).unwrap();
self.commit_bytes(b"scalar", &v);
}
fn get_challenge_scalar<F: PrimeField>(&mut self) -> F {
use byteorder::ByteOrder;
let mut nonce = 0u32;
loop {
let mut nonce_bytes = vec![0u8; 4];
byteorder::BigEndian::write_u32(&mut nonce_bytes, nonce);
let mut repr: F::Repr = Default::default();
let challenge_bytes = self.get_challenge_bytes(&nonce_bytes);
repr.read_be(&challenge_bytes[..]).unwrap();
if let Ok(result) = F::from_repr(repr) {
// println!("Got a challenge {} for nonce = {}", result, nonce);
return result;
}
if nonce == (0xffffffff as u32) {
panic!("can not make challenge scalar");
}
nonce += 1;
}
}
}
// struct TranscriptReader<'a, H:Hasher>(&'a mut Transcript<H>);
// impl<'a, H:Hasher> io::Read for TranscriptReader<'a, H: Hasher> {
// fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
// self.0.challenge_bytes(b"read", buf);
// Ok(buf.len())
// }
// }
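The verifier earlier in this commit rebuilds the prover's Fiat-Shamir challenges by replaying the same commitments in the same order; as a sketch:
// Sketch only; mirrors the beginning of MultiVerifier::add_proof.
let mut transcript = Transcript::new(&[]);
transcript.commit_point(&proof.r);
let y: Fr = transcript.get_challenge_scalar();
transcript.commit_point(&proof.t);
let z: Fr = transcript.get_challenge_scalar();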

727
src/sonic/unhelped/grand_product_argument.rs Normal file

@ -0,0 +1,727 @@
/// The grand product argument proves that, for commitments to two polynomials of degree n,
/// the products of the coefficients of the two polynomials are equal (this is part of the
/// permutation argument), under the additional assumption that none of the coefficients is zero.
use pairing::ff::{Field, PrimeField, PrimeFieldRepr};
use pairing::{Engine, CurveProjective, CurveAffine};
use std::marker::PhantomData;
use crate::sonic::srs::SRS;
use crate::sonic::util::*;
#[derive(Clone)]
pub struct GrandProductArgument<E: Engine> {
a_polynomials: Vec<Vec<E::Fr>>,
c_polynomials: Vec<Vec<E::Fr>>,
v_elements: Vec<E::Fr>,
t_polynomial: Option<Vec<E::Fr>>,
n: usize
}
#[derive(Clone)]
pub struct GrandProductProof<E: Engine> {
t_opening: E::G1Affine,
e_zinv: E::Fr,
e_opening: E::G1Affine,
f_y: E::Fr,
f_opening: E::G1Affine,
}
impl<E: Engine> GrandProductArgument<E> {
pub fn new(polynomials: Vec<(Vec<E::Fr>, Vec<E::Fr>)>) -> Self {
assert!(polynomials.len() > 0);
let n = polynomials[0].0.len();
let mut a_polynomials = vec![];
let mut c_polynomials = vec![];
let mut v_elements = vec![];
// a_{1..n} = first poly
// a_{n+1..2n+1} = b_{1..n} = second poly
// c_1 = a_1
// c_2 = a_2 * c_1 = a_2 * a_1
// c_3 = a_3 * c_2 = a_3 * a_2 * a_1
// ...
// c_n = a_n * c_{n-1} = \prod a_i
// a_{n+1} = c_{n-1}^-1
// c_{n+1} = 1
// c_{n+1} = a_{n+2} * c_{n+1} = a_{n+2}
// ...
// c_{2n+1} = \prod a_{n+1+i} = \prod b_{i}
// v = c_{n}^-1
// calculate c, serially for now
for p in polynomials.into_iter() {
let (p0, p1) = p;
assert!(p0.len() == p1.len());
assert!(p0.len() == n);
let mut c_poly: Vec<E::Fr> = Vec::with_capacity(2*n + 1);
let mut a_poly: Vec<E::Fr> = Vec::with_capacity(2*n + 1);
let mut c_coeff = E::Fr::one();
// add a
for a in p0.iter() {
c_coeff.mul_assign(a);
c_poly.push(c_coeff);
}
assert_eq!(c_poly.len(), n);
a_poly.extend(p0);
// v = a_{n+1} = c_{n}^-1
let v = c_poly[n-1].inverse().unwrap();
a_poly.push(E::Fr::zero());
// a_poly.push(v);
// add c_{n+1}
let mut c_coeff = E::Fr::one();
c_poly.push(c_coeff);
// add b
for b in p1.iter() {
c_coeff.mul_assign(b);
c_poly.push(c_coeff);
}
assert_eq!(c_poly.len(), 2*n + 1);
a_poly.extend(p1);
assert_eq!(c_poly[n-1], c_poly[2*n]);
a_polynomials.push(a_poly);
c_polynomials.push(c_poly);
v_elements.push(v);
}
GrandProductArgument {
a_polynomials: a_polynomials,
c_polynomials: c_polynomials,
v_elements: v_elements,
t_polynomial: None,
n: n
}
}
// Make a commitment to a polynomial of the form A(X) + X^{n+1}*B(X), i.e. with coefficients [a_1...a_n, 0, b_1...b_n]
pub fn commit_for_grand_product(a: &[E::Fr], b: &[E::Fr], srs: &SRS<E>) -> E::G1Affine {
assert_eq!(a.len(), b.len());
let n = a.len();
multiexp(
srs.g_positive_x_alpha[0..(2*n+1)].iter(),
a.iter()
.chain_ext(Some(E::Fr::zero()).iter())
.chain_ext(b.iter())
).into_affine()
}
// Make separate commitments to the polynomials A and B (coefficients [a_1...a_n] and [b_1...b_n])
pub fn commit_for_individual_products(a: &[E::Fr], b: &[E::Fr], srs: &SRS<E>) -> (E::G1Affine, E::G1Affine) {
assert_eq!(a.len(), b.len());
let n = a.len();
let a = multiexp(
srs.g_positive_x_alpha[0..n].iter(),
a.iter()).into_affine();
let b = multiexp(
srs.g_positive_x_alpha[0..n].iter(),
b.iter()).into_affine();
(a, b)
}
pub fn open_commitments_for_grand_product(&self, y: E::Fr, z: E::Fr, srs: &SRS<E>) -> Vec<(E::Fr, E::G1Affine)> {
let n = self.n;
let mut yz = y;
yz.mul_assign(&z);
let mut results = vec![];
for a_poly in self.a_polynomials.iter() {
let a = & a_poly[0..n];
let b = & a_poly[(n+1)..];
assert_eq!(a.len(), n);
assert_eq!(b.len(), n);
let mut val = evaluate_at_consequitive_powers(a, yz, yz);
{
let tmp = yz.pow([(n+2) as u64]);
let v = evaluate_at_consequitive_powers(b, tmp, yz);
val.add_assign(&v);
}
let mut constant_term = val;
constant_term.negate();
let opening = polynomial_commitment_opening(
0,
2*n + 1,
Some(constant_term).iter()
.chain_ext(a.iter())
.chain_ext(Some(E::Fr::zero()).iter())
.chain_ext(b.iter()),
yz,
&srs);
results.push((val, opening));
}
results
}
// Make a commitment for the beginning of the protocol; returns the commitment and the `v` scalar
pub fn commit_to_individual_c_polynomials(&self, srs: &SRS<E>) -> Vec<(E::G1Affine, E::Fr)> {
let mut results = vec![];
let n = self.c_polynomials[0].len();
for (p, v) in self.c_polynomials.iter().zip(self.v_elements.iter()) {
let c = multiexp(
srs.g_positive_x_alpha[0..n].iter(),
p.iter()
).into_affine();
results.push((c, *v));
}
results
}
// The argument is based on the approach of the main SONIC construction, but with a custom S(X,Y) polynomial of a simple form
pub fn commit_to_t_polynomial(&mut self, challenges: & Vec<E::Fr>, y: E::Fr, srs: &SRS<E>) -> E::G1Affine {
assert_eq!(challenges.len(), self.a_polynomials.len());
let n = self.n;
let mut t_polynomial: Option<Vec<E::Fr>> = None;
for (((a, c), v), challenge) in self.a_polynomials.iter()
.zip(self.c_polynomials.iter())
.zip(self.v_elements.iter())
.zip(challenges.iter())
{
let mut a_xy = a.clone();
let mut c_xy = c.clone();
let v = *v;
assert_eq!(a_xy.len(), 2*n + 1);
assert_eq!(c_xy.len(), 2*n + 1);
// make a T polynomial
let r: Vec<E::Fr> = {
// p_a(X,Y)*Y
let mut tmp = y;
tmp.square();
mut_distribute_consequitive_powers(&mut a_xy[..], tmp, y);
// add extra terms
// v*(XY)^{n+1}*Y + X^{n+2} + X^{n+1}*Y - X^{2n+2}*Y
// n+1 term: v*(XY)^{n+1}*Y + X^{n+1}*Y
let tmp = y.pow(&[(n+2) as u64]);
let mut x_n_plus_one_term = v;
x_n_plus_one_term.mul_assign(&tmp);
x_n_plus_one_term.add_assign(&y);
a_xy[n].add_assign(&x_n_plus_one_term);
// n+2 term
a_xy[n+1].add_assign(&E::Fr::one());
// 2n+2 term
let mut tmp = y;
tmp.negate();
a_xy.push(tmp);
assert_eq!(a_xy.len(), 2*n + 2);
let mut r = vec![E::Fr::zero(); 2*n + 3];
r.extend(a_xy);
r
};
let r_prime: Vec<E::Fr> = {
let mut c_prime: Vec<E::Fr> = c_xy.iter().rev().map(|el| *el).collect();
c_prime.push(E::Fr::one());
c_prime.push(E::Fr::zero());
assert_eq!(c_prime.len(), 2*n + 3);
c_prime
};
// multiply polynomials with powers [-2n-2, -1] and [1, 2n+2],
// expect result to be [-2n+1, 2n+1]
let mut t: Vec<E::Fr> = multiply_polynomials::<E>(r, r_prime);
assert_eq!(t.len(), 6*n + 7);
// drain the leading powers that exist only because of the padding, and the last element, which is required to be zero
for (i, el) in t[0..(2*n+3)].iter().enumerate() {
assert_eq!(*el, E::Fr::zero(), "{}", format!("Element {} is non-zero", i));
}
t.drain(0..(2*n+3));
let last = t.pop();
assert_eq!(last.unwrap(), E::Fr::zero(), "last element should be zero");
assert_eq!(t.len(), 4*n + 3);
let mut val = {
let mut tmp = y;
tmp.square();
evaluate_at_consequitive_powers(&c_xy, tmp, y)
};
val.add_assign(&E::Fr::one());
// subtract the value from the constant term (zeroing it)
assert_eq!(t[2*n+1], val);
t[2*n+1].sub_assign(&val);
if t_polynomial.is_some() {
if let Some(t_poly) = t_polynomial.as_mut() {
mul_add_polynomials(&mut t_poly[..], &t, *challenge);
}
} else {
mul_polynomial_by_scalar(&mut t, *challenge);
t_polynomial = Some(t);
}
}
let t_polynomial = t_polynomial.unwrap();
let c = multiexp(srs.g_negative_x_alpha[0..(2*n+1)].iter().rev()
.chain_ext(srs.g_positive_x_alpha[0..(2*n+1)].iter()),
t_polynomial[0..(2*n+1)].iter()
.chain_ext(t_polynomial[(2*n+2)..].iter())).into_affine();
self.t_polynomial = Some(t_polynomial);
c
}
// The argument is based on the approach of the main SONIC construction, but with a custom S(X,Y) polynomial of a simple form
pub fn make_argument(self, a_zy: & Vec<E::Fr>, challenges: & Vec<E::Fr>, y: E::Fr, z: E::Fr, srs: &SRS<E>) -> GrandProductProof<E> {
assert_eq!(a_zy.len(), self.a_polynomials.len());
assert_eq!(challenges.len(), self.a_polynomials.len());
let n = self.n;
let c_polynomials = self.c_polynomials;
let mut e_polynomial: Option<Vec<E::Fr>> = None;
let mut f_polynomial: Option<Vec<E::Fr>> = None;
let mut yz = y;
yz.mul_assign(&z);
let z_inv = z.inverse().unwrap();
for (((a, c), challenge), v) in a_zy.iter()
.zip(c_polynomials.into_iter())
.zip(challenges.iter())
.zip(self.v_elements.iter())
{
// c_j = ((a_j + v_j*(yz)^{n+1})*y + z^{n+2} + z^{n+1}*y - z^{2n+2}*y) * z^{-1}
let mut c_zy = yz.pow([(n + 1) as u64]);
c_zy.mul_assign(v);
c_zy.add_assign(a);
c_zy.mul_assign(&y);
let mut z_n_plus_1 = z.pow([(n + 1) as u64]);
let mut z_n_plus_2 = z_n_plus_1;
z_n_plus_2.mul_assign(&z);
let mut z_2n_plus_2 = z_n_plus_1;
z_2n_plus_2.square();
z_2n_plus_2.mul_assign(&y);
z_n_plus_1.mul_assign(&y);
c_zy.add_assign(&z_n_plus_1);
c_zy.add_assign(&z_n_plus_2);
c_zy.sub_assign(&z_2n_plus_2);
c_zy.mul_assign(&z_inv);
let mut rc = c_zy;
rc.mul_assign(challenge);
let mut ry = y;
ry.mul_assign(challenge);
if e_polynomial.is_some() && f_polynomial.is_some() {
if let Some(e_poly) = e_polynomial.as_mut() {
if let Some(f_poly) = f_polynomial.as_mut() {
mul_add_polynomials(&mut e_poly[..], &c, rc);
mul_add_polynomials(&mut f_poly[..], &c, ry);
}
}
} else {
let mut e = c.clone();
let mut f = c;
mul_polynomial_by_scalar(&mut e, rc);
mul_polynomial_by_scalar(&mut f, ry);
e_polynomial = Some(e);
f_polynomial = Some(f);
}
}
let e_polynomial = e_polynomial.unwrap();
let f_polynomial = f_polynomial.unwrap();
// evaluate e at z^-1
let mut e_val = evaluate_at_consequitive_powers(&e_polynomial, z_inv, z_inv);
e_val.negate();
// evaluate f at y
let mut f_val = evaluate_at_consequitive_powers(&f_polynomial, y, y);
f_val.negate();
let e_opening = polynomial_commitment_opening(
0,
2*n + 1,
Some(e_val).iter().chain_ext(e_polynomial.iter()),
z_inv,
srs);
let f_opening = polynomial_commitment_opening(
0,
2*n + 1,
Some(f_val).iter().chain_ext(f_polynomial.iter()),
y,
srs);
e_val.negate();
f_val.negate();
let mut t_poly = self.t_polynomial.unwrap();
assert_eq!(t_poly.len(), 4*n + 3);
// largest negative power of t is -2n-1
let t_zy = {
let tmp = z_inv.pow([(2*n+1) as u64]);
evaluate_at_consequitive_powers(&t_poly, tmp, z)
};
t_poly[2*n + 1].sub_assign(&t_zy);
let t_opening = polynomial_commitment_opening(
2*n + 1,
2*n + 1,
t_poly.iter(),
z,
srs);
GrandProductProof {
t_opening: t_opening,
e_zinv: e_val,
e_opening: e_opening,
f_y: f_val,
f_opening: f_opening,
}
}
pub fn verify_ab_commitment(n: usize,
randomness: & Vec<E::Fr>,
a_commitments: &Vec<E::G1Affine>,
b_commitments: &Vec<E::G1Affine>,
openings: &Vec<(E::Fr, E::G1Affine)>,
y: E::Fr,
z: E::Fr,
srs: &SRS<E>
) -> bool {
assert_eq!(randomness.len(), a_commitments.len());
assert_eq!(openings.len(), a_commitments.len());
assert_eq!(b_commitments.len(), a_commitments.len());
let d = srs.d;
// e(D_j, h^{alpha*x}) * e(D_j^{-yz}, h^{alpha}) = e(A_j, h) * e(B_j, h^{x^{n+1}}) * e(g^{-a_j}, h^{alpha})
let g = srs.g_positive_x[0];
let h_alpha_x_precomp = srs.h_positive_x_alpha[1].prepare();
let h_alpha_precomp = srs.h_positive_x_alpha[0].prepare();
let mut h_x_n_plus_one_precomp = srs.h_positive_x[n];
h_x_n_plus_one_precomp.negate();
let h_x_n_plus_one_precomp = h_x_n_plus_one_precomp.prepare();
let mut h_prep = srs.h_positive_x[0];
h_prep.negate();
let h_prep = h_prep.prepare();
let a = multiexp(
a_commitments.iter(),
randomness.iter(),
).into_affine();
let a = a.prepare();
let b = multiexp(
b_commitments.iter(),
randomness.iter(),
).into_affine();
let b = b.prepare();
let mut yz_neg = y;
yz_neg.mul_assign(&z);
yz_neg.negate();
let mut ops = vec![];
let mut value = E::Fr::zero();
for (el, r) in openings.iter().zip(randomness.iter()) {
let (v, o) = el;
ops.push(o.clone());
let mut val = *v;
val.mul_assign(&r);
value.add_assign(&val);
}
let value = g.mul(value.into_repr()).into_affine().prepare();
let openings = multiexp(
ops.iter(),
randomness.iter(),
).into_affine();
let openings_zy = openings.mul(yz_neg.into_repr()).into_affine().prepare();
let openings = openings.prepare();
// e(D_j, h^{alpha*x}) * e(D_j^{-yz}, h^{alpha}) = e(A_j, h) * e(B_j, h^{x^{n+1}}) * e(g^{-a_j}, h^{alpha})
E::final_exponentiation(&E::miller_loop(&[
(&openings, &h_alpha_x_precomp),
(&openings_zy, &h_alpha_precomp),
(&a, &h_prep),
(&b, &h_x_n_plus_one_precomp),
(&value, &h_alpha_precomp)
])).unwrap() == E::Fqk::one()
}
pub fn verify(
n: usize,
randomness: & Vec<E::Fr>,
a_zy: & Vec<E::Fr>,
challenges: &Vec<E::Fr>,
t_commitment: E::G1Affine,
commitments: &Vec<(E::G1Affine, E::Fr)>,
proof: &GrandProductProof<E>,
y: E::Fr,
z: E::Fr,
srs: &SRS<E>
) -> bool {
assert_eq!(randomness.len(), 3);
assert_eq!(a_zy.len(), challenges.len());
assert_eq!(commitments.len(), challenges.len());
let d = srs.d;
let g = srs.g_positive_x[0];
let h_alpha_x_precomp = srs.h_positive_x_alpha[1].prepare();
let h_alpha_precomp = srs.h_positive_x_alpha[0].prepare();
let mut h_prep = srs.h_positive_x[0];
h_prep.negate();
let h_prep = h_prep.prepare();
// first re-calculate cj and t(z,y)
let mut yz = y;
yz.mul_assign(&z);
let z_inv = z.inverse().unwrap();
let mut t_zy = E::Fr::zero();
t_zy.add_assign(&proof.e_zinv);
t_zy.sub_assign(&proof.f_y);
let mut commitments_points = vec![];
let mut rc_vec = vec![];
let mut ry_vec = vec![];
for ((r, commitment), a) in challenges.iter()
.zip(commitments.iter())
.zip(a_zy.iter()) {
let (c, v) = commitment;
commitments_points.push(c.clone());
// c_j = ((a_j + v_j*(yz)^{n+1})*y + z^{n+2} + z^{n+1}*y - z^{2n+2}*y) * z^{-1}
let mut c_zy = yz.pow([(n + 1) as u64]);
c_zy.mul_assign(v);
c_zy.add_assign(a);
c_zy.mul_assign(&y);
let mut z_n_plus_1 = z.pow([(n + 1) as u64]);
let mut z_n_plus_2 = z_n_plus_1;
z_n_plus_2.mul_assign(&z);
let mut z_2n_plus_2 = z_n_plus_1;
z_2n_plus_2.square();
z_2n_plus_2.mul_assign(&y);
z_n_plus_1.mul_assign(&y);
c_zy.add_assign(&z_n_plus_1);
c_zy.add_assign(&z_n_plus_2);
c_zy.sub_assign(&z_2n_plus_2);
c_zy.mul_assign(&z_inv);
let mut rc = c_zy;
rc.mul_assign(&r);
rc_vec.push(rc);
let mut ry = y;
ry.mul_assign(&r);
ry_vec.push(ry);
let mut val = rc;
val.sub_assign(r);
t_zy.add_assign(&val);
}
let c_rc = multiexp(
commitments_points.iter(),
rc_vec.iter(),
).into_affine();
let c_ry = multiexp(
commitments_points.iter(),
ry_vec.iter(),
).into_affine();
let mut minus_y = y;
minus_y.negate();
let mut f_y = proof.f_opening.mul(minus_y.into_repr());
let g_f = g.mul(proof.f_y.into_repr());
f_y.add_assign(&g_f);
let mut minus_z = z;
minus_z.negate();
let mut t_z = proof.t_opening.mul(minus_z.into_repr());
let g_tzy = g.mul(t_zy.into_repr());
t_z.add_assign(&g_tzy);
let mut minus_z_inv = z_inv;
minus_z_inv.negate();
let mut e_z_inv = proof.e_opening.mul(minus_z_inv.into_repr());
let g_e = g.mul(proof.e_zinv.into_repr());
e_z_inv.add_assign(&g_e);
let h_alpha_term = multiexp(
vec![e_z_inv.into_affine(), f_y.into_affine(), t_z.into_affine()].iter(),
randomness.iter(),
).into_affine();
let h_alpha_x_term = multiexp(
Some(proof.e_opening).iter()
.chain_ext(Some(proof.f_opening).iter())
.chain_ext(Some(proof.t_opening).iter()),
randomness.iter(),
).into_affine();
let h_term = multiexp(
Some(c_rc).iter()
.chain_ext(Some(c_ry).iter())
.chain_ext(Some(t_commitment).iter()),
randomness.iter(),
).into_affine();
E::final_exponentiation(&E::miller_loop(&[
(&h_alpha_x_term.prepare(), &h_alpha_x_precomp),
(&h_alpha_term.prepare(), &h_alpha_precomp),
(&h_term.prepare(), &h_prep),
])).unwrap() == E::Fqk::one()
}
}
#[test]
fn test_grand_product_argument() {
use pairing::bls12_381::{Fr, G1Affine, G1, Bls12};
use rand::{XorShiftRng, SeedableRng, Rand, Rng};
use crate::sonic::srs::SRS;
let srs_x = Fr::from_str("23923").unwrap();
let srs_alpha = Fr::from_str("23728792").unwrap();
let srs = SRS::<Bls12>::dummy(830564, srs_x, srs_alpha);
let n: usize = 1 << 8;
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let coeffs = (0..n).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
let mut permutation = coeffs.clone();
rng.shuffle(&mut permutation);
let a_commitment = multiexp(srs.g_positive_x_alpha[0..n].iter(), coeffs.iter()).into_affine();
let b_commitment = multiexp(srs.g_positive_x_alpha[0..n].iter(), permutation.iter()).into_affine();
let mut argument = GrandProductArgument::new(vec![(coeffs, permutation)]);
let commitments_and_v_values = argument.commit_to_individual_c_polynomials(&srs);
assert_eq!(commitments_and_v_values.len(), 1);
let y : Fr = rng.gen();
let challenges = (0..1).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
let t_commitment = argument.commit_to_t_polynomial(&challenges, y, &srs);
let z : Fr = rng.gen();
let grand_product_openings = argument.open_commitments_for_grand_product(y, z, &srs);
let randomness = (0..1).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
let valid = GrandProductArgument::verify_ab_commitment(n,
&randomness,
&vec![a_commitment],
&vec![b_commitment],
&grand_product_openings,
y,
z,
&srs);
assert!(valid, "grand product commitments should be valid");
let a_zy: Vec<Fr> = grand_product_openings.iter().map(|el| el.0.clone()).collect();
let proof = argument.make_argument(&a_zy, &challenges, y, z, &srs);
let randomness = (0..3).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
let valid = GrandProductArgument::verify(
n,
&randomness,
&a_zy,
&challenges,
t_commitment,
&commitments_and_v_values,
&proof,
y,
z,
&srs);
assert!(valid, "t commitment should be valid");
}

11
src/sonic/unhelped/mod.rs Normal file

@ -0,0 +1,11 @@
/// Largely, this module is an implementation of the provable evaluation of s(z, y), which is split into two parts:
/// s2(X, Y) = \sum_{i=1}^{N} (Y^{-i} + Y^{i})X^{i}
/// s1(X, Y) = ...
/// The s1 part requires the grand product and permutation arguments, which are also implemented here
mod s2_proof;
mod wellformed_argument;
mod grand_product_argument;
mod permutation_argument;
pub use self::wellformed_argument::{WellformednessArgument, WellformednessProof};

666
src/sonic/unhelped/permutation_argument.rs Normal file

@ -0,0 +1,666 @@
/// The permutation argument allows one to prove that a commitment to a vector A is
/// actually a commitment to a vector of values that are equal to `(s^{perm})_i * y^{perm(i)}`
/// for some fixed permutation `perm`
use pairing::ff::{Field, PrimeField, PrimeFieldRepr, ScalarEngine};
use pairing::{Engine, CurveProjective, CurveAffine};
use std::marker::PhantomData;
use crate::sonic::srs::SRS;
use crate::sonic::util::*;
use super::wellformed_argument::{WellformednessArgument, WellformednessProof};
use super::grand_product_argument::{GrandProductArgument, GrandProductProof};
#[derive(Clone)]
pub struct SpecializedSRS<E: Engine> {
p_1: E::G1Affine,
p_2: Vec<E::G1Affine>,
p_3: E::G1Affine,
p_4: Vec<E::G1Affine>,
n: usize
}
#[derive(Clone)]
pub struct PermutationArgument<E: Engine> {
non_permuted_coefficients: Vec<Vec<E::Fr>>,
permuted_coefficients: Vec<Vec<E::Fr>>,
permuted_at_y_coefficients: Vec<Vec<E::Fr>>,
permutations: Vec<Vec<usize>>,
n: usize
}
#[derive(Clone)]
pub struct PermutationProof<E: Engine> {
v_zy: E::Fr,
e_opening: E::G1Affine,
f_opening: E::G1Affine,
}
#[derive(Clone)]
pub struct Proof<E: Engine> {
j: usize,
s_opening: E::G1Affine,
s_zy: E::Fr
}
fn permute<F: Field>(coeffs: &[F], permutation: &[usize]) -> Vec<F> {
assert_eq!(coeffs.len(), permutation.len());
let mut result: Vec<F> = vec![F::zero(); coeffs.len()];
for (i, j) in permutation.iter().enumerate() {
result[*j - 1] = coeffs[i];
}
result
}
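// A minimal usage sketch with hypothetical values (not part of the original test
// suite): element `i` of `coeffs` is moved to slot `permutation[i] - 1`.
#[test]
fn test_permute_small() {
    use pairing::bls12_381::Fr;

    let coeffs = vec![Fr::from_str("1").unwrap(), Fr::from_str("2").unwrap(), Fr::from_str("3").unwrap()];
    let permutation = vec![2usize, 3, 1];
    let permuted = permute(&coeffs, &permutation);
    // coeffs[0] lands in slot 1, coeffs[1] in slot 2, coeffs[2] in slot 0
    assert_eq!(permuted, vec![Fr::from_str("3").unwrap(), Fr::from_str("1").unwrap(), Fr::from_str("2").unwrap()]);
}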
impl<E: Engine> PermutationArgument<E> {
pub fn new(coefficients: Vec<Vec<E::Fr>>, permutations: Vec<Vec<usize>>) -> Self {
assert!(coefficients.len() > 0);
assert_eq!(coefficients.len(), permutations.len());
let n = coefficients[0].len();
for (c, p) in coefficients.iter().zip(permutations.iter()) {
assert!(c.len() == p.len());
assert!(c.len() == n);
}
PermutationArgument {
non_permuted_coefficients: coefficients,
permuted_coefficients: vec![vec![]],
permuted_at_y_coefficients: vec![vec![]],
permutations: permutations,
n: n
}
}
pub fn make_specialized_srs(non_permuted_coefficients: &Vec<Vec<E::Fr>>, permutations: &Vec<Vec<usize>>, srs: &SRS<E>) -> SpecializedSRS<E> {
assert!(non_permuted_coefficients.len() > 0);
assert_eq!(non_permuted_coefficients.len(), permutations.len());
let n = non_permuted_coefficients[0].len();
// p1 is just a commitment to the powers of x
let p_1 = multiexp(srs.g_positive_x_alpha[0..n].iter(), vec![E::Fr::one(); n].iter()).into_affine();
let mut p_2 = vec![];
let p_3 = {
let values: Vec<E::Fr> = (1..=n).map(|el| {
let mut repr = <<E as ScalarEngine>::Fr as PrimeField>::Repr::default();
repr.as_mut()[0] = el as u64;
let fe = E::Fr::from_repr(repr).unwrap();
fe
}).collect();
multiexp(srs.g_positive_x_alpha[0..n].iter(), values.iter()).into_affine()
};
let mut p_4 = vec![];
for (c, p) in non_permuted_coefficients.iter().zip(permutations.iter()) {
assert!(c.len() == p.len());
assert!(c.len() == n);
// p2 is a commitment to the s^{perm}_i * x^i
{
// let permuted_coeffs = permute(&c[..], &p[..]);
let p2 = multiexp(srs.g_positive_x_alpha[0..n].iter(), c.iter()).into_affine();
p_2.push(p2);
}
{
let values: Vec<E::Fr> = p.iter().map(|el| {
let mut repr = <<E as ScalarEngine>::Fr as PrimeField>::Repr::default();
repr.as_mut()[0] = *el as u64;
let fe = E::Fr::from_repr(repr).unwrap();
fe
}).collect();
let p4 = multiexp(srs.g_positive_x_alpha[0..n].iter(), values.iter()).into_affine();
p_4.push(p4);
}
}
SpecializedSRS {
p_1: p_1,
p_2: p_2,
p_3: p_3,
p_4: p_4,
n: n
}
}
// commit to s and s' at y. Mutates the state
pub fn commit(&mut self, y: E::Fr, srs: &SRS<E>) -> Vec<(E::G1Affine, E::G1Affine)> {
let mut result = vec![];
let n = self.non_permuted_coefficients[0].len();
let mut permuted_coefficients = vec![];
let mut permuted_at_y_coefficients = vec![];
for (c, p) in self.non_permuted_coefficients.iter().zip(self.permutations.iter()) {
let mut non_permuted = c.clone();
let permuted = permute(&non_permuted[..], &p[..]);
mut_distribute_consequitive_powers(&mut non_permuted[..], y, y);
let s_prime = multiexp(srs.g_positive_x_alpha[0..n].iter(), non_permuted.iter()).into_affine();
let mut permuted_at_y = permute(&non_permuted[..], &p[..]);
drop(non_permuted);
let s = multiexp(srs.g_positive_x_alpha[0..n].iter(), permuted_at_y.iter()).into_affine();
result.push((s, s_prime));
permuted_coefficients.push(permuted);
permuted_at_y_coefficients.push(permuted_at_y);
}
self.permuted_coefficients = permuted_coefficients;
self.permuted_at_y_coefficients = permuted_at_y_coefficients;
result
}
pub fn open_commitments_to_s_prime(
&self,
challenges: &Vec<E::Fr>,
y: E::Fr,
z_prime: E::Fr,
srs: &SRS<E>
) -> PermutationProof<E> {
let n = self.non_permuted_coefficients[0].len();
let mut yz = y;
yz.mul_assign(&z_prime);
let mut polynomial: Option<Vec<E::Fr>> = None;
for (p, r) in self.non_permuted_coefficients.iter()
.zip(challenges.iter()) {
if polynomial.is_some() {
if let Some(poly) = polynomial.as_mut() {
mul_add_polynomials(&mut poly[..], &p[..], *r);
}
} else {
let mut poly = p.clone();
mul_polynomial_by_scalar(&mut poly[..], *r);
polynomial = Some(poly);
}
}
let mut polynomial = polynomial.unwrap();
let v = evaluate_at_consequitive_powers(&polynomial[..], yz, yz);
let mut v_neg = v;
v_neg.negate();
let f = polynomial_commitment_opening(
0,
n,
Some(v_neg).iter().chain_ext(polynomial.iter()),
yz,
&srs
);
mut_distribute_consequitive_powers(&mut polynomial[..], y, y);
let e = polynomial_commitment_opening(
0,
n,
Some(v_neg).iter().chain_ext(polynomial.iter()),
z_prime,
&srs
);
PermutationProof {
v_zy: v,
e_opening: e,
f_opening: f
}
}
// Make a permutation argument. The current implementation consumes `self`, because extra arguments are required
pub fn make_argument(self,
beta: E::Fr,
gamma: E::Fr,
grand_product_challenges: & Vec<E::Fr>,
wellformed_challenges: & Vec<E::Fr>,
y: E::Fr,
z: E::Fr,
specialized_srs: &SpecializedSRS<E>,
srs: &SRS<E>
) -> Proof<E> {
// Sj(P4j)^β (P1j)^γ is equal to the product of the coefficients of Sj(P3j)^β (P1j)^γ
// also open s = \sum self.permuted_coefficients(X, y) at z
let n = self.n;
let j = self.non_permuted_coefficients.len();
assert_eq!(j, grand_product_challenges.len());
assert_eq!(2*j, wellformed_challenges.len());
let mut s_polynomial: Option<Vec<E::Fr>> = None;
for c in self.permuted_at_y_coefficients.iter()
{
if s_polynomial.is_some() {
if let Some(poly) = s_polynomial.as_mut() {
add_polynomials(&mut poly[..], & c[..]);
}
} else {
s_polynomial = Some(c.clone());
}
}
let mut s_polynomial = s_polynomial.unwrap();
// evaluate at z
let s_zy = evaluate_at_consequitive_powers(& s_polynomial[..], z, z);
let mut s_zy_neg = s_zy;
s_zy_neg.negate();
let s_zy_opening = polynomial_commitment_opening(
0,
n,
Some(s_zy_neg).iter().chain_ext(s_polynomial.iter()),
z,
&srs
);
// Sj(P4j)^β (P1j)^γ is equal to the product of the coefficients of Sj(P3j)^β (P1j)^γ
let p_1_values = vec![E::Fr::one(); n];
let p_3_values: Vec<E::Fr> = (1..=n).map(|el| {
let mut repr = <<E as ScalarEngine>::Fr as PrimeField>::Repr::default();
repr.as_mut()[0] = el as u64;
let fe = E::Fr::from_repr(repr).unwrap();
fe
}).collect();
let mut grand_products = vec![];
for (i, ((non_permuted, permuted), permutation)) in self.non_permuted_coefficients.into_iter()
.zip(self.permuted_coefficients.into_iter())
.zip(self.permutations.into_iter()).enumerate()
{
// \prod (s_i + β σ_i + γ) = \prod (s'_i + β i + γ)
let mut s_j_combination = non_permuted;
{
let p_4_values: Vec<E::Fr> = permutation.into_iter().map(|el| {
let mut repr = <<E as ScalarEngine>::Fr as PrimeField>::Repr::default();
repr.as_mut()[0] = el as u64;
let fe = E::Fr::from_repr(repr).unwrap();
fe
}).collect();
mul_add_polynomials(&mut s_j_combination[..], & p_4_values[..], beta);
mul_add_polynomials(&mut s_j_combination[..], & p_1_values[..], gamma);
}
let mut s_prime_j_combination = permuted;
{
mul_add_polynomials(&mut s_prime_j_combination[..], & p_3_values[..], beta);
mul_add_polynomials(&mut s_prime_j_combination[..], & p_1_values[..], gamma);
}
grand_products.push((s_j_combination, s_prime_j_combination));
}
let mut a_commitments = vec![];
let mut b_commitments = vec![];
for (a, b) in grand_products.iter() {
let (c_a, c_b) = GrandProductArgument::commit_for_individual_products(& a[..], & b[..], &srs);
a_commitments.push(c_a);
b_commitments.push(c_b);
}
{
let mut all_polys = vec![];
for p in grand_products.iter() {
let (a, b) = p;
all_polys.push(a.clone());
all_polys.push(b.clone());
}
let wellformed_argument = WellformednessArgument::new(all_polys);
let commitments = wellformed_argument.commit(&srs);
let proof = wellformed_argument.make_argument(wellformed_challenges.clone(), &srs);
let valid = WellformednessArgument::verify(n, &wellformed_challenges, &commitments, &proof, &srs);
assert!(valid, "wellformedness argument must be valid");
}
let mut grand_product_argument = GrandProductArgument::new(grand_products);
let c_commitments = grand_product_argument.commit_to_individual_c_polynomials(&srs);
let t_commitment = grand_product_argument.commit_to_t_polynomial(&grand_product_challenges, y, &srs);
let grand_product_openings = grand_product_argument.open_commitments_for_grand_product(y, z, &srs);
let a_zy: Vec<E::Fr> = grand_product_openings.iter().map(|el| el.0.clone()).collect();
let proof = grand_product_argument.make_argument(&a_zy, &grand_product_challenges, y, z, &srs);
{
use rand::{XorShiftRng, SeedableRng, Rand, Rng};
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let randomness = (0..j).map(|_| E::Fr::rand(rng)).collect::<Vec<_>>();
let valid = GrandProductArgument::verify_ab_commitment(n,
&randomness,
&a_commitments,
& b_commitments,
&grand_product_openings,
y,
z,
&srs);
assert!(valid, "ab part of grand product argument must be valid");
let randomness = (0..3).map(|_| E::Fr::rand(rng)).collect::<Vec<_>>();
let valid = GrandProductArgument::verify(n,
&randomness,
&a_zy,
&grand_product_challenges,
t_commitment,
&c_commitments,
&proof,
y,
z,
&srs);
assert!(valid, "grand product argument must be valid");
}
Proof {
j: j,
s_opening: s_zy_opening,
s_zy: s_zy
}
}
pub fn verify_s_prime_commitment(
n: usize,
randomness: & Vec<E::Fr>,
challenges: & Vec<E::Fr>,
commitments: &Vec<E::G1Affine>,
proof: &PermutationProof<E>,
y: E::Fr,
z_prime: E::Fr,
specialized_srs: &SpecializedSRS<E>,
srs: &SRS<E>
) -> bool {
assert_eq!(randomness.len(), 2);
assert_eq!(challenges.len(), commitments.len());
// let g = srs.g_positive_x[0];
// let h_alpha_x_precomp = srs.h_positive_x_alpha[1].prepare();
// let h_alpha_precomp = srs.h_positive_x_alpha[0].prepare();
// let mut h_prep = srs.h_positive_x[0];
// h_prep.negate();
// let h_prep = h_prep.prepare();
// let value = proof.v_zy;
// let g_v = g.mul(value.into_repr());
// {
// let mut minus_z_prime = z_prime;
// minus_z_prime.negate();
// let e_z = proof.e_opening.mul(minus_z_prime.into_repr());
// let mut h_alpha_term = e_z;
// h_alpha_term.add_assign(&g_v);
// let h_alpha_x_term = proof.e_opening;
// let s_r = multiexp(
// commitments.iter(),
// challenges.iter()
// ).into_affine();
// let h_term = s_r;
// let valid = E::final_exponentiation(&E::miller_loop(&[
// (&h_alpha_x_term.prepare(), &h_alpha_x_precomp),
// (&h_alpha_term.into_affine().prepare(), &h_alpha_precomp),
// (&h_term.prepare(), &h_prep),
// ])).unwrap() == E::Fqk::one();
// if !valid {
// return false;
// }
// }
// {
// let mut minus_yz = z_prime;
// minus_yz.mul_assign(&y);
// minus_yz.negate();
// let f_yz = proof.f_opening.mul(minus_yz.into_repr());
// let p2_r = multiexp(
// specialized_srs.p_2.iter(),
// challenges.iter()
// ).into_affine();
// let mut h_alpha_term = f_yz;
// h_alpha_term.add_assign(&g_v);
// let h_alpha_x_term = proof.f_opening;
// let h_term = p2_r;
// let valid = E::final_exponentiation(&E::miller_loop(&[
// (&h_alpha_x_term.prepare(), &h_alpha_x_precomp),
// (&h_alpha_term.into_affine().prepare(), &h_alpha_precomp),
// (&h_term.prepare(), &h_prep),
// ])).unwrap() == E::Fqk::one();
// if !valid {
// return false;
// }
// }
// true
// e(E, h^{α x}) e(E^{-z'} g^{v}, h^{α}) = e(\prod_{j=1}^{M} S_j^{r_j}, h)
// e(F, h^{α x}) e(F^{-y z'} g^{v}, h^{α}) = e(\prod_{j=1}^{M} (P2_j)^{r_j}, h)
let g = srs.g_positive_x[0];
let h_alpha_x_precomp = srs.h_positive_x_alpha[1].prepare();
let h_alpha_precomp = srs.h_positive_x_alpha[0].prepare();
let mut h_prep = srs.h_positive_x[0];
h_prep.negate();
let h_prep = h_prep.prepare();
let mut value = E::Fr::zero();
for r in randomness.iter() {
value.add_assign(&r);
}
value.mul_assign(&proof.v_zy);
let mut minus_yz = z_prime;
minus_yz.mul_assign(&y);
minus_yz.negate();
let mut minus_z_prime = z_prime;
minus_z_prime.negate();
let f_yz = proof.f_opening.mul(minus_yz.into_repr());
let e_z = proof.e_opening.mul(minus_z_prime.into_repr());
let mut h_alpha_term = multiexp(
vec![e_z.into_affine(), f_yz.into_affine()].iter(),
randomness.iter(),
);
let g_v = g.mul(value.into_repr());
h_alpha_term.add_assign(&g_v);
let h_alpha_x_term = multiexp(
Some(proof.e_opening).iter()
.chain_ext(Some(proof.f_opening).iter()),
randomness.iter(),
).into_affine();
let s_r = multiexp(
commitments.iter(),
challenges.iter()
).into_affine();
let p2_r = multiexp(
specialized_srs.p_2.iter(),
challenges.iter()
).into_affine();
let h_term = multiexp(
Some(s_r).iter()
.chain_ext(Some(p2_r).iter()),
randomness.iter()
).into_affine();
E::final_exponentiation(&E::miller_loop(&[
(&h_alpha_x_term.prepare(), &h_alpha_x_precomp),
(&h_alpha_term.into_affine().prepare(), &h_alpha_precomp),
(&h_term.prepare(), &h_prep),
])).unwrap() == E::Fqk::one()
}
pub fn verify(
s_commitments: &Vec<E::G1Affine>,
proof: &Proof<E>,
z: E::Fr,
srs: &SRS<E>
) -> bool {
let g = srs.g_positive_x[0];
let h_alpha_x_precomp = srs.h_positive_x_alpha[1].prepare();
let h_alpha_precomp = srs.h_positive_x_alpha[0].prepare();
let mut h_prep = srs.h_positive_x[0];
h_prep.negate();
let h_prep = h_prep.prepare();
let mut minus_z = z;
minus_z.negate();
let opening_z = proof.s_opening.mul(minus_z.into_repr());
let mut h_alpha_term = opening_z;
let g_s = g.mul(proof.s_zy.into_repr());
h_alpha_term.add_assign(&g_s);
let h_alpha_x_term = proof.s_opening;
let mut s = E::G1::zero();
for p in s_commitments {
s.add_assign_mixed(&p);
}
let h_term = s.into_affine();
E::final_exponentiation(&E::miller_loop(&[
(&h_alpha_x_term.prepare(), &h_alpha_x_precomp),
(&h_alpha_term.into_affine().prepare(), &h_alpha_precomp),
(&h_term.prepare(), &h_prep),
])).unwrap() == E::Fqk::one()
}
}
#[test]
fn test_permutation_argument() {
use pairing::bls12_381::{Fr, G1Affine, G1, Bls12};
use rand::{XorShiftRng, SeedableRng, Rand, Rng};
use crate::sonic::srs::SRS;
let srs_x = Fr::from_str("23923").unwrap();
let srs_alpha = Fr::from_str("23728792").unwrap();
let srs = SRS::<Bls12>::dummy(830564, srs_x, srs_alpha);
let n: usize = 1 << 1;
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
// let coeffs = (0..n).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
// let mut permutation = (0..n).collect::<Vec<_>>();
// rng.shuffle(&mut permutation);
let coeffs = vec![Fr::from_str("3").unwrap(), Fr::from_str("4").unwrap()];
let permutation = vec![2, 1];
let coeffs = vec![coeffs];
let permutations = vec![permutation];
let specialized_srs = PermutationArgument::make_specialized_srs(&coeffs, &permutations, &srs);
let mut argument = PermutationArgument::new(coeffs, permutations);
let y : Fr = rng.gen();
let y : Fr = Fr::one();
let y : Fr = Fr::from_str("2").unwrap();
// let challenges = (0..1).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
let challenges = vec![Fr::one()];
let commitments = argument.commit(y, &srs);
let mut s_commitments = vec![];
let mut s_prime_commitments = vec![];
for (s, s_prime) in commitments.into_iter() {
s_commitments.push(s);
s_prime_commitments.push(s_prime);
}
let z_prime : Fr = rng.gen();
let z_prime : Fr = Fr::one();
let opening = argument.open_commitments_to_s_prime(&challenges, y, z_prime, &srs);
let randomness = (0..2).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
let valid = PermutationArgument::verify_s_prime_commitment(n,
&randomness,
&challenges,
&s_prime_commitments,
&opening,
y,
z_prime,
&specialized_srs,
&srs);
assert!(valid, "s' commitment must be valid");
let beta : Fr = rng.gen();
let gamma : Fr = rng.gen();
let grand_product_challenges = (0..1).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
let wellformed_challenges = (0..2).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
let z : Fr = rng.gen();
let proof = argument.make_argument(
beta,
gamma,
& grand_product_challenges,
& wellformed_challenges,
y, z,
&specialized_srs, &srs);
let valid = PermutationArgument::verify(&s_commitments, &proof, z, &srs);
assert!(valid, "permutation argument must be valid");
}

@ -0,0 +1,130 @@
use pairing::ff::{Field, PrimeField, PrimeFieldRepr};
use pairing::{Engine, CurveProjective, CurveAffine};
use std::marker::PhantomData;
use crate::sonic::srs::SRS;
use crate::sonic::util::*;
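/// Helper for the s_2(X, Y) part of s(X, Y) (see the module documentation of
/// `sonic::unhelped`): it produces a commitment element and openings of it at the
/// points x*y and x*y^{-1}.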
#[derive(Clone)]
pub struct S2Eval<E: Engine> {
n: usize,
_marker: PhantomData<E>
}
#[derive(Clone)]
pub struct S2Proof<E: Engine> {
o: E::G1Affine,
c_value: E::Fr,
d_value: E::Fr,
c_opening: E::G1Affine,
d_opening: E::G1Affine
}
impl<E: Engine> S2Eval<E> {
pub fn calculate_commitment_element(n: usize, srs: &SRS<E>) -> E::G1Affine {
// TODO: parallelize
let mut o = E::G1::zero();
for i in 0..n {
o.add_assign_mixed(&srs.g_positive_x_alpha[i]);
}
o.into_affine()
}
pub fn new(n: usize) -> Self {
S2Eval {
n: n,
_marker: PhantomData
}
}
pub fn evaluate(&self, x: E::Fr, y: E::Fr, srs: &SRS<E>) -> S2Proof<E> {
// create a reference element first
let o = Self::calculate_commitment_element(self.n, &srs);
let mut poly = vec![E::Fr::one(); self.n+1];
let (c, c_opening) = {
let mut point = y;
point.mul_assign(&x);
let val = evaluate_at_consequitive_powers(&poly[1..], E::Fr::one(), point);
poly[0] = val;
poly[0].negate();
let opening = polynomial_commitment_opening(0, self.n, poly.iter(), point, &srs);
(val, opening)
};
let (d, d_opening) = {
let mut point = y.inverse().unwrap();
point.mul_assign(&x);
let val = evaluate_at_consequitive_powers(&poly[1..], E::Fr::one(), point);
poly[0] = val;
poly[0].negate();
let opening = polynomial_commitment_opening(0, self.n, poly.iter(), point, &srs);
(val, opening)
};
S2Proof {
o: o,
c_value: c,
d_value: d,
c_opening: c_opening,
d_opening: d_opening
}
}
pub fn verify(x: E::Fr, y: E::Fr, proof: &S2Proof<E>, srs: &SRS<E>) -> bool {
// e(C, h^{α x}) e(g^{c} C^{-xy}, h^{α}) = e(O, h)
let alpha_x_precomp = srs.h_positive_x_alpha[1].prepare();
let alpha_precomp = srs.h_positive_x_alpha[0].prepare();
let mut h_prep = srs.h_positive_x[0];
h_prep.negate();
let h_prep = h_prep.prepare();
let mut c_minus_xy = proof.c_value;
let mut xy = x;
xy.mul_assign(&y);
c_minus_xy.sub_assign(&xy);
let c_in_c_minus_xy = proof.c_opening.mul(c_minus_xy.into_repr()).into_affine();
let valid = E::final_exponentiation(&E::miller_loop(&[
(&proof.c_opening.prepare(), &alpha_x_precomp),
(&c_in_c_minus_xy.prepare(), &alpha_precomp),
(&proof.o.prepare(), &h_prep),
])).unwrap() == E::Fqk::one();
if !valid {
return false;
}
// e(D, h^{α x}) e(g^{d} D^{-x y^{-1}}, h^{α}) = e(O, h)
let mut d_minus_x_y_inv = proof.d_value;
let mut x_y_inv = x;
x_y_inv.mul_assign(&y.inverse().unwrap());
d_minus_x_y_inv.sub_assign(&x_y_inv);
let d_in_d_minus_x_y_inv = proof.d_opening.mul(d_minus_x_y_inv.into_repr()).into_affine();
let valid = E::final_exponentiation(&E::miller_loop(&[
(&proof.d_opening.prepare(), &alpha_x_precomp),
(&d_in_d_minus_x_y_inv.prepare(), &alpha_precomp),
(&proof.o.prepare(), &h_prep),
])).unwrap() == E::Fqk::one();
if !valid {
return false;
}
true
}
}

@ -0,0 +1,185 @@
/// The wellformedness argument allows one to verify that some commitment was made to a polynomial of degree n,
/// with no constant term and no negative powers
use pairing::ff::{Field, PrimeField, PrimeFieldRepr};
use pairing::{Engine, CurveProjective, CurveAffine};
use std::marker::PhantomData;
use crate::sonic::srs::SRS;
use crate::sonic::util::*;
#[derive(Clone)]
pub struct WellformednessArgument<E: Engine> {
polynomials: Vec<Vec<E::Fr>>
}
#[derive(Clone)]
pub struct WellformednessProof<E: Engine> {
l: E::G1Affine,
r: E::G1Affine
}
impl<E: Engine> WellformednessArgument<E> {
pub fn new(polynomials: Vec<Vec<E::Fr>>) -> Self {
assert!(polynomials.len() > 0);
let length = polynomials[0].len();
for p in polynomials.iter() {
assert!(p.len() == length);
}
WellformednessArgument {
polynomials: polynomials
}
}
pub fn commit(&self, srs: &SRS<E>) -> Vec<E::G1Affine> {
let mut results = vec![];
let n = self.polynomials[0].len();
for p in self.polynomials.iter() {
let c = multiexp(
srs.g_positive_x_alpha[0..n].iter(),
p.iter()
).into_affine();
results.push(c);
}
results
}
pub fn make_argument(self, challenges: Vec<E::Fr>, srs: &SRS<E>) -> WellformednessProof<E> {
assert_eq!(challenges.len(), self.polynomials.len());
let mut polynomials = self.polynomials;
let mut challenges = challenges;
let mut p0 = polynomials.pop().unwrap();
let r0 = challenges.pop().unwrap();
let n = p0.len();
mul_polynomial_by_scalar(&mut p0[..], r0);
let m = polynomials.len();
for _ in 0..m {
let p = polynomials.pop().unwrap();
let r = challenges.pop().unwrap();
mul_add_polynomials(&mut p0[..], & p[..], r);
}
let d = srs.d;
assert!(n < d);
// here the multiplier is x^-d, so largest negative power is -(d - 1), smallest negative power is -(d - n)
let l = multiexp(
srs.g_negative_x[(d - n)..d].iter().rev(),
p0.iter()
).into_affine();
// here the multiplier is x^d-n, so largest positive power is d, smallest positive power is d - n + 1
let r = multiexp(
srs.g_positive_x[(d - n + 1)..].iter().rev(),
p0.iter()
).into_affine();
WellformednessProof {
l: l,
r: r
}
}
pub fn verify(n: usize, challenges: &Vec<E::Fr>, commitments: &Vec<E::G1Affine>, proof: &WellformednessProof<E>, srs: &SRS<E>) -> bool {
let d = srs.d;
let alpha_x_d_precomp = srs.h_positive_x_alpha[d].prepare();
let alpha_x_n_minus_d_precomp = srs.h_negative_x_alpha[d - n].prepare();
let mut h_prep = srs.h_positive_x[0];
h_prep.negate();
let h_prep = h_prep.prepare();
let a = multiexp(
commitments.iter(),
challenges.iter(),
).into_affine();
let a = a.prepare();
let valid = E::final_exponentiation(&E::miller_loop(&[
(&a, &h_prep),
(&proof.l.prepare(), &alpha_x_d_precomp)
])).unwrap() == E::Fqk::one();
if !valid {
return false;
}
let valid = E::final_exponentiation(&E::miller_loop(&[
(&a, &h_prep),
(&proof.r.prepare(), &alpha_x_n_minus_d_precomp)
])).unwrap() == E::Fqk::one();
if !valid {
return false;
}
true
}
}
#[test]
fn test_argument() {
use pairing::bls12_381::{Fr, G1Affine, G1, Bls12};
use rand::{XorShiftRng, SeedableRng, Rand, Rng};
use crate::sonic::srs::SRS;
let srs_x = Fr::from_str("23923").unwrap();
let srs_alpha = Fr::from_str("23728792").unwrap();
let srs = SRS::<Bls12>::dummy(830564, srs_x, srs_alpha);
let n: usize = 1 << 16;
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let coeffs = (0..n).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
let argument = WellformednessArgument::new(vec![coeffs]);
let challenges = (0..1).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
let commitments = argument.commit(&srs);
let proof = argument.make_argument(challenges.clone(), &srs);
let valid = WellformednessArgument::verify(n, &challenges, &commitments, &proof, &srs);
assert!(valid);
}
#[test]
fn test_argument_soundness() {
use pairing::bls12_381::{Fr, G1Affine, G1, Bls12};
use rand::{XorShiftRng, SeedableRng, Rand, Rng};
use crate::sonic::srs::SRS;
let srs_x = Fr::from_str("23923").unwrap();
let srs_alpha = Fr::from_str("23728792").unwrap();
let srs = SRS::<Bls12>::dummy(830564, srs_x, srs_alpha);
let n: usize = 1 << 8;
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let coeffs = (0..n).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
let argument = WellformednessArgument::new(vec![coeffs]);
let commitments = argument.commit(&srs);
let coeffs = (0..n).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
let argument = WellformednessArgument::new(vec![coeffs]);
let challenges = (0..1).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
let proof = argument.make_argument(challenges.clone(), &srs);
let valid = WellformednessArgument::verify(n, &challenges, &commitments, &proof, &srs);
assert!(!valid);
}

807
src/sonic/util.rs Normal file

@ -0,0 +1,807 @@
use crate::SynthesisError;
use pairing::ff::{Field, PrimeField, PrimeFieldRepr, ScalarEngine};
use pairing::{CurveAffine, CurveProjective, Engine};
use super::srs::SRS;
pub trait ChainExt: Iterator {
fn chain_ext<U>(self, other: U) -> Chain<Self, U::IntoIter>
where
Self: Sized,
U: IntoIterator<Item = Self::Item>,
{
Chain {
t: self,
u: other.into_iter(),
}
}
}
impl<I: Iterator> ChainExt for I {}
#[derive(Clone)]
pub struct Chain<T, U> {
t: T,
u: U,
}
impl<T, U> Iterator for Chain<T, U>
where
T: Iterator,
U: Iterator<Item = T::Item>,
{
type Item = T::Item;
fn next(&mut self) -> Option<T::Item> {
match self.t.next() {
Some(v) => Some(v),
None => match self.u.next() {
Some(v) => Some(v),
None => None,
},
}
}
}
impl<T, U> ExactSizeIterator for Chain<T, U>
where
T: Iterator,
U: Iterator<Item = T::Item>,
T: ExactSizeIterator,
U: ExactSizeIterator,
{
fn len(&self) -> usize {
self.t.len() + self.u.len()
}
}
impl<T, U> DoubleEndedIterator for Chain<T, U>
where
T: Iterator,
U: Iterator<Item = T::Item>,
T: DoubleEndedIterator,
U: DoubleEndedIterator,
{
fn next_back(&mut self) -> Option<T::Item> {
match self.u.next_back() {
Some(v) => Some(v),
None => match self.t.next_back() {
Some(v) => Some(v),
None => None,
},
}
}
}
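// A minimal sketch with illustrative values (not part of the original tests):
// `chain_ext` behaves like `Iterator::chain`, but keeps `ExactSizeIterator`,
// which `multiexp` and the commitment helpers below rely on.
#[test]
fn test_chain_ext() {
    let a = vec![1u64, 2, 3];
    let b = vec![4u64, 5];
    let chained = a.iter().chain_ext(b.iter());
    assert_eq!(chained.len(), 5);
    assert_eq!(chained.cloned().collect::<Vec<_>>(), vec![1, 2, 3, 4, 5]);
}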
pub fn polynomial_commitment<
'a,
E: Engine,
IS: IntoIterator<Item = &'a E::Fr>,
>(
max: usize,
largest_negative_power: usize,
largest_positive_power: usize,
srs: &'a SRS<E>,
s: IS,
) -> E::G1Affine
where
IS::IntoIter: ExactSizeIterator,
{
// smallest power is d - max - largest_negative_power; it should either be 0 to use positive powers only,
// or we should use part of the negative powers
let d = srs.d;
assert!(max >= largest_positive_power);
// use both positive and negative powers for commitment
if d < max + largest_negative_power + 1 {
let min_power = largest_negative_power + max - d;
let max_power = d + largest_positive_power - max;
// need to use negative powers to make a proper commitment
return multiexp(
srs.g_negative_x_alpha[0..min_power].iter().rev()
.chain_ext(srs.g_positive_x_alpha[..max_power].iter()),
s
).into_affine();
} else {
return multiexp(
srs.g_positive_x_alpha[(srs.d - max - largest_negative_power - 1)..].iter(),
s
).into_affine();
}
}
/// For now this function MUST take a polynomial in the form f(x) - f(z)
pub fn polynomial_commitment_opening<
'a,
E: Engine,
I: IntoIterator<Item = &'a E::Fr>
>(
largest_negative_power: usize,
largest_positive_power: usize,
polynomial_coefficients: I,
point: E::Fr,
srs: &'a SRS<E>,
) -> E::G1Affine
where I::IntoIter: DoubleEndedIterator + ExactSizeIterator,
{
let poly = kate_divison(
polynomial_coefficients,
point,
);
let negative_poly = poly[0..largest_negative_power].iter().rev();
let positive_poly = poly[largest_negative_power..].iter();
multiexp(
srs.g_negative_x[1..(negative_poly.len() + 1)].iter().chain_ext(
srs.g_positive_x[0..positive_poly.len()].iter()
),
negative_poly.chain_ext(positive_poly)
).into_affine()
}
extern crate crossbeam;
use self::crossbeam::channel::{unbounded};
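/// Evaluate the polynomial given by `coeffs` at consecutive powers of `base`,
/// i.e. return `sum_i coeffs[i] * first_power * base^i`, splitting the work
/// across the multicore worker.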
pub fn evaluate_at_consequitive_powers<'a, F: Field> (
coeffs: &[F],
first_power: F,
base: F
) -> F
{
use crate::multicore::Worker;
let (s, r) = unbounded();
let worker = Worker::new();
worker.scope(coeffs.len(), |scope, chunk| {
for (i, coeffs) in coeffs.chunks(chunk).enumerate()
{
let s = s.clone();
scope.spawn(move |_| {
let mut current_power = base.pow(&[(i*chunk) as u64]);
current_power.mul_assign(&first_power);
let mut acc = F::zero();
for p in coeffs {
let mut tmp = *p;
tmp.mul_assign(&current_power);
acc.add_assign(&tmp);
current_power.mul_assign(&base);
}
s.send(acc).expect("must send");
});
}
});
drop(s);
// all threads in the scope have finished working, so we can safely read
let mut result = F::zero();
loop {
if r.is_empty() {
break;
}
let value = r.recv().expect("must not be empty");
result.add_assign(&value);
}
result
}
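/// Same as `evaluate_at_consequitive_powers`, but also multiplies each
/// coefficient in place by its corresponding power of the base.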
pub fn mut_evaluate_at_consequitive_powers<'a, F: Field> (
coeffs: &mut [F],
first_power: F,
base: F
) -> F
{
use crate::multicore::Worker;
let (s, r) = unbounded();
let worker = Worker::new();
worker.scope(coeffs.len(), |scope, chunk| {
for (i, coeffs) in coeffs.chunks_mut(chunk).enumerate()
{
let s = s.clone();
scope.spawn(move |_| {
let mut current_power = base.pow(&[(i*chunk) as u64]);
current_power.mul_assign(&first_power);
let mut acc = F::zero();
for mut p in coeffs {
p.mul_assign(&current_power);
acc.add_assign(&p);
current_power.mul_assign(&base);
}
s.send(acc).expect("must send");
});
}
});
drop(s);
// all threads in the scope have finished working, so we can safely read
let mut result = F::zero();
loop {
if r.is_empty() {
break;
}
let value = r.recv().expect("must not be empty");
result.add_assign(&value);
}
result
}
/// Multiply each coefficient by some power of the base in the form
/// `first_power * base^{i}`
pub fn mut_distribute_consequitive_powers<'a, F: Field> (
coeffs: &mut [F],
first_power: F,
base: F
)
{
use crate::multicore::Worker;
let worker = Worker::new();
worker.scope(coeffs.len(), |scope, chunk| {
for (i, coeffs_chunk) in coeffs.chunks_mut(chunk).enumerate()
{
scope.spawn(move |_| {
let mut current_power = base.pow(&[(i*chunk) as u64]);
current_power.mul_assign(&first_power);
for mut p in coeffs_chunk {
p.mul_assign(&current_power);
current_power.mul_assign(&base);
}
});
}
});
}
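/// Multi-exponentiation `sum_i s_i * g_i`, delegated to the dense multiexp
/// routine over the worker pool.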
pub fn multiexp<
'a,
G: CurveAffine,
IB: IntoIterator<Item = &'a G>,
IS: IntoIterator<Item = &'a G::Scalar>,
>(
g: IB,
s: IS,
) -> G::Projective
where
IB::IntoIter: ExactSizeIterator + Clone,
IS::IntoIter: ExactSizeIterator,
{
use crate::multicore::Worker;
use crate::multiexp::dense_multiexp;
let s: Vec<<G::Scalar as PrimeField>::Repr> = s.into_iter().map(|e| e.into_repr()).collect::<Vec<_>>();
let g: Vec<G> = g.into_iter().map(|e| *e).collect::<Vec<_>>();
assert_eq!(s.len(), g.len(), "scalars and exponents must have the same length");
let pool = Worker::new();
let result = dense_multiexp(
&pool,
&g,
&s
).unwrap();
result
}
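/// Serial reference multi-exponentiation using the windowed bucket
/// (Pippenger-style) method with window size `c`.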
pub fn multiexp_serial<
'a,
G: CurveAffine,
IB: IntoIterator<Item = &'a G>,
IS: IntoIterator<Item = &'a G::Scalar>,
>(
g: IB,
s: IS,
) -> G::Projective
where
IB::IntoIter: ExactSizeIterator + Clone,
IS::IntoIter: ExactSizeIterator,
{
let g = g.into_iter();
let s = s.into_iter();
assert_eq!(g.len(), s.len());
let c = if s.len() < 32 {
3u32
} else {
(f64::from(s.len() as u32)).ln().ceil() as u32
};
// Convert all of the scalars into representations
let mut s = s.map(|s| s.into_repr()).collect::<Vec<_>>();
let mut windows = vec![];
let mut buckets = vec![];
let mask = (1u64 << c) - 1u64;
let mut cur = 0;
let num_bits = <G::Engine as ScalarEngine>::Fr::NUM_BITS;
while cur <= num_bits {
let mut acc = G::Projective::zero();
buckets.truncate(0);
buckets.resize((1 << c) - 1, G::Projective::zero());
let g = g.clone();
for (s, g) in s.iter_mut().zip(g) {
let index = (s.as_ref()[0] & mask) as usize;
if index != 0 {
buckets[index - 1].add_assign_mixed(g);
}
s.shr(c as u32);
}
let mut running_sum = G::Projective::zero();
for exp in buckets.iter().rev() {
running_sum.add_assign(exp);
acc.add_assign(&running_sum);
}
windows.push(acc);
cur += c;
}
let mut acc = G::Projective::zero();
for window in windows.into_iter().rev() {
for _ in 0..c {
acc.double();
}
acc.add_assign(&window);
}
acc
}
/// Divides polynomial `a` in `x` by `x - b` with
/// no remainder.
pub fn kate_divison<'a, F: Field, I: IntoIterator<Item = &'a F>>(a: I, mut b: F) -> Vec<F>
where
I::IntoIter: DoubleEndedIterator + ExactSizeIterator,
{
b.negate();
let a = a.into_iter();
let mut q = vec![F::zero(); a.len() - 1];
let mut tmp = F::zero();
for (q, r) in q.iter_mut().rev().zip(a.rev()) {
let mut lead_coeff = *r;
lead_coeff.sub_assign(&tmp);
*q = lead_coeff;
tmp = lead_coeff;
tmp.mul_assign(&b);
}
q
}
/// Convenience function to check a polynomial commitment opening
pub fn check_polynomial_commitment<E: Engine>(
commitment: &E::G1Affine,
point: &E::Fr,
value: &E::Fr,
opening: &E::G1Affine,
max: usize,
srs: &SRS<E>
) -> bool {
// e(W, h^{α x}) e(g^{v} W^{-z}, h^{α}) = e(F, h^{x^{-(d - max)}})
if srs.d < max {
return false;
}
let alpha_x_precomp = srs.h_positive_x_alpha[1].prepare();
let alpha_precomp = srs.h_positive_x_alpha[0].prepare();
let mut neg_x_n_minus_d_precomp = srs.h_negative_x[srs.d - max];
neg_x_n_minus_d_precomp.negate();
let neg_x_n_minus_d_precomp = neg_x_n_minus_d_precomp.prepare();
let w = opening.prepare();
let mut gv = srs.g_positive_x[0].mul(value.into_repr());
let mut z_neg = *point;
z_neg.negate();
let w_minus_z = opening.mul(z_neg.into_repr());
gv.add_assign(&w_minus_z);
let gv = gv.into_affine().prepare();
E::final_exponentiation(&E::miller_loop(&[
(&w, &alpha_x_precomp),
(&gv, &alpha_precomp),
(&commitment.prepare(), &neg_x_n_minus_d_precomp),
])).unwrap() == E::Fqk::one()
}
#[test]
fn laurent_division() {
use pairing::ff::PrimeField;
use pairing::bls12_381::{Fr};
let mut poly = vec![
Fr::from_str("328947234").unwrap(),
Fr::from_str("3545623451111").unwrap(),
Fr::from_str("112").unwrap(),
Fr::from_str("55555").unwrap(),
Fr::from_str("1235685").unwrap(),
];
fn eval(poly: &[Fr], point: Fr) -> Fr {
let point_inv = point.inverse().unwrap();
let mut acc = Fr::zero();
let mut tmp = Fr::one();
for p in &poly[2..] {
let mut t = *p;
t.mul_assign(&tmp);
acc.add_assign(&t);
tmp.mul_assign(&point);
}
let mut tmp = point_inv;
for p in poly[0..2].iter().rev() {
let mut t = *p;
t.mul_assign(&tmp);
acc.add_assign(&t);
tmp.mul_assign(&point_inv);
}
acc
}
let x = Fr::from_str("23").unwrap();
let z = Fr::from_str("2000").unwrap();
let p_at_x = eval(&poly, x);
let p_at_z = eval(&poly, z);
// poly = poly(X) - poly(z)
poly[2].sub_assign(&p_at_z);
let quotient_poly = kate_divison(&poly, z);
let quotient = eval(&quotient_poly, x);
// check that
// quotient * (x - z) = p_at_x - p_at_z
let mut lhs = x;
lhs.sub_assign(&z);
lhs.mul_assign(&quotient);
let mut rhs = p_at_x;
rhs.sub_assign(&p_at_z);
assert_eq!(lhs, rhs);
}
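/// Multiply two polynomials via FFT: evaluate both over a sufficiently large
/// domain, multiply pointwise, and interpolate the product back.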
pub fn multiply_polynomials<E: Engine>(a: Vec<E::Fr>, b: Vec<E::Fr>) -> Vec<E::Fr> {
let result_len = a.len() + b.len() - 1;
use crate::multicore::Worker;
use crate::domain::{EvaluationDomain, Scalar};
let worker = Worker::new();
let scalars_a: Vec<Scalar<E>> = a.into_iter().map(|e| Scalar::<E>(e)).collect();
let mut domain_a = EvaluationDomain::from_coeffs_into_sized(scalars_a, result_len).unwrap();
let scalars_b: Vec<Scalar<E>> = b.into_iter().map(|e| Scalar::<E>(e)).collect();
let mut domain_b = EvaluationDomain::from_coeffs_into_sized(scalars_b, result_len).unwrap();
domain_a.fft(&worker);
domain_b.fft(&worker);
domain_a.mul_assign(&worker, &domain_b);
drop(domain_b);
domain_a.ifft(&worker);
let mut mul_result: Vec<E::Fr> = domain_a.into_coeffs().iter().map(|e| e.0).collect();
mul_result.truncate(result_len);
mul_result
}
pub fn multiply_polynomials_serial<E: Engine>(mut a: Vec<E::Fr>, mut b: Vec<E::Fr>) -> Vec<E::Fr> {
let result_len = a.len() + b.len() - 1;
// Compute the size of our evaluation domain
let mut m = 1;
let mut exp = 0;
while m < result_len {
m *= 2;
exp += 1;
// The pairing-friendly curve may not be able to support
// large enough (radix2) evaluation domains.
if exp >= E::Fr::S {
panic!("polynomial too large")
}
}
// Compute omega, the 2^exp primitive root of unity
let mut omega = E::Fr::root_of_unity();
for _ in exp..E::Fr::S {
omega.square();
}
// Extend with zeroes
a.resize(m, E::Fr::zero());
b.resize(m, E::Fr::zero());
serial_fft::<E>(&mut a[..], &omega, exp);
serial_fft::<E>(&mut b[..], &omega, exp);
for (a, b) in a.iter_mut().zip(b.iter()) {
a.mul_assign(b);
}
serial_fft::<E>(&mut a[..], &omega.inverse().unwrap(), exp);
a.truncate(result_len);
let minv = E::Fr::from_str(&format!("{}", m))
.unwrap()
.inverse()
.unwrap();
for a in a.iter_mut() {
a.mul_assign(&minv);
}
a
}
pub fn add_polynomials<F: Field>(a: &mut [F], b: &[F]) {
use crate::multicore::Worker;
use crate::domain::{EvaluationDomain, Scalar};
let worker = Worker::new();
assert_eq!(a.len(), b.len());
worker.scope(a.len(), |scope, chunk| {
for (a, b) in a.chunks_mut(chunk).zip(b.chunks(chunk))
{
scope.spawn(move |_| {
for (a, b) in a.iter_mut().zip(b.iter()) {
a.add_assign(b);
}
});
}
});
}
pub fn mul_polynomial_by_scalar<F: Field>(a: &mut [F], b: F) {
use crate::multicore::Worker;
use crate::domain::{EvaluationDomain, Scalar};
let worker = Worker::new();
worker.scope(a.len(), |scope, chunk| {
for a in a.chunks_mut(chunk)
{
scope.spawn(move |_| {
for a in a.iter_mut() {
a.mul_assign(&b);
}
});
}
});
}
pub fn mul_add_polynomials<F: Field>(a: &mut [F], b: &[F], c: F) {
use crate::multicore::Worker;
use crate::domain::{EvaluationDomain, Scalar};
let worker = Worker::new();
assert_eq!(a.len(), b.len());
worker.scope(a.len(), |scope, chunk| {
for (a, b) in a.chunks_mut(chunk).zip(b.chunks(chunk))
{
scope.spawn(move |_| {
for (a, b) in a.iter_mut().zip(b.iter()) {
let mut r = *b;
r.mul_assign(&c);
a.add_assign(&r);
}
});
}
});
}
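/// In-place radix-2 FFT: bit-reverse the coefficients, then apply the usual
/// butterfly stages with `omega`, a primitive 2^log_n root of unity.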
fn serial_fft<E: Engine>(a: &mut [E::Fr], omega: &E::Fr, log_n: u32) {
fn bitreverse(mut n: u32, l: u32) -> u32 {
let mut r = 0;
for _ in 0..l {
r = (r << 1) | (n & 1);
n >>= 1;
}
r
}
let n = a.len() as u32;
assert_eq!(n, 1 << log_n);
for k in 0..n {
let rk = bitreverse(k, log_n);
if k < rk {
a.swap(rk as usize, k as usize);
}
}
let mut m = 1;
for _ in 0..log_n {
let w_m = omega.pow(&[(n / (2 * m)) as u64]);
let mut k = 0;
while k < n {
let mut w = E::Fr::one();
for j in 0..m {
let mut t = a[(k + j + m) as usize];
t.mul_assign(&w);
let mut tmp = a[(k + j) as usize];
tmp.sub_assign(&t);
a[(k + j + m) as usize] = tmp;
a[(k + j) as usize].add_assign(&t);
w.mul_assign(&w_m);
}
k += 2 * m;
}
m *= 2;
}
}
pub trait OptionExt<T> {
fn get(self) -> Result<T, SynthesisError>;
}
impl<T> OptionExt<T> for Option<T> {
fn get(self) -> Result<T, SynthesisError> {
match self {
Some(t) => Ok(t),
None => Err(SynthesisError::AssignmentMissing),
}
}
}
#[test]
fn test_mul() {
use rand::{self, Rand};
use pairing::bls12_381::Bls12;
use pairing::bls12_381::Fr;
const SAMPLES: usize = 100;
let rng = &mut rand::thread_rng();
let a = (0..SAMPLES).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
let b = (0..SAMPLES).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
let serial_res = multiply_polynomials_serial::<Bls12>(a.clone(), b.clone());
let parallel_res = multiply_polynomials::<Bls12>(a, b);
assert_eq!(serial_res.len(), parallel_res.len());
assert_eq!(serial_res, parallel_res);
}
#[test]
fn test_eval_at_powers() {
use rand::{self, Rand, Rng};
use pairing::bls12_381::Bls12;
use pairing::bls12_381::Fr;
const SAMPLES: usize = 100000;
let rng = &mut rand::thread_rng();
let a = (0..SAMPLES).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
let x: Fr = rng.gen();
let n: u32 = rng.gen();
let mut acc = Fr::zero();
{
let mut tmp = x.pow(&[n as u64]);
for coeff in a.iter() {
let mut c = *coeff;
c.mul_assign(&tmp);
acc.add_assign(&c);
tmp.mul_assign(&x);
}
}
let first_power = x.pow(&[n as u64]);
let acc_parallel = evaluate_at_consequitive_powers(&a[..], first_power, x);
assert_eq!(acc_parallel, acc);
}
#[test]
fn test_mut_eval_at_powers() {
use rand::{self, Rand, Rng};
use pairing::bls12_381::Bls12;
use pairing::bls12_381::Fr;
const SAMPLES: usize = 100000;
let rng = &mut rand::thread_rng();
let mut a = (0..SAMPLES).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
let mut b = a.clone();
let x: Fr = rng.gen();
let n: u32 = rng.gen();
let mut acc = Fr::zero();
{
let mut tmp = x.pow(&[n as u64]);
for mut coeff in a.iter_mut() {
coeff.mul_assign(&tmp);
acc.add_assign(&coeff);
tmp.mul_assign(&x);
}
}
let first_power = x.pow(&[n as u64]);
let acc_parallel = mut_evaluate_at_consequitive_powers(&mut b[..], first_power, x);
assert_eq!(acc_parallel, acc);
assert!(a == b);
}
#[test]
fn test_mut_distribute_powers() {
use rand::{self, Rand, Rng};
use pairing::bls12_381::Bls12;
use pairing::bls12_381::Fr;
const SAMPLES: usize = 100000;
let rng = &mut rand::thread_rng();
let mut a = (0..SAMPLES).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
let mut b = a.clone();
let x: Fr = rng.gen();
let n: u32 = rng.gen();
{
let mut tmp = x.pow(&[n as u64]);
for mut coeff in a.iter_mut() {
coeff.mul_assign(&tmp);
tmp.mul_assign(&x);
}
}
let first_power = x.pow(&[n as u64]);
mut_distribute_consequitive_powers(&mut b[..], first_power, x);
assert!(a == b);
}

141
src/source.rs Normal file

@ -0,0 +1,141 @@
use pairing::{
CurveAffine,
CurveProjective,
Engine
};
use pairing::ff::{
PrimeField,
Field,
PrimeFieldRepr,
ScalarEngine};
use std::sync::Arc;
use std::io;
use bit_vec::{self, BitVec};
use std::iter;
use super::SynthesisError;
/// An object that builds a source of bases.
pub trait SourceBuilder<G: CurveAffine>: Send + Sync + 'static + Clone {
type Source: Source<G>;
fn new(self) -> Self::Source;
}
/// A source of bases, like an iterator.
pub trait Source<G: CurveAffine> {
/// Parses the element from the source. Fails if the point is at infinity.
fn add_assign_mixed(&mut self, to: &mut <G as CurveAffine>::Projective) -> Result<(), SynthesisError>;
/// Skips `amt` elements from the source, avoiding deserialization.
fn skip(&mut self, amt: usize) -> Result<(), SynthesisError>;
}
impl<G: CurveAffine> SourceBuilder<G> for (Arc<Vec<G>>, usize) {
type Source = (Arc<Vec<G>>, usize);
fn new(self) -> (Arc<Vec<G>>, usize) {
(self.0.clone(), self.1)
}
}
impl<G: CurveAffine> Source<G> for (Arc<Vec<G>>, usize) {
fn add_assign_mixed(&mut self, to: &mut <G as CurveAffine>::Projective) -> Result<(), SynthesisError> {
if self.0.len() <= self.1 {
return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "expected more bases when adding from source").into());
}
if self.0[self.1].is_zero() {
return Err(SynthesisError::UnexpectedIdentity)
}
to.add_assign_mixed(&self.0[self.1]);
self.1 += 1;
Ok(())
}
fn skip(&mut self, amt: usize) -> Result<(), SynthesisError> {
if self.0.len() <= self.1 {
return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "expected more bases skipping from source").into());
}
self.1 += amt;
Ok(())
}
}
pub trait QueryDensity {
/// Returns whether the base exists.
type Iter: Iterator<Item=bool>;
fn iter(self) -> Self::Iter;
fn get_query_size(self) -> Option<usize>;
}
#[derive(Clone)]
pub struct FullDensity;
impl AsRef<FullDensity> for FullDensity {
fn as_ref(&self) -> &FullDensity {
self
}
}
impl<'a> QueryDensity for &'a FullDensity {
type Iter = iter::Repeat<bool>;
fn iter(self) -> Self::Iter {
iter::repeat(true)
}
fn get_query_size(self) -> Option<usize> {
None
}
}
#[derive(Clone)]
pub struct DensityTracker {
bv: BitVec,
total_density: usize
}
impl<'a> QueryDensity for &'a DensityTracker {
type Iter = bit_vec::Iter<'a>;
fn iter(self) -> Self::Iter {
self.bv.iter()
}
fn get_query_size(self) -> Option<usize> {
Some(self.bv.len())
}
}
impl DensityTracker {
pub fn new() -> DensityTracker {
DensityTracker {
bv: BitVec::new(),
total_density: 0
}
}
pub fn add_element(&mut self) {
self.bv.push(false);
}
pub fn inc(&mut self, idx: usize) {
if !self.bv.get(idx).unwrap() {
self.bv.set(idx, true);
self.total_density += 1;
}
}
pub fn get_total_density(&self) -> usize {
self.total_density
}
}
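// A minimal usage sketch with illustrative values (not part of the original
// tests): every allocated element starts out unused and `inc` marks it used
// at most once.
#[test]
fn test_density_tracker() {
    let mut tracker = DensityTracker::new();
    for _ in 0..4 {
        tracker.add_element();
    }
    tracker.inc(1);
    tracker.inc(1); // marking the same element twice does not double-count
    tracker.inc(3);
    assert_eq!(tracker.get_total_density(), 2);
}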

@ -6,7 +6,7 @@ use pairing::{
EncodedPoint
};
use ff::{
use pairing::ff::{
PrimeField,
PrimeFieldRepr,
Field,

93
src/tests/mod.rs Normal file

@ -0,0 +1,93 @@
use pairing::{
Engine
};
use pairing::ff:: {
Field,
PrimeField,
};
pub mod dummy_engine;
use self::dummy_engine::*;
use std::marker::PhantomData;
use crate::{
Circuit,
ConstraintSystem,
SynthesisError
};
pub(crate) struct XORDemo<E: Engine> {
pub(crate) a: Option<bool>,
pub(crate) b: Option<bool>,
pub(crate) _marker: PhantomData<E>
}
impl<E: Engine> Circuit<E> for XORDemo<E> {
fn synthesize<CS: ConstraintSystem<E>>(
self,
cs: &mut CS
) -> Result<(), SynthesisError>
{
let a_var = cs.alloc(|| "a", || {
if self.a.is_some() {
if self.a.unwrap() {
Ok(E::Fr::one())
} else {
Ok(E::Fr::zero())
}
} else {
Err(SynthesisError::AssignmentMissing)
}
})?;
cs.enforce(
|| "a_boolean_constraint",
|lc| lc + CS::one() - a_var,
|lc| lc + a_var,
|lc| lc
);
let b_var = cs.alloc(|| "b", || {
if self.b.is_some() {
if self.b.unwrap() {
Ok(E::Fr::one())
} else {
Ok(E::Fr::zero())
}
} else {
Err(SynthesisError::AssignmentMissing)
}
})?;
cs.enforce(
|| "b_boolean_constraint",
|lc| lc + CS::one() - b_var,
|lc| lc + b_var,
|lc| lc
);
let c_var = cs.alloc_input(|| "c", || {
if self.a.is_some() && self.b.is_some() {
if self.a.unwrap() ^ self.b.unwrap() {
Ok(E::Fr::one())
} else {
Ok(E::Fr::zero())
}
} else {
Err(SynthesisError::AssignmentMissing)
}
})?;
cs.enforce(
|| "c_xor_constraint",
|lc| lc + a_var + a_var,
|lc| lc + b_var,
|lc| lc + a_var + b_var - c_var
);
Ok(())
}
}

@ -1,7 +1,6 @@
extern crate bellman;
extern crate pairing;
extern crate rand;
extern crate ff;
// For randomness (during paramgen and proof generation)
use rand::{thread_rng, Rng};
@ -14,7 +13,7 @@ use pairing::{
Engine
};
use ff::{
use pairing::ff::{
Field,
};
@ -82,6 +81,7 @@ fn mimc<E: Engine>(
/// This is our demo circuit for proving knowledge of the
/// preimage of a MiMC hash invocation.
#[derive(Clone)]
struct MiMCDemo<'a, E: Engine> {
xl: Option<E::Fr>,
xr: Option<E::Fr>,
@ -116,12 +116,12 @@ impl<'a, E: Engine> Circuit<E> for MiMCDemo<'a, E> {
let cs = &mut cs.namespace(|| format!("round {}", i));
// tmp = (xL + Ci)^2
let mut tmp_value = xl_value.map(|mut e| {
let tmp_value = xl_value.map(|mut e| {
e.add_assign(&self.constants[i]);
e.square();
e
});
let mut tmp = cs.alloc(|| "tmp", || {
let tmp = cs.alloc(|| "tmp", || {
tmp_value.ok_or(SynthesisError::AssignmentMissing)
})?;
@ -135,14 +135,14 @@ impl<'a, E: Engine> Circuit<E> for MiMCDemo<'a, E> {
// new_xL = xR + (xL + Ci)^3
// new_xL = xR + tmp * (xL + Ci)
// new_xL - xR = tmp * (xL + Ci)
let mut new_xl_value = xl_value.map(|mut e| {
let new_xl_value = xl_value.map(|mut e| {
e.add_assign(&self.constants[i]);
e.mul_assign(&tmp_value.unwrap());
e.add_assign(&xr_value.unwrap());
e
});
let mut new_xl = if i == (MIMC_ROUNDS-1) {
let new_xl = if i == (MIMC_ROUNDS-1) {
// This is the last round, xL is our image and so
// we allocate a public input.
cs.alloc_input(|| "image", || {
@ -341,4 +341,3 @@ fn test_mimc_bn256() {
println!("Average proving time: {:?} seconds", proving_avg);
println!("Average verifying time: {:?} seconds", verifying_avg);
}