fix for newer pairing and ff

commit 2d9f5528d4 (parent b6160fcd1b)

Cargo.toml (10 changed lines)
@@ -6,7 +6,7 @@ homepage = "https://github.com/matter-labs/bellman"
 license = "MIT/Apache-2.0"
 name = "bellman_ce"
 repository = "https://github.com/matter-labs/bellman"
-version = "0.3.0"
+version = "0.3.1"
 edition = "2018"

 [lib]
@@ -18,8 +18,8 @@ bit-vec = "0.4.4"
 futures = "0.1"
 cfg-if = "0.1.7"

-#pairing_ce = { path = "../pairing" }
-pairing_ce = { version = "0.17.0" }
+pairing = {package = "pairing_ce", path = "../pairing" }
+#pairing = {package = "pairing_ce", version = "0.17.0" }
 byteorder = "1"

 futures-cpupool = {version = "0.1", optional = true}
@@ -32,9 +32,7 @@ tiny-keccak = {version = "1.4.2", optional = true}
 blake2-rfc = {version = "0.2.18", optional = true}

 [features]
-#default = ["multicore"]
-default = ["multicore", "sonic"]
-#default = ["multicore", "gm17", "sonic"]
+default = ["multicore"]
 #default = ["wasm"]
 multicore = ["futures-cpupool", "num_cpus", "crossbeam"]
 sonic = ["tiny-keccak", "blake2-rfc"]
@@ -86,6 +86,48 @@ fn eval<E: Engine>(
     acc
 }

+pub(crate) fn field_elements_into_representations<E: Engine>(
+    worker: &Worker,
+    scalars: Vec<E::Fr>
+) -> Result<Vec<<E::Fr as PrimeField>::Repr>, SynthesisError>
+{
+    let mut representations = vec![<E::Fr as PrimeField>::Repr::default(); scalars.len()];
+    worker.scope(scalars.len(), |scope, chunk| {
+        for (scalar, repr) in scalars.chunks(chunk)
+                    .zip(representations.chunks_mut(chunk)) {
+            scope.spawn(move |_| {
+                for (scalar, repr) in scalar.iter()
+                                        .zip(repr.iter_mut()) {
+                    *repr = scalar.into_repr();
+                }
+            });
+        }
+    });
+
+    Ok(representations)
+}
+
+pub(crate) fn scalars_into_representations<E: Engine>(
+    worker: &Worker,
+    scalars: Vec<Scalar<E>>
+) -> Result<Vec<<E::Fr as PrimeField>::Repr>, SynthesisError>
+{
+    let mut representations = vec![<E::Fr as PrimeField>::Repr::default(); scalars.len()];
+    worker.scope(scalars.len(), |scope, chunk| {
+        for (scalar, repr) in scalars.chunks(chunk)
+                    .zip(representations.chunks_mut(chunk)) {
+            scope.spawn(move |_| {
+                for (scalar, repr) in scalar.iter()
+                                        .zip(repr.iter_mut()) {
+                    *repr = scalar.0.into_repr();
+                }
+            });
+        }
+    });
+
+    Ok(representations)
+}
+
 // This is a proving assignment with densities precalculated
 pub struct PreparedProver<E: Engine>{
     assignment: ProvingAssignment<E>,
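The two helpers added above move the witness-to-representation conversion onto the multi-threaded Worker: input and output vectors are split into matching chunks and each chunk is converted with into_repr() on its own thread. Below is a minimal, self-contained sketch of the same chunked fan-out pattern using only std::thread::scope and u64 values in place of the crate's Worker and field types; every name in it is illustrative rather than taken from the crate.

// Sketch only: stands in for field_elements_into_representations, with
// doubling playing the role of `*repr = scalar.into_repr()`.
fn into_doubled(values: Vec<u64>, workers: usize) -> Vec<u64> {
    let mut out = vec![0u64; values.len()];
    // one chunk per worker, roughly what Worker::scope hands each thread
    let chunk = (values.len() / workers.max(1)).max(1);
    std::thread::scope(|scope| {
        for (src, dst) in values.chunks(chunk).zip(out.chunks_mut(chunk)) {
            scope.spawn(move || {
                for (s, d) in src.iter().zip(dst.iter_mut()) {
                    *d = *s * 2; // per-element conversion
                }
            });
        }
    });
    out
}

fn main() {
    assert_eq!(into_doubled(vec![1, 2, 3, 4, 5], 2), vec![2, 4, 6, 8, 10]);
}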
@@ -145,7 +187,7 @@ pub fn prepare_prover<E, C>(

 impl<E:Engine> PreparedProver<E> {
     pub fn create_random_proof<R, P: ParameterSource<E>>(
-        & self,
+        self,
         params: P,
         rng: &mut R
     ) -> Result<Proof<E>, SynthesisError>
@@ -158,16 +200,16 @@ impl<E:Engine> PreparedProver<E> {
     }

     pub fn create_proof<P: ParameterSource<E>>(
-        & self,
+        self,
         mut params: P,
         r: E::Fr,
         s: E::Fr
     ) -> Result<Proof<E>, SynthesisError>
     {
-        let prover = self.assignment.clone();
+        let prover = self.assignment;
         let worker = Worker::new();

-        let vk = params.get_vk(self.assignment.input_assignment.len())?;
+        let vk = params.get_vk(prover.input_assignment.len())?;

         let stopwatch = Stopwatch::new();

@@ -202,7 +244,8 @@ impl<E:Engine> PreparedProver<E> {
             a.truncate(a_len);
             // TODO: parallelize if it's even helpful
             // TODO: in large settings it may worth to parallelize
-            let a = Arc::new(a.into_iter().map(|s| s.0.into_repr()).collect::<Vec<_>>());
+            let a = Arc::new(scalars_into_representations::<E>(&worker, a)?);
+            // let a = Arc::new(a.into_iter().map(|s| s.0.into_repr()).collect::<Vec<_>>());

             multiexp(&worker, params.get_h(a.len())?, FullDensity, a)
         };
@@ -213,13 +256,19 @@ impl<E:Engine> PreparedProver<E> {

         // TODO: Check that difference in operations for different chunks is small

+        let input_len = prover.input_assignment.len();
+        let aux_len = prover.aux_assignment.len();
+
+        let input_assignment = Arc::new(field_elements_into_representations::<E>(&worker, prover.input_assignment)?);
+        let aux_assignment = Arc::new(field_elements_into_representations::<E>(&worker, prover.aux_assignment)?);
+
         // TODO: parallelize if it's even helpful
         // TODO: in large settings it may worth to parallelize
-        let input_assignment = Arc::new(prover.input_assignment.into_iter().map(|s| s.into_repr()).collect::<Vec<_>>());
-        let aux_assignment = Arc::new(prover.aux_assignment.into_iter().map(|s| s.into_repr()).collect::<Vec<_>>());
+        // let input_assignment = Arc::new(prover.input_assignment.into_iter().map(|s| s.into_repr()).collect::<Vec<_>>());
+        // let aux_assignment = Arc::new(prover.aux_assignment.into_iter().map(|s| s.into_repr()).collect::<Vec<_>>());

-        let input_len = input_assignment.len();
-        let aux_len = aux_assignment.len();
+        // let input_len = input_assignment.len();
+        // let aux_len = aux_assignment.len();
         elog_verbose!("H query is dense in G1,\nOther queries are {} elements in G1 and {} elements in G2",
             2*(input_len + aux_len) + aux_len, input_len + aux_len);

@@ -402,153 +451,13 @@ pub fn create_random_proof<E, C, R, P: ParameterSource<E>>(

 pub fn create_proof<E, C, P: ParameterSource<E>>(
     circuit: C,
-    mut params: P,
+    params: P,
     r: E::Fr,
     s: E::Fr
 ) -> Result<Proof<E>, SynthesisError>
     where E: Engine, C: Circuit<E>
 {
-    let mut prover = ProvingAssignment {
-        a_aux_density: DensityTracker::new(),
-        b_input_density: DensityTracker::new(),
-        b_aux_density: DensityTracker::new(),
-        a: vec![],
-        b: vec![],
-        c: vec![],
-        input_assignment: vec![],
-        aux_assignment: vec![]
-    };
+    let prover = prepare_prover(circuit)?;

-    prover.alloc_input(|| "", || Ok(E::Fr::one()))?;
-
-    circuit.synthesize(&mut prover)?;
-
-    for i in 0..prover.input_assignment.len() {
-        prover.enforce(|| "",
-            |lc| lc + Variable(Index::Input(i)),
-            |lc| lc,
-            |lc| lc,
-        );
-    }
-
-    let worker = Worker::new();
-
-    let vk = params.get_vk(prover.input_assignment.len())?;
-
-    let stopwatch = Stopwatch::new();
-
-    let h = {
-        let mut a = EvaluationDomain::from_coeffs(prover.a)?;
-        let mut b = EvaluationDomain::from_coeffs(prover.b)?;
-        let mut c = EvaluationDomain::from_coeffs(prover.c)?;
-        elog_verbose!("H query domain size is {}", a.as_ref().len());
-        // here a coset is a domain where denominator (z) does not vanish
-        // inverse FFT is an interpolation
-        a.ifft(&worker);
-        // evaluate in coset
-        a.coset_fft(&worker);
-        // same is for B and C
-        b.ifft(&worker);
-        b.coset_fft(&worker);
-        c.ifft(&worker);
-        c.coset_fft(&worker);
-
-        // do A*B-C in coset
-        a.mul_assign(&worker, &b);
-        drop(b);
-        a.sub_assign(&worker, &c);
-        drop(c);
-        // z does not vanish in coset, so we divide by non-zero
-        a.divide_by_z_on_coset(&worker);
-        // interpolate back in coset
-        a.icoset_fft(&worker);
-        let mut a = a.into_coeffs();
-        let a_len = a.len() - 1;
-        a.truncate(a_len);
-        // TODO: parallelize if it's even helpful
-        // TODO: in large settings it may worth to parallelize
-        let a = Arc::new(a.into_iter().map(|s| s.0.into_repr()).collect::<Vec<_>>());
-
-        multiexp(&worker, params.get_h(a.len())?, FullDensity, a)
-    };
-
-    elog_verbose!("{} seconds for prover for H evaluation (mostly FFT)", stopwatch.elapsed());
-
-    let stopwatch = Stopwatch::new();
-
-    // TODO: Check that difference in operations for different chunks is small
-
-    // TODO: parallelize if it's even helpful
-    // TODO: in large settings it may worth to parallelize
-    let input_assignment = Arc::new(prover.input_assignment.into_iter().map(|s| s.into_repr()).collect::<Vec<_>>());
-    let aux_assignment = Arc::new(prover.aux_assignment.into_iter().map(|s| s.into_repr()).collect::<Vec<_>>());
-
-    // Run a dedicated process for dense vector
-    let l = multiexp(&worker, params.get_l(aux_assignment.len())?, FullDensity, aux_assignment.clone());
-
-    let a_aux_density_total = prover.a_aux_density.get_total_density();
-
-    let (a_inputs_source, a_aux_source) = params.get_a(input_assignment.len(), a_aux_density_total)?;
-
-    let a_inputs = multiexp(&worker, a_inputs_source, FullDensity, input_assignment.clone());
-    let a_aux = multiexp(&worker, a_aux_source, Arc::new(prover.a_aux_density), aux_assignment.clone());
-
-    let b_input_density = Arc::new(prover.b_input_density);
-    let b_input_density_total = b_input_density.get_total_density();
-    let b_aux_density = Arc::new(prover.b_aux_density);
-    let b_aux_density_total = b_aux_density.get_total_density();
-
-    let (b_g1_inputs_source, b_g1_aux_source) = params.get_b_g1(b_input_density_total, b_aux_density_total)?;
-
-    let b_g1_inputs = multiexp(&worker, b_g1_inputs_source, b_input_density.clone(), input_assignment.clone());
-    let b_g1_aux = multiexp(&worker, b_g1_aux_source, b_aux_density.clone(), aux_assignment.clone());
-
-    let (b_g2_inputs_source, b_g2_aux_source) = params.get_b_g2(b_input_density_total, b_aux_density_total)?;
-
-    let b_g2_inputs = multiexp(&worker, b_g2_inputs_source, b_input_density, input_assignment);
-    let b_g2_aux = multiexp(&worker, b_g2_aux_source, b_aux_density, aux_assignment);
-
-    if vk.delta_g1.is_zero() || vk.delta_g2.is_zero() {
-        // If this element is zero, someone is trying to perform a
-        // subversion-CRS attack.
-        return Err(SynthesisError::UnexpectedIdentity);
-    }
-
-    let mut g_a = vk.delta_g1.mul(r);
-    g_a.add_assign_mixed(&vk.alpha_g1);
-    let mut g_b = vk.delta_g2.mul(s);
-    g_b.add_assign_mixed(&vk.beta_g2);
-    let mut g_c;
-    {
-        let mut rs = r;
-        rs.mul_assign(&s);
-
-        g_c = vk.delta_g1.mul(rs);
-        g_c.add_assign(&vk.alpha_g1.mul(s));
-        g_c.add_assign(&vk.beta_g1.mul(r));
-    }
-    let mut a_answer = a_inputs.wait()?;
-    a_answer.add_assign(&a_aux.wait()?);
-    g_a.add_assign(&a_answer);
-    a_answer.mul_assign(s);
-    g_c.add_assign(&a_answer);
-
-    let mut b1_answer = b_g1_inputs.wait()?;
-    b1_answer.add_assign(&b_g1_aux.wait()?);
-    let mut b2_answer = b_g2_inputs.wait()?;
-    b2_answer.add_assign(&b_g2_aux.wait()?);
-
-    g_b.add_assign(&b2_answer);
-    b1_answer.mul_assign(r);
-    g_c.add_assign(&b1_answer);
-    g_c.add_assign(&h.wait()?);
-    g_c.add_assign(&l.wait()?);
-
-    elog_verbose!("{} seconds for prover for point multiplication", stopwatch.elapsed());
-
-    Ok(Proof {
-        a: g_a.into_affine(),
-        b: g_b.into_affine(),
-        c: g_c.into_affine()
-    })
+    prover.create_proof(params, r, s)
 }
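Together with the `& self` to `self` changes above, the public create_proof entry point now synthesizes the circuit once via prepare_prover and hands the resulting assignment to PreparedProver::create_proof by value, so the assignment is moved rather than cloned. A tiny self-contained illustration of that ownership change (all types and values below are made up for the example, not from the crate):

// Illustrative only: mirrors the &self -> self change in PreparedProver.
struct Prepared { assignment: Vec<u64> }

impl Prepared {
    // before: fn prove(&self) would need self.assignment.clone()
    // after: consume self and move the assignment out, no clone
    fn prove(self) -> u64 {
        let assignment = self.assignment; // moved, not cloned
        assignment.iter().sum()
    }
}

fn main() {
    let p = Prepared { assignment: vec![1, 2, 3] };
    assert_eq!(p.prove(), 6);
    // p can no longer be used here: prove() consumed it.
}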
@@ -3,7 +3,7 @@
 #[macro_use]

 extern crate cfg_if;
-extern crate pairing_ce as pairing_import;
+pub extern crate pairing;
 extern crate rand;
 extern crate bit_vec;
 extern crate byteorder;
@@ -43,10 +43,6 @@ cfg_if! {
     }
 }

-pub mod pairing {
-    pub use pairing_import::*;
-}
-
 mod cs;
 pub use self::cs::*;

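Because Cargo.toml now pulls the crates.io package pairing_ce in under the local name pairing, the crate root can simply `pub extern crate pairing;` and drop the hand-written `pub mod pairing` shim, while internal paths such as crate::pairing::... and downstream paths such as bellman_ce::pairing::... stay the same. A hedged sketch of downstream usage (it assumes a bellman_ce dependency and only uses trait items that appear elsewhere in this diff):

// Illustrative only: the `pairing` module here is the re-exported pairing_ce crate.
use bellman_ce::pairing::{CurveAffine, Engine};

// Size of a compressed G1 point for whichever engine the caller picks.
fn compressed_g1_size<E: Engine>() -> usize {
    std::mem::size_of::<<E::G1Affine as CurveAffine>::Compressed>()
}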
src/multiexp.rs (112 changed lines)
@@ -68,7 +68,7 @@ fn multiexp_inner<Q, D, G, S>(
         let exponents = exponents.clone();
         let density_map = density_map.clone();

-        // This looks like a Pippenger’s algorithm
+        // This is a Pippenger’s algorithm
         pool.compute(move || {
             // Accumulate the result
             let mut acc = G::Projective::zero();
@@ -153,6 +153,116 @@ fn multiexp_inner<Q, D, G, S>(
     }
 }

+fn multiexp_inner_with_prefetch<Q, D, G, S>(
+    pool: &Worker,
+    bases: S,
+    density_map: D,
+    exponents: Arc<Vec<<G::Scalar as PrimeField>::Repr>>,
+    mut skip: u32,
+    c: u32,
+    handle_trivial: bool
+) -> Box<Future<Item=<G as CurveAffine>::Projective, Error=SynthesisError>>
+    where for<'a> &'a Q: QueryDensity,
+          D: Send + Sync + 'static + Clone + AsRef<Q>,
+          G: CurveAffine,
+          S: SourceBuilder<G>
+{
+    // Perform this region of the multiexp
+    let this = {
+        let bases = bases.clone();
+        let exponents = exponents.clone();
+        let density_map = density_map.clone();
+
+        // This is a Pippenger’s algorithm
+        pool.compute(move || {
+            // Accumulate the result
+            let mut acc = G::Projective::zero();
+
+            // Build a source for the bases
+            let mut bases = bases.new();
+
+            // Create buckets to place remainders s mod 2^c,
+            // it will be 2^c - 1 buckets (no bucket for zeroes)
+
+            // Create space for the buckets
+            let mut buckets = vec![<G as CurveAffine>::Projective::zero(); (1 << c) - 1];
+
+            let zero = <G::Engine as ScalarEngine>::Fr::zero().into_repr();
+            let one = <G::Engine as ScalarEngine>::Fr::one().into_repr();
+            let padding = Arc::new(vec![zero]);
+
+            // Sort the bases into buckets
+            for ((&exp, &next_exp), density) in exponents.iter()
+                .zip(exponents.iter().skip(1).chain(padding.iter()))
+                .zip(density_map.as_ref().iter()) {
+                // no matter what happens - prefetch next bucket
+
+                // Go over density and exponents
+                if density {
+                    if exp == zero {
+                        bases.skip(1)?;
+                    } else if exp == one {
+                        if handle_trivial {
+                            bases.add_assign_mixed(&mut acc)?;
+                        } else {
+                            bases.skip(1)?;
+                        }
+                    } else {
+                        // Place multiplication into the bucket: Separate s * P as
+                        // (s/2^c) * P + (s mod 2^c) P
+                        // First multiplication is c bits less, so one can do it,
+                        // sum results from different buckets and double it c times,
+                        // then add with (s mod 2^c) P parts
+                        let mut exp = exp;
+                        exp.shr(skip);
+                        let exp = exp.as_ref()[0] % (1 << c);
+
+                        if exp != 0 {
+                            bases.add_assign_mixed(&mut buckets[(exp - 1) as usize])?;
+                        } else {
+                            bases.skip(1)?;
+                        }
+                    }
+                }
+            }
+
+            // Summation by parts
+            // e.g. 3a + 2b + 1c = a +
+            //        (a) + b +
+            //        ((a) + b) + c
+            let mut running_sum = G::Projective::zero();
+            for exp in buckets.into_iter().rev() {
+                running_sum.add_assign(&exp);
+                acc.add_assign(&running_sum);
+            }
+
+            Ok(acc)
+        })
+    };
+
+    skip += c;
+
+    if skip >= <G::Engine as ScalarEngine>::Fr::NUM_BITS {
+        // There isn't another region.
+        Box::new(this)
+    } else {
+        // There's another region more significant. Calculate and join it with
+        // this region recursively.
+        Box::new(
+            this.join(multiexp_inner(pool, bases, density_map, exponents, skip, c, false))
+                .map(move |(this, mut higher)| {
+                    for _ in 0..c {
+                        higher.double();
+                    }
+
+                    higher.add_assign(&this);
+
+                    higher
+                })
+        )
+    }
+}
+
 /// Perform multi-exponentiation. The caller is responsible for ensuring the
 /// query size is the same as the number of exponents.
 pub fn multiexp<Q, D, G, S>(
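The new multiexp_inner_with_prefetch keeps the structure of multiexp_inner: each recursion level is responsible for one c-bit window of every exponent, bases are sorted into 2^c - 1 buckets by window value, the buckets are folded with the running-sum ("summation by parts") trick, and the result of the more significant region is doubled c times before being added in. As committed, the loop already destructures the following exponent (next_exp) for the prefetch but does not use it yet. A small self-contained sketch of the windowing and bucket-folding arithmetic on plain u64 values (illustrative names only, nothing here is from the crate):

// Splits an exponent into little-endian c-bit windows, the (s mod 2^c) pieces
// that each region of the recursion is responsible for.
fn windows(mut exp: u64, c: u32) -> Vec<u64> {
    let mut out = Vec::new();
    while exp != 0 {
        out.push(exp % (1u64 << c)); // bucket index for this region (0 => skip)
        exp >>= c;                   // the rest is handled by the next region
    }
    out
}

// The "summation by parts" fold: with buckets[i] holding the sum of bases whose
// window value is i+1, one reverse pass yields sum over i of (i+1) * buckets[i].
fn fold_buckets(buckets: &[u64]) -> u64 {
    let (mut running, mut acc) = (0u64, 0u64);
    for b in buckets.iter().rev() {
        running += b;
        acc += running;
    }
    acc
}

fn main() {
    // 117 = 0b1110101 with c = 3 splits into [0b101, 0b110, 0b001];
    // recombining doubles the higher regions c times: 5 + 8*(6 + 8*1) = 117.
    assert_eq!(windows(0b111_0101, 3), vec![0b101, 0b110, 0b001]);
    // 1*10 + 2*20 + 3*30 = 140
    assert_eq!(fold_buckets(&[10, 20, 30]), 140);
}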
@@ -3,6 +3,7 @@ use crate::pairing::{
     CurveProjective,
     CurveAffine,
     GroupDecodingError,
+    RawEncodable,
     EncodedPoint
 };

@@ -90,6 +91,23 @@ impl Field for Fr {
     }
 }

+impl RawEncodable for Fr {
+    fn into_raw_uncompressed_le(&self) -> Self::Uncompressed {
+        Self::Uncompressed::empty()
+    }
+
+    fn from_raw_uncompressed_le_unchecked(
+        _encoded: &Self::Uncompressed,
+        _infinity: bool
+    ) -> Result<Self, GroupDecodingError> {
+        Ok(<Self as Field>::zero())
+    }
+
+    fn from_raw_uncompressed_le(encoded: &Self::Uncompressed, _infinity: bool) -> Result<Self, GroupDecodingError> {
+        Self::from_raw_uncompressed_le_unchecked(&encoded, _infinity)
+    }
+}
+
 impl SqrtField for Fr {
     fn legendre(&self) -> LegendreSymbol {
         // s = self^((r - 1) // 2)
@@ -247,6 +265,14 @@ impl PrimeField for Fr {
         }
     }

+    fn from_raw_repr(repr: FrRepr) -> Result<Self, PrimeFieldDecodingError> {
+        if repr.0[0] >= (MODULUS_R.0 as u64) {
+            Err(PrimeFieldDecodingError::NotInField(format!("{}", repr)))
+        } else {
+            Ok(Fr(Wrapping(repr.0[0] as u32)))
+        }
+    }
+
     fn into_repr(&self) -> FrRepr {
         FrRepr::from(*self)
     }
@@ -255,6 +281,10 @@ impl PrimeField for Fr {
         Fr(MODULUS_R).into()
     }

+    fn into_raw_repr(&self) -> FrRepr {
+        FrRepr::from(*self)
+    }
+
     fn multiplicative_generator() -> Fr {
         Fr(Wrapping(5))
     }