From a99132955ae88701bf6729c60a950591ec60720b Mon Sep 17 00:00:00 2001
From: Alex Vlasov
Date: Tue, 5 Mar 2019 09:44:38 +0100
Subject: [PATCH] fixed singlecore dense multiexp

---
 Cargo.toml        |  2 +-
 src/multiexp.rs   | 46 ++++++++++++++++++++--------------------------
 src/singlecore.rs | 11 +++++------
 3 files changed, 26 insertions(+), 33 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index 43d1f21..b45eb98 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -37,4 +37,4 @@ default = ["multicore", "gm17", "sonic"]
 multicore = ["futures-cpupool", "num_cpus", "crossbeam"]
 sonic = ["tiny-keccak"]
 gm17 = []
-singlecore = ["futures-cpupool"]
+singlecore = []
diff --git a/src/multiexp.rs b/src/multiexp.rs
index 1e114d9..9fbec53 100644
--- a/src/multiexp.rs
+++ b/src/multiexp.rs
@@ -212,16 +212,18 @@ fn dense_multiexp_inner<G: CurveAffine>(
     handle_trivial: bool
 ) -> Result<<G as CurveAffine>::Projective, SynthesisError>
 {
+    use std::sync::{Mutex};
     // Perform this region of the multiexp. We use a different strategy - go over region in parallel,
     // then over another region, etc. No Arc required
     let this = {
         // let mask = (1u64 << c) - 1u64;
-        let this_region = 
+        let this_region = Mutex::new(<G as CurveAffine>::Projective::zero());
+        let arc = Arc::new(this_region);
         pool.scope(bases.len(), |scope, chunk| {
-            let mut handles = vec![];
             let mut this_acc = <G as CurveAffine>::Projective::zero();
             for (base, exp) in bases.chunks(chunk).zip(exponents.chunks(chunk)) {
-                let handle = 
+                let this_region_rwlock = arc.clone();
+                // let handle = 
                 scope.spawn(move |_| {
                     let mut buckets = vec![<G as CurveAffine>::Projective::zero(); (1 << c) - 1];
                     // Accumulate the result
@@ -261,33 +263,22 @@ fn dense_multiexp_inner<G: CurveAffine>(
                         acc.add_assign(&running_sum);
                     }
 
-                    // acc contains values over this region
-                    // s.send(acc).expect("must send result");
-
-                    acc
+                    let mut guard = match this_region_rwlock.lock() {
+                        Ok(guard) => guard,
+                        Err(poisoned) => {
+                            panic!("poisoned!");
+                            // poisoned.into_inner()
+                        }
+                    };
+
+                    (*guard).add_assign(&acc);
                 });
-                handles.push(handle);
             }
-
-            // wait for all threads to finish
-            for r in handles.into_iter() {
-                let thread_result = r.join().unwrap();
-                this_acc.add_assign(&thread_result);
-            }
-
-
-            this_acc
         });
 
-        // let mut this_region = <G as CurveAffine>::Projective::zero();
-        // loop {
-        //     if r.is_empty() {
-        //         break;
-        //     }
-        //     let value = r.recv().expect("must have value");
-        //     this_region.add_assign(&value);
-        // }
+        let this_region = Arc::try_unwrap(arc).unwrap();
+        let this_region = this_region.into_inner().unwrap();
 
         this_region
     };
 
@@ -392,12 +383,15 @@ fn test_dense_multiexp() {
     use pairing::bn256::Bn256;
     use num_cpus;
 
-    const SAMPLES: usize = 1 << 22;
+    // const SAMPLES: usize = 1 << 22;
+    const SAMPLES: usize = 1 << 16;
 
     let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
     let mut v = (0..SAMPLES).map(|_| <Bn256 as ScalarEngine>::Fr::rand(rng).into_repr()).collect::<Vec<_>>();
     let g = (0..SAMPLES).map(|_| <Bn256 as Engine>::G1::rand(rng).into_affine()).collect::<Vec<_>>();
 
+    println!("Done generating test points and scalars");
+
     let pool = Worker::new();
 
     let start = std::time::Instant::now();
diff --git a/src/singlecore.rs b/src/singlecore.rs
index e669ac6..df73c80 100644
--- a/src/singlecore.rs
+++ b/src/singlecore.rs
@@ -1,17 +1,15 @@
 //! This is a dummy interface to substitute multicore worker
 //! in environments like WASM
 extern crate futures;
-extern crate futures_cpupool;
 
 use std::marker::PhantomData;
 
 use self::futures::{Future, IntoFuture, Poll};
-use self::futures_cpupool::{CpuFuture, CpuPool};
+use self::futures::future::{result, FutureResult};
 
 #[derive(Clone)]
 pub struct Worker {
     cpus: usize,
-    pool: CpuPool
 }
 
 impl Worker {
@@ -21,7 +19,6 @@ impl Worker {
     pub(crate) fn new_with_cpus(cpus: usize) -> Worker {
         Worker {
             cpus: 1,
-            pool: CpuPool::new(1)
         }
     }
 
@@ -42,8 +39,10 @@ impl Worker {
               R::Item: Send + 'static,
              R::Error: Send + 'static
     {
+        let future = f().into_future();
+
         WorkerFuture {
-            future: self.pool.spawn_fn(f)
+            future: result(future.wait())
         }
     }
 
@@ -80,7 +79,7 @@ impl<'a> Scope<'a> {
 }
 
 pub struct WorkerFuture<T, E> {
-    future: CpuFuture<T, E>
+    future: FutureResult<T, E>
 }
 
 impl<T: Send + 'static, E: Send + 'static> Future for WorkerFuture<T, E> {