From 3e22a75c85787ef4a9df3eecf86e18e4b7930a92 Mon Sep 17 00:00:00 2001 From: Sean Bowe Date: Tue, 3 Apr 2018 19:03:34 -0600 Subject: [PATCH 01/18] Initial commit --- .gitignore | 4 + COPYRIGHT | 14 ++ Cargo.toml | 14 ++ LICENSE-APACHE | 201 ++++++++++++++++++++++ LICENSE-MIT | 23 +++ README.md | 19 +++ examples/mimc.rs | 250 ++++++++++++++++++++++++++++ src/lib.rs | 425 +++++++++++++++++++++++++++++++++++++++++++++++ 8 files changed, 950 insertions(+) create mode 100644 .gitignore create mode 100644 COPYRIGHT create mode 100644 Cargo.toml create mode 100644 LICENSE-APACHE create mode 100644 LICENSE-MIT create mode 100644 README.md create mode 100644 examples/mimc.rs create mode 100644 src/lib.rs diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..1e98878 --- /dev/null +++ b/.gitignore @@ -0,0 +1,4 @@ +phase1* +/target/ +**/*.rs.bk +Cargo.lock diff --git a/COPYRIGHT b/COPYRIGHT new file mode 100644 index 0000000..3b6df59 --- /dev/null +++ b/COPYRIGHT @@ -0,0 +1,14 @@ +Copyrights in the "phase2" library are retained by their contributors. No +copyright assignment is required to contribute to the "phase2" library. + +The "phase2" library is licensed under either of + + * Apache License, Version 2.0, (see ./LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0) + * MIT license (see ./LICENSE-MIT or http://opensource.org/licenses/MIT) + +at your option. + +Unless you explicitly state otherwise, any contribution intentionally +submitted for inclusion in the work by you, as defined in the Apache-2.0 +license, shall be dual licensed as above, without any additional terms or +conditions. diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 0000000..cc31874 --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "phase2" +version = "0.0.1" +authors = ["Sean Bowe "] +description = "zk-SNARK MPC" +documentation = "https://github.com/ebfull/phase2" +homepage = "https://github.com/ebfull/phase2" +license = "MIT/Apache-2.0" +repository = "https://github.com/ebfull/phase2" + +[dependencies] +pairing = "0.14" +rand = "0.4" +bellman = "0.1" diff --git a/LICENSE-APACHE b/LICENSE-APACHE new file mode 100644 index 0000000..16fe87b --- /dev/null +++ b/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/LICENSE-MIT b/LICENSE-MIT new file mode 100644 index 0000000..31aa793 --- /dev/null +++ b/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/README.md b/README.md new file mode 100644 index 0000000..74637de --- /dev/null +++ b/README.md @@ -0,0 +1,19 @@ +# phase2 [![Crates.io](https://img.shields.io/crates/v/phase2.svg)](https://crates.io/crates/phase2) # + +Under construction. ;) + +## License + +Licensed under either of + + * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) + +at your option. + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally +submitted for inclusion in the work by you, as defined in the Apache-2.0 +license, shall be dual licensed as above, without any additional terms or +conditions. diff --git a/examples/mimc.rs b/examples/mimc.rs new file mode 100644 index 0000000..9405413 --- /dev/null +++ b/examples/mimc.rs @@ -0,0 +1,250 @@ +extern crate bellman; +extern crate pairing; +extern crate rand; +extern crate phase2; + +// For randomness (during paramgen and proof generation) +use rand::{thread_rng, Rng}; + +// For benchmarking +use std::time::{Duration, Instant}; + +// Bring in some tools for using pairing-friendly curves +use pairing::{ + Engine, + Field, +}; + +// We're going to use the BLS12-381 pairing-friendly elliptic curve. +use pairing::bls12_381::{ + Bls12 +}; + +// We'll use these interfaces to construct our circuit. +use bellman::{ + Circuit, + ConstraintSystem, + SynthesisError +}; + +// We're going to use the Groth16 proving system. +use bellman::groth16::{ + Proof, + prepare_verifying_key, + create_random_proof, + verify_proof, +}; + +const MIMC_ROUNDS: usize = 322; + +/// This is an implementation of MiMC, specifically a +/// variant named `LongsightF322p3` for BLS12-381. +/// See http://eprint.iacr.org/2016/492 for more +/// information about this construction. +/// +/// ``` +/// function LongsightF322p3(xL ⦂ Fp, xR ⦂ Fp) { +/// for i from 0 up to 321 { +/// xL, xR := xR + (xL + Ci)^3, xL +/// } +/// return xL +/// } +/// ``` +fn mimc( + mut xl: E::Fr, + mut xr: E::Fr, + constants: &[E::Fr] +) -> E::Fr +{ + assert_eq!(constants.len(), MIMC_ROUNDS); + + for i in 0..MIMC_ROUNDS { + let mut tmp1 = xl; + tmp1.add_assign(&constants[i]); + let mut tmp2 = tmp1; + tmp2.square(); + tmp2.mul_assign(&tmp1); + tmp2.add_assign(&xr); + xr = xl; + xl = tmp2; + } + + xl +} + +/// This is our demo circuit for proving knowledge of the +/// preimage of a MiMC hash invocation. +struct MiMCDemo<'a, E: Engine> { + xl: Option, + xr: Option, + constants: &'a [E::Fr] +} + +/// Our demo circuit implements this `Circuit` trait which +/// is used during paramgen and proving in order to +/// synthesize the constraint system. +impl<'a, E: Engine> Circuit for MiMCDemo<'a, E> { + fn synthesize>( + self, + cs: &mut CS + ) -> Result<(), SynthesisError> + { + assert_eq!(self.constants.len(), MIMC_ROUNDS); + + // Allocate the first component of the preimage. + let mut xl_value = self.xl; + let mut xl = cs.alloc(|| "preimage xl", || { + xl_value.ok_or(SynthesisError::AssignmentMissing) + })?; + + // Allocate the second component of the preimage. 
+ let mut xr_value = self.xr; + let mut xr = cs.alloc(|| "preimage xr", || { + xr_value.ok_or(SynthesisError::AssignmentMissing) + })?; + + for i in 0..MIMC_ROUNDS { + // xL, xR := xR + (xL + Ci)^3, xL + let cs = &mut cs.namespace(|| format!("round {}", i)); + + // tmp = (xL + Ci)^2 + let mut tmp_value = xl_value.map(|mut e| { + e.add_assign(&self.constants[i]); + e.square(); + e + }); + let mut tmp = cs.alloc(|| "tmp", || { + tmp_value.ok_or(SynthesisError::AssignmentMissing) + })?; + + cs.enforce( + || "tmp = (xL + Ci)^2", + |lc| lc + xl + (self.constants[i], CS::one()), + |lc| lc + xl + (self.constants[i], CS::one()), + |lc| lc + tmp + ); + + // new_xL = xR + (xL + Ci)^3 + // new_xL = xR + tmp * (xL + Ci) + // new_xL - xR = tmp * (xL + Ci) + let mut new_xl_value = xl_value.map(|mut e| { + e.add_assign(&self.constants[i]); + e.mul_assign(&tmp_value.unwrap()); + e.add_assign(&xr_value.unwrap()); + e + }); + + let mut new_xl = if i == (MIMC_ROUNDS-1) { + // This is the last round, xL is our image and so + // we allocate a public input. + cs.alloc_input(|| "image", || { + new_xl_value.ok_or(SynthesisError::AssignmentMissing) + })? + } else { + cs.alloc(|| "new_xl", || { + new_xl_value.ok_or(SynthesisError::AssignmentMissing) + })? + }; + + cs.enforce( + || "new_xL = xR + (xL + Ci)^3", + |lc| lc + tmp, + |lc| lc + xl + (self.constants[i], CS::one()), + |lc| lc + new_xl - xr + ); + + // xR = xL + xr = xl; + xr_value = xl_value; + + // xL = new_xL + xl = new_xl; + xl_value = new_xl_value; + } + + Ok(()) + } +} + +fn main() { + // This may not be cryptographically safe, use + // `OsRng` (for example) in production software. + let rng = &mut thread_rng(); + + // Generate the MiMC round constants + let constants = (0..MIMC_ROUNDS).map(|_| rng.gen()).collect::>(); + + println!("Creating parameters..."); + + // Create parameters for our circuit + let params = { + let c = MiMCDemo:: { + xl: None, + xr: None, + constants: &constants + }; + + phase2::new_parameters(c).unwrap() + }; + + // Prepare the verification key (for proof verification) + let pvk = prepare_verifying_key(¶ms.vk); + + println!("Creating proofs..."); + + // Let's benchmark stuff! + const SAMPLES: u32 = 50; + let mut total_proving = Duration::new(0, 0); + let mut total_verifying = Duration::new(0, 0); + + // Just a place to put the proof data, so we can + // benchmark deserialization. + let mut proof_vec = vec![]; + + for _ in 0..SAMPLES { + // Generate a random preimage and compute the image + let xl = rng.gen(); + let xr = rng.gen(); + let image = mimc::(xl, xr, &constants); + + proof_vec.truncate(0); + + let start = Instant::now(); + { + // Create an instance of our circuit (with the + // witness) + let c = MiMCDemo { + xl: Some(xl), + xr: Some(xr), + constants: &constants + }; + + // Create a groth16 proof with our parameters. 
+ let proof = create_random_proof(c, ¶ms, rng).unwrap(); + + proof.write(&mut proof_vec).unwrap(); + } + + total_proving += start.elapsed(); + + let start = Instant::now(); + let proof = Proof::read(&proof_vec[..]).unwrap(); + // Check the proof + assert!(verify_proof( + &pvk, + &proof, + &[image] + ).unwrap()); + total_verifying += start.elapsed(); + } + let proving_avg = total_proving / SAMPLES; + let proving_avg = proving_avg.subsec_nanos() as f64 / 1_000_000_000f64 + + (proving_avg.as_secs() as f64); + + let verifying_avg = total_verifying / SAMPLES; + let verifying_avg = verifying_avg.subsec_nanos() as f64 / 1_000_000_000f64 + + (verifying_avg.as_secs() as f64); + + println!("Average proving time: {:?} seconds", proving_avg); + println!("Average verifying time: {:?} seconds", verifying_avg); +} diff --git a/src/lib.rs b/src/lib.rs new file mode 100644 index 0000000..523417b --- /dev/null +++ b/src/lib.rs @@ -0,0 +1,425 @@ +extern crate pairing; +extern crate bellman; + +use std::{ + io::{ + self, + Read, + BufReader + }, + fs::{ + File + }, + sync::{ + Arc + } +}; + +use pairing::{ + Engine, + Field, + EncodedPoint, + CurveAffine, + CurveProjective, + bls12_381::{ + Bls12, + Fr, + G1, + G2, + G1Affine, + G1Uncompressed, + G2Affine, + G2Uncompressed + } +}; + +use bellman::{ + Circuit, + SynthesisError, + Variable, + Index, + ConstraintSystem, + LinearCombination, + groth16::{ + Parameters, + VerifyingKey + }, + multicore::Worker +}; + +/// This is our assembly structure that we'll use to synthesize the +/// circuit into a QAP. +struct KeypairAssembly { + num_inputs: usize, + num_aux: usize, + num_constraints: usize, + at_inputs: Vec>, + bt_inputs: Vec>, + ct_inputs: Vec>, + at_aux: Vec>, + bt_aux: Vec>, + ct_aux: Vec> +} + +impl ConstraintSystem for KeypairAssembly { + type Root = Self; + + fn alloc( + &mut self, + _: A, + _: F + ) -> Result + where F: FnOnce() -> Result, A: FnOnce() -> AR, AR: Into + { + // There is no assignment, so we don't even invoke the + // function for obtaining one. + + let index = self.num_aux; + self.num_aux += 1; + + self.at_aux.push(vec![]); + self.bt_aux.push(vec![]); + self.ct_aux.push(vec![]); + + Ok(Variable::new_unchecked(Index::Aux(index))) + } + + fn alloc_input( + &mut self, + _: A, + _: F + ) -> Result + where F: FnOnce() -> Result, A: FnOnce() -> AR, AR: Into + { + // There is no assignment, so we don't even invoke the + // function for obtaining one. 
+ + let index = self.num_inputs; + self.num_inputs += 1; + + self.at_inputs.push(vec![]); + self.bt_inputs.push(vec![]); + self.ct_inputs.push(vec![]); + + Ok(Variable::new_unchecked(Index::Input(index))) + } + + fn enforce( + &mut self, + _: A, + a: LA, + b: LB, + c: LC + ) + where A: FnOnce() -> AR, AR: Into, + LA: FnOnce(LinearCombination) -> LinearCombination, + LB: FnOnce(LinearCombination) -> LinearCombination, + LC: FnOnce(LinearCombination) -> LinearCombination + { + fn eval( + l: LinearCombination, + inputs: &mut [Vec<(E::Fr, usize)>], + aux: &mut [Vec<(E::Fr, usize)>], + this_constraint: usize + ) + { + for &(var, coeff) in l.as_ref() { + match var.get_unchecked() { + Index::Input(id) => inputs[id].push((coeff, this_constraint)), + Index::Aux(id) => aux[id].push((coeff, this_constraint)) + } + } + } + + eval(a(LinearCombination::zero()), &mut self.at_inputs, &mut self.at_aux, self.num_constraints); + eval(b(LinearCombination::zero()), &mut self.bt_inputs, &mut self.bt_aux, self.num_constraints); + eval(c(LinearCombination::zero()), &mut self.ct_inputs, &mut self.ct_aux, self.num_constraints); + + self.num_constraints += 1; + } + + fn push_namespace(&mut self, _: N) + where NR: Into, N: FnOnce() -> NR + { + // Do nothing; we don't care about namespaces in this context. + } + + fn pop_namespace(&mut self) + { + // Do nothing; we don't care about namespaces in this context. + } + + fn get_root(&mut self) -> &mut Self::Root { + self + } +} + +pub fn new_parameters( + circuit: C, +) -> Result, SynthesisError> + where C: Circuit +{ + let mut assembly = KeypairAssembly { + num_inputs: 0, + num_aux: 0, + num_constraints: 0, + at_inputs: vec![], + bt_inputs: vec![], + ct_inputs: vec![], + at_aux: vec![], + bt_aux: vec![], + ct_aux: vec![] + }; + + // Allocate the "one" input variable + assembly.alloc_input(|| "", || Ok(Fr::one()))?; + + // Synthesize the circuit. 
+ circuit.synthesize(&mut assembly)?; + + // Input constraints to ensure full density of IC query + // x * 0 = 0 + for i in 0..assembly.num_inputs { + assembly.enforce(|| "", + |lc| lc + Variable::new_unchecked(Index::Input(i)), + |lc| lc, + |lc| lc, + ); + } + + // Compute the size of our evaluation domain + let mut m = 1; + let mut exp = 0; + while m < assembly.num_constraints { + m *= 2; + exp += 1; + + // Powers of Tau ceremony can't support more than 2^21 + if exp > 21 { + return Err(SynthesisError::PolynomialDegreeTooLarge) + } + } + + // Try to load "phase1radix2m{}" + let f = match File::open(format!("phase1radix2m{}", exp)) { + Ok(f) => f, + Err(e) => { + panic!("Couldn't load phase1radix2m{}: {:?}", exp, e); + } + }; + let f = &mut BufReader::with_capacity(1024 * 1024, f); + + let read_g1 = |reader: &mut BufReader| -> io::Result { + let mut repr = G1Uncompressed::empty(); + reader.read_exact(repr.as_mut())?; + + repr.into_affine_unchecked() + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + .and_then(|e| if e.is_zero() { + Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity")) + } else { + Ok(e) + }) + }; + + let read_g2 = |reader: &mut BufReader| -> io::Result { + let mut repr = G2Uncompressed::empty(); + reader.read_exact(repr.as_mut())?; + + repr.into_affine_unchecked() + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + .and_then(|e| if e.is_zero() { + Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity")) + } else { + Ok(e) + }) + }; + + let alpha = read_g1(f)?; + let beta_g1 = read_g1(f)?; + let beta_g2 = read_g2(f)?; + + let mut coeffs_g1 = Vec::with_capacity(m); + for _ in 0..m { + coeffs_g1.push(read_g1(f)?); + } + + let mut coeffs_g2 = Vec::with_capacity(m); + for _ in 0..m { + coeffs_g2.push(read_g2(f)?); + } + + let mut alpha_coeffs_g1 = Vec::with_capacity(m); + for _ in 0..m { + alpha_coeffs_g1.push(read_g1(f)?); + } + + let mut beta_coeffs_g1 = Vec::with_capacity(m); + for _ in 0..m { + beta_coeffs_g1.push(read_g1(f)?); + } + + // These are `Arc` so that later it'll be easier + // to use multiexp during QAP evaluation (which + // requires a futures-based API) + let coeffs_g1 = Arc::new(coeffs_g1); + let coeffs_g2 = Arc::new(coeffs_g2); + let alpha_coeffs_g1 = Arc::new(alpha_coeffs_g1); + let beta_coeffs_g1 = Arc::new(beta_coeffs_g1); + + let mut h = Vec::with_capacity(m - 1); + for _ in 0..(m - 1) { + h.push(read_g1(f)?); + } + + let mut ic = vec![G1::zero(); assembly.num_inputs]; + let mut l = vec![G1::zero(); assembly.num_aux]; + let mut a_g1 = vec![G1::zero(); assembly.num_inputs + assembly.num_aux]; + let mut b_g1 = vec![G1::zero(); assembly.num_inputs + assembly.num_aux]; + let mut b_g2 = vec![G2::zero(); assembly.num_inputs + assembly.num_aux]; + + fn eval( + // Lagrange coefficients for tau + coeffs_g1: Arc>, + coeffs_g2: Arc>, + alpha_coeffs_g1: Arc>, + beta_coeffs_g1: Arc>, + + // QAP polynomials + at: &[Vec<(Fr, usize)>], + bt: &[Vec<(Fr, usize)>], + ct: &[Vec<(Fr, usize)>], + + // Resulting evaluated QAP polynomials + a_g1: &mut [G1], + b_g1: &mut [G1], + b_g2: &mut [G2], + ext: &mut [G1], + + // Worker + worker: &Worker + ) + { + // Sanity check + assert_eq!(a_g1.len(), at.len()); + assert_eq!(a_g1.len(), bt.len()); + assert_eq!(a_g1.len(), ct.len()); + assert_eq!(a_g1.len(), b_g1.len()); + assert_eq!(a_g1.len(), b_g2.len()); + assert_eq!(a_g1.len(), ext.len()); + + // Evaluate polynomials in multiple threads + worker.scope(a_g1.len(), |scope, chunk| { + for ((((((a_g1, b_g1), b_g2), ext), at), bt), 
ct) in + a_g1.chunks_mut(chunk) + .zip(b_g1.chunks_mut(chunk)) + .zip(b_g2.chunks_mut(chunk)) + .zip(ext.chunks_mut(chunk)) + .zip(at.chunks(chunk)) + .zip(bt.chunks(chunk)) + .zip(ct.chunks(chunk)) + { + let coeffs_g1 = coeffs_g1.clone(); + let coeffs_g2 = coeffs_g2.clone(); + let alpha_coeffs_g1 = alpha_coeffs_g1.clone(); + let beta_coeffs_g1 = beta_coeffs_g1.clone(); + + scope.spawn(move || { + for ((((((a_g1, b_g1), b_g2), ext), at), bt), ct) in + a_g1.iter_mut() + .zip(b_g1.iter_mut()) + .zip(b_g2.iter_mut()) + .zip(ext.iter_mut()) + .zip(at.iter()) + .zip(bt.iter()) + .zip(ct.iter()) + { + for &(coeff, lag) in at { + a_g1.add_assign(&coeffs_g1[lag].mul(coeff)); + ext.add_assign(&beta_coeffs_g1[lag].mul(coeff)); + } + + for &(coeff, lag) in bt { + b_g1.add_assign(&coeffs_g1[lag].mul(coeff)); + b_g2.add_assign(&coeffs_g2[lag].mul(coeff)); + ext.add_assign(&alpha_coeffs_g1[lag].mul(coeff)); + } + + for &(coeff, lag) in ct { + ext.add_assign(&coeffs_g1[lag].mul(coeff)); + } + } + + // Batch normalize + G1::batch_normalization(a_g1); + G1::batch_normalization(b_g1); + G2::batch_normalization(b_g2); + G1::batch_normalization(ext); + }); + } + }); + } + + let worker = Worker::new(); + + // Evaluate for inputs. + eval( + coeffs_g1.clone(), + coeffs_g2.clone(), + alpha_coeffs_g1.clone(), + beta_coeffs_g1.clone(), + &assembly.at_inputs, + &assembly.bt_inputs, + &assembly.ct_inputs, + &mut a_g1[0..assembly.num_inputs], + &mut b_g1[0..assembly.num_inputs], + &mut b_g2[0..assembly.num_inputs], + &mut ic, + &worker + ); + + // Evaluate for auxillary variables. + eval( + coeffs_g1.clone(), + coeffs_g2.clone(), + alpha_coeffs_g1.clone(), + beta_coeffs_g1.clone(), + &assembly.at_aux, + &assembly.bt_aux, + &assembly.ct_aux, + &mut a_g1[assembly.num_inputs..], + &mut b_g1[assembly.num_inputs..], + &mut b_g2[assembly.num_inputs..], + &mut l, + &worker + ); + + // Don't allow any elements be unconstrained, so that + // the L query is always fully dense. + for e in l.iter() { + if e.is_zero() { + return Err(SynthesisError::UnconstrainedVariable); + } + } + + let vk = VerifyingKey { + alpha_g1: alpha, + beta_g1: beta_g1, + beta_g2: beta_g2, + gamma_g2: G2Affine::one(), + delta_g1: G1Affine::one(), + delta_g2: G2Affine::one(), + ic: ic.into_iter().map(|e| e.into_affine()).collect() + }; + + Ok(Parameters { + vk: vk, + h: Arc::new(h), + l: Arc::new(l.into_iter().map(|e| e.into_affine()).collect()), + + // Filter points at infinity away from A/B queries + a: Arc::new(a_g1.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect()), + b_g1: Arc::new(b_g1.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect()), + b_g2: Arc::new(b_g2.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect()) + }) +} From e11d51354fa383dabb2cdce5c74d63da337b27fe Mon Sep 17 00:00:00 2001 From: Sean Bowe Date: Thu, 5 Apr 2018 12:59:00 -0600 Subject: [PATCH 02/18] MPCParameters demonstration. 
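This patch wraps the Groth16 `Parameters` in an `MPCParameters` struct that carries a `cs_hash` and a transcript of contributions. A minimal sketch of the contribution round it demonstrates, mirroring the calls added to `examples/mimc.rs` below (assumptions: `rng` is a `rand::Rng` and `c` is the witness-free circuit, exactly as in that example):

```rust
// Illustrative only: follows the examples/mimc.rs changes in this patch.
let mut params = phase2::new_parameters(c).unwrap(); // now returns MPCParameters
let old_params = params.clone();                     // snapshot for later verification

// Sample a fresh random delta together with a proof of knowledge of it.
let (pubkey, privkey) = phase2::keypair(rng, &params);

// Apply delta^-1 to the H and L queries and delta to delta_g1/delta_g2,
// then record the public key in the transcript.
params.transform(&pubkey, &privkey);
drop(privkey); // the secret delta must be destroyed

// Anyone holding both states can check the transition.
assert!(phase2::verify_transform(&old_params, &params));

// Extract ordinary bellman parameters for proving and verifying.
let params = params.params();
```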
--- Cargo.toml | 6 + examples/mimc.rs | 18 +- src/lib.rs | 451 ++++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 471 insertions(+), 4 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index cc31874..8d74dfb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,3 +12,9 @@ repository = "https://github.com/ebfull/phase2" pairing = "0.14" rand = "0.4" bellman = "0.1" +byteorder = "1" +blake2 = "0.6.1" +num_cpus = "1" +crossbeam = "0.3" +generic-array = "0.8.3" +typenum = "1.9.0" diff --git a/examples/mimc.rs b/examples/mimc.rs index 9405413..25c58b0 100644 --- a/examples/mimc.rs +++ b/examples/mimc.rs @@ -177,7 +177,7 @@ fn main() { println!("Creating parameters..."); // Create parameters for our circuit - let params = { + let mut params = { let c = MiMCDemo:: { xl: None, xr: None, @@ -187,6 +187,20 @@ fn main() { phase2::new_parameters(c).unwrap() }; + let old_params = params.clone(); + let (pubkey, privkey) = phase2::keypair(rng, ¶ms); + params.transform(&pubkey, &privkey); + + assert!(phase2::verify_transform(&old_params, ¶ms)); + + let old_params = params.clone(); + let (pubkey, privkey) = phase2::keypair(rng, ¶ms); + params.transform(&pubkey, &privkey); + + assert!(phase2::verify_transform(&old_params, ¶ms)); + + let params = params.params(); + // Prepare the verification key (for proof verification) let pvk = prepare_verifying_key(¶ms.vk); @@ -220,7 +234,7 @@ fn main() { }; // Create a groth16 proof with our parameters. - let proof = create_random_proof(c, ¶ms, rng).unwrap(); + let proof = create_random_proof(c, params, rng).unwrap(); proof.write(&mut proof_vec).unwrap(); } diff --git a/src/lib.rs b/src/lib.rs index 523417b..5182a60 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,10 +1,27 @@ extern crate pairing; extern crate bellman; +extern crate rand; +extern crate byteorder; +extern crate blake2; +extern crate num_cpus; +extern crate crossbeam; +extern crate generic_array; +extern crate typenum; + +use blake2::{Blake2b, Digest}; +use generic_array::GenericArray; +use typenum::consts::U64; + +use byteorder::{ + BigEndian, + ReadBytesExt +}; use std::{ io::{ self, Read, + Write, BufReader }, fs::{ @@ -17,10 +34,12 @@ use std::{ use pairing::{ Engine, + PrimeField, Field, EncodedPoint, CurveAffine, CurveProjective, + Wnaf, bls12_381::{ Bls12, Fr, @@ -47,6 +66,13 @@ use bellman::{ multicore::Worker }; +use rand::{ + Rng, + Rand, + ChaChaRng, + SeedableRng +}; + /// This is our assembly structure that we'll use to synthesize the /// circuit into a QAP. 
struct KeypairAssembly { @@ -156,7 +182,7 @@ impl ConstraintSystem for KeypairAssembly { pub fn new_parameters( circuit: C, -) -> Result, SynthesisError> +) -> Result where C: Circuit { let mut assembly = KeypairAssembly { @@ -412,7 +438,7 @@ pub fn new_parameters( ic: ic.into_iter().map(|e| e.into_affine()).collect() }; - Ok(Parameters { + let params = Parameters { vk: vk, h: Arc::new(h), l: Arc::new(l.into_iter().map(|e| e.into_affine()).collect()), @@ -421,5 +447,426 @@ pub fn new_parameters( a: Arc::new(a_g1.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect()), b_g1: Arc::new(b_g1.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect()), b_g2: Arc::new(b_g2.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect()) + }; + + let h = { + let sink = io::sink(); + let mut sink = HashWriter::new(sink); + + params.write(&mut sink).unwrap(); + + sink.into_hash() + }; + + let mut cs_hash = [0; 64]; + cs_hash.copy_from_slice(h.as_ref()); + + Ok(MPCParameters { + params: params, + cs_hash: cs_hash, + contributions: vec![] }) } + +/// MPC parameters are just like bellman `Parameters` except, when serialized, +/// they contain a transcript of contributions at the end, which can be verified. +#[derive(Clone)] +pub struct MPCParameters { + params: Parameters, + cs_hash: [u8; 64], + contributions: Vec +} + +impl MPCParameters { + pub fn params(&self) -> &Parameters { + &self.params + } + + pub fn transform( + &mut self, + pubkey: &PublicKey, + privkey: &PrivateKey + ) + { + fn batch_exp(bases: &mut [C], coeff: C::Scalar) { + let coeff = coeff.into_repr(); + + let mut projective = vec![C::Projective::zero(); bases.len()]; + let cpus = num_cpus::get(); + let chunk_size = if bases.len() < cpus { + 1 + } else { + bases.len() / cpus + }; + + // Perform wNAF over multiple cores, placing results into `projective`. + crossbeam::scope(|scope| { + for (bases, projective) in bases.chunks_mut(chunk_size) + .zip(projective.chunks_mut(chunk_size)) + { + scope.spawn(move || { + let mut wnaf = Wnaf::new(); + + for (base, projective) in bases.iter_mut() + .zip(projective.iter_mut()) + { + *projective = wnaf.base(base.into_projective(), 1).scalar(coeff); + } + }); + } + }); + + // Perform batch normalization + crossbeam::scope(|scope| { + for projective in projective.chunks_mut(chunk_size) + { + scope.spawn(move || { + C::Projective::batch_normalization(projective); + }); + } + }); + + // Turn it all back into affine points + for (projective, affine) in projective.iter().zip(bases.iter_mut()) { + *affine = projective.into_affine(); + } + } + + let delta_inv = privkey.delta.inverse().unwrap(); + let mut l = (&self.params.l[..]).to_vec(); + let mut h = (&self.params.h[..]).to_vec(); + batch_exp(&mut l, delta_inv); + batch_exp(&mut h, delta_inv); + self.params.l = Arc::new(l); + self.params.h = Arc::new(h); + + self.params.vk.delta_g1 = self.params.vk.delta_g1.mul(privkey.delta).into_affine(); + self.params.vk.delta_g2 = self.params.vk.delta_g2.mul(privkey.delta).into_affine(); + + self.contributions.push(pubkey.clone()); + } +} + +#[derive(Clone)] +pub struct PublicKey { + /// This is the delta (in G1) after the transformation, kept so that we + /// can check correctness of the public keys without having the entire + /// interstitial parameters for each contribution. + delta_after: G1Affine, + + /// Random element chosen by the contributor. + s: G1Affine, + + /// That element, taken to the contributor's secret delta. 
+ s_delta: G1Affine, + + /// r is H(last_pubkey | s | s_delta), r_delta proves knowledge of delta + r_delta: G2Affine, + + /// Hash of the transcript (used for mapping to r) + transcript: [u8; 64], +} + +impl PartialEq for PublicKey { + fn eq(&self, other: &PublicKey) -> bool { + self.delta_after == other.delta_after && + self.s == other.s && + self.s_delta == other.s_delta && + self.r_delta == other.r_delta && + &self.transcript[..] == &other.transcript[..] + } +} + +pub fn verify_transform( + before: &MPCParameters, + after: &MPCParameters +) -> bool +{ + // Parameter size doesn't change! + if before.params.vk.ic.len() != after.params.vk.ic.len() { + return false; + } + + if before.params.h.len() != after.params.h.len() { + return false; + } + + if before.params.l.len() != after.params.l.len() { + return false; + } + + if before.params.a.len() != after.params.a.len() { + return false; + } + + if before.params.b_g1.len() != after.params.b_g1.len() { + return false; + } + + if before.params.b_g2.len() != after.params.b_g2.len() { + return false; + } + + // IC shouldn't change at all, since gamma = 1 + if before.params.vk.ic != after.params.vk.ic { + return false; + } + + // Transformations involve a single new contribution + if after.contributions.len() != (before.contributions.len() + 1) { + return false; + } + + // All of the previous pubkeys should be the same + if &before.contributions[..] != &after.contributions[0..before.contributions.len()] { + return false; + } + + let pubkey = after.contributions.last().unwrap(); + + // The new pubkey's claimed value of delta should match the + // parameters + if pubkey.delta_after != after.params.vk.delta_g1 { + return false; + } + + // The `cs_hash` should not change. It's initialized at the beginning + if &before.cs_hash[..] != &after.cs_hash[..] { + return false; + } + + // H(cs_hash | | s | s_delta) + let h = { + let sink = io::sink(); + let mut sink = HashWriter::new(sink); + + sink.write_all(&before.cs_hash[..]).unwrap(); + for pubkey in &before.contributions { + sink.write_all(pubkey.delta_after.into_uncompressed().as_ref()).unwrap(); + } + sink.write_all(pubkey.s.into_uncompressed().as_ref()).unwrap(); + sink.write_all(pubkey.s_delta.into_uncompressed().as_ref()).unwrap(); + + sink.into_hash() + }; + + // The transcript must be consistent + if &pubkey.transcript[..] != h.as_ref() { + return false; + } + + let r = hash_to_g2(h.as_ref()).into_affine(); + + // Check the signature of knowledge + if !same_ratio((r, pubkey.r_delta), (pubkey.s, pubkey.s_delta)) { + return false; + } + + // Check the change from the old delta is consistent + if !same_ratio( + (before.params.vk.delta_g1, after.params.vk.delta_g1), + (r, pubkey.r_delta) + ) { + return false; + } + + if !same_ratio( + (before.params.vk.delta_g2, after.params.vk.delta_g2), + (pubkey.s, pubkey.s_delta) + ) { + return false; + } + + // H and L queries should be updated + if !same_ratio( + merge_pairs(&before.params.h, &after.params.h), + (pubkey.r_delta, r) // reversed for inverse + ) { + return false; + } + + if !same_ratio( + merge_pairs(&before.params.l, &after.params.l), + (pubkey.r_delta, r) // reversed for inverse + ) { + return false; + } + + true +} + +/// Checks if pairs have the same ratio. +fn same_ratio( + g1: (G1, G1), + g2: (G1::Pair, G1::Pair) +) -> bool +{ + g1.0.pairing_with(&g2.1) == g1.1.pairing_with(&g2.0) +} + +/// Computes a random linear combination over v1/v2. 
+/// +/// Checking that many pairs of elements are exponentiated by +/// the same `x` can be achieved (with high probability) with +/// the following technique: +/// +/// Given v1 = [a, b, c] and v2 = [as, bs, cs], compute +/// (a*r1 + b*r2 + c*r3, (as)*r1 + (bs)*r2 + (cs)*r3) for some +/// random r1, r2, r3. Given (g, g^s)... +/// +/// e(g, (as)*r1 + (bs)*r2 + (cs)*r3) = e(g^s, a*r1 + b*r2 + c*r3) +/// +/// ... with high probability. +fn merge_pairs(v1: &[G], v2: &[G]) -> (G, G) +{ + use std::sync::{Arc, Mutex}; + use rand::{thread_rng}; + + assert_eq!(v1.len(), v2.len()); + + let chunk = (v1.len() / num_cpus::get()) + 1; + + let s = Arc::new(Mutex::new(G::Projective::zero())); + let sx = Arc::new(Mutex::new(G::Projective::zero())); + + crossbeam::scope(|scope| { + for (v1, v2) in v1.chunks(chunk).zip(v2.chunks(chunk)) { + let s = s.clone(); + let sx = sx.clone(); + + scope.spawn(move || { + // We do not need to be overly cautious of the RNG + // used for this check. + let rng = &mut thread_rng(); + + let mut wnaf = Wnaf::new(); + let mut local_s = G::Projective::zero(); + let mut local_sx = G::Projective::zero(); + + for (v1, v2) in v1.iter().zip(v2.iter()) { + let rho = G::Scalar::rand(rng); + let mut wnaf = wnaf.scalar(rho.into_repr()); + let v1 = wnaf.base(v1.into_projective()); + let v2 = wnaf.base(v2.into_projective()); + + local_s.add_assign(&v1); + local_sx.add_assign(&v2); + } + + s.lock().unwrap().add_assign(&local_s); + sx.lock().unwrap().add_assign(&local_sx); + }); + } + }); + + let s = s.lock().unwrap().into_affine(); + let sx = sx.lock().unwrap().into_affine(); + + (s, sx) +} + +pub struct PrivateKey { + delta: Fr +} + +pub fn keypair( + rng: &mut R, + current: &MPCParameters, +) -> (PublicKey, PrivateKey) +{ + // Sample random delta + let delta: Fr = rng.gen(); + + // Compute delta s-pair in G1 + let s = G1::rand(rng).into_affine(); + let s_delta = s.mul(delta).into_affine(); + + // H(cs_hash | | s | s_delta) + let h = { + let sink = io::sink(); + let mut sink = HashWriter::new(sink); + + sink.write_all(¤t.cs_hash[..]).unwrap(); + for pubkey in ¤t.contributions { + sink.write_all(pubkey.delta_after.into_uncompressed().as_ref()).unwrap(); + } + sink.write_all(s.into_uncompressed().as_ref()).unwrap(); + sink.write_all(s_delta.into_uncompressed().as_ref()).unwrap(); + + sink.into_hash() + }; + + // This avoids making a weird assumption about the hash into the + // group. + let mut transcript = [0; 64]; + transcript.copy_from_slice(h.as_ref()); + + // Compute delta s-pair in G2 + let r = hash_to_g2(h.as_ref()).into_affine(); + let r_delta = r.mul(delta).into_affine(); + + ( + PublicKey { + delta_after: current.params.vk.delta_g1.mul(delta).into_affine(), + s: s, + s_delta: s_delta, + r_delta: r_delta, + transcript: transcript + }, + PrivateKey { + delta: delta + } + ) +} + +/// Hashes to G2 using the first 32 bytes of `digest`. Panics if `digest` is less +/// than 32 bytes. +fn hash_to_g2(mut digest: &[u8]) -> G2 +{ + assert!(digest.len() >= 32); + + let mut seed = Vec::with_capacity(8); + + for _ in 0..8 { + seed.push(digest.read_u32::().expect("assertion above guarantees this to work")); + } + + ChaChaRng::from_seed(&seed).gen() +} + +/// Abstraction over a writer which hashes the data being written. +pub struct HashWriter { + writer: W, + hasher: Blake2b +} + +impl HashWriter { + /// Construct a new `HashWriter` given an existing `writer` by value. 
+ pub fn new(writer: W) -> Self { + HashWriter { + writer: writer, + hasher: Blake2b::default() + } + } + + /// Destroy this writer and return the hash of what was written. + pub fn into_hash(self) -> GenericArray { + self.hasher.result() + } +} + +impl Write for HashWriter { + fn write(&mut self, buf: &[u8]) -> io::Result { + let bytes = self.writer.write(buf)?; + + if bytes > 0 { + self.hasher.input(&buf[0..bytes]); + } + + Ok(bytes) + } + + fn flush(&mut self) -> io::Result<()> { + self.writer.flush() + } +} From a94574436d4db28528559cdfdae9911e265019c0 Mon Sep 17 00:00:00 2001 From: Sean Bowe Date: Thu, 5 Apr 2018 13:48:37 -0600 Subject: [PATCH 03/18] Serialization of MPCParameters and PublicKey. --- examples/mimc.rs | 16 +++++++ src/lib.rs | 108 ++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 123 insertions(+), 1 deletion(-) diff --git a/examples/mimc.rs b/examples/mimc.rs index 25c58b0..f91641f 100644 --- a/examples/mimc.rs +++ b/examples/mimc.rs @@ -191,6 +191,14 @@ fn main() { let (pubkey, privkey) = phase2::keypair(rng, ¶ms); params.transform(&pubkey, &privkey); + { + let mut w = vec![]; + pubkey.write(&mut w).unwrap(); + + let deser = phase2::PublicKey::read(&w[..]).unwrap(); + assert!(pubkey == deser); + } + assert!(phase2::verify_transform(&old_params, ¶ms)); let old_params = params.clone(); @@ -199,6 +207,14 @@ fn main() { assert!(phase2::verify_transform(&old_params, ¶ms)); + { + let mut w = vec![]; + params.write(&mut w).unwrap(); + + let deser = phase2::MPCParameters::read(&w[..], true).unwrap(); + assert!(params == deser); + } + let params = params.params(); // Prepare the verification key (for proof verification) diff --git a/src/lib.rs b/src/lib.rs index 5182a60..23a9422 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -14,7 +14,8 @@ use typenum::consts::U64; use byteorder::{ BigEndian, - ReadBytesExt + ReadBytesExt, + WriteBytesExt }; use std::{ @@ -477,7 +478,53 @@ pub struct MPCParameters { contributions: Vec } +impl PartialEq for MPCParameters { + fn eq(&self, other: &MPCParameters) -> bool { + self.params == other.params && + &self.cs_hash[..] == &other.cs_hash[..] && + self.contributions == other.contributions + } +} + impl MPCParameters { + pub fn write( + &self, + mut writer: W + ) -> io::Result<()> + { + self.params.write(&mut writer)?; + writer.write_all(&self.cs_hash)?; + + writer.write_u32::(self.contributions.len() as u32)?; + for pubkey in &self.contributions { + pubkey.write(&mut writer)?; + } + + Ok(()) + } + + pub fn read( + mut reader: R, + checked: bool + ) -> io::Result + { + let params = Parameters::read(&mut reader, checked)?; + + let mut cs_hash = [0u8; 64]; + reader.read_exact(&mut cs_hash)?; + + let contributions_len = reader.read_u32::()? 
as usize; + + let mut contributions = vec![]; + for _ in 0..contributions_len { + contributions.push(PublicKey::read(&mut reader)?); + } + + Ok(MPCParameters { + params, cs_hash, contributions + }) + } + pub fn params(&self) -> &Parameters { &self.params } @@ -567,6 +614,65 @@ pub struct PublicKey { transcript: [u8; 64], } +impl PublicKey { + pub fn write( + &self, + mut writer: W + ) -> io::Result<()> + { + writer.write_all(self.delta_after.into_uncompressed().as_ref())?; + writer.write_all(self.s.into_uncompressed().as_ref())?; + writer.write_all(self.s_delta.into_uncompressed().as_ref())?; + writer.write_all(self.r_delta.into_uncompressed().as_ref())?; + writer.write_all(&self.transcript)?; + + Ok(()) + } + + pub fn read( + mut reader: R + ) -> io::Result + { + let mut g1_repr = G1Uncompressed::empty(); + let mut g2_repr = G2Uncompressed::empty(); + + reader.read_exact(g1_repr.as_mut())?; + let delta_after = g1_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + if delta_after.is_zero() { + return Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity")); + } + + reader.read_exact(g1_repr.as_mut())?; + let s = g1_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + if s.is_zero() { + return Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity")); + } + + reader.read_exact(g1_repr.as_mut())?; + let s_delta = g1_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + if s_delta.is_zero() { + return Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity")); + } + + reader.read_exact(g2_repr.as_mut())?; + let r_delta = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + if r_delta.is_zero() { + return Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity")); + } + + let mut transcript = [0u8; 64]; + reader.read_exact(&mut transcript)?; + + Ok(PublicKey { + delta_after, s, s_delta, r_delta, transcript + }) + } +} + impl PartialEq for PublicKey { fn eq(&self, other: &PublicKey) -> bool { self.delta_after == other.delta_after && From 462a681a1da6faea3788813c95af1a096399ea28 Mon Sep 17 00:00:00 2001 From: Sean Bowe Date: Thu, 5 Apr 2018 15:13:29 -0600 Subject: [PATCH 04/18] Verify transformations using a full verifier --- examples/mimc.rs | 12 ++- src/lib.rs | 266 ++++++++++++++++++++++++++++------------------- 2 files changed, 168 insertions(+), 110 deletions(-) diff --git a/examples/mimc.rs b/examples/mimc.rs index f91641f..0f0d2a4 100644 --- a/examples/mimc.rs +++ b/examples/mimc.rs @@ -199,13 +199,21 @@ fn main() { assert!(pubkey == deser); } - assert!(phase2::verify_transform(&old_params, ¶ms)); + phase2::verify_transform(MiMCDemo:: { + xl: None, + xr: None, + constants: &constants + }, &old_params, ¶ms).unwrap(); let old_params = params.clone(); let (pubkey, privkey) = phase2::keypair(rng, ¶ms); params.transform(&pubkey, &privkey); - assert!(phase2::verify_transform(&old_params, ¶ms)); + phase2::verify_transform(MiMCDemo:: { + xl: None, + xr: None, + constants: &constants + }, &old_params, ¶ms).unwrap(); { let mut w = vec![]; diff --git a/src/lib.rs b/src/lib.rs index 23a9422..b29c330 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -533,7 +533,7 @@ impl MPCParameters { &mut self, pubkey: &PublicKey, privkey: &PrivateKey - ) + ) -> [u8; 64] { fn batch_exp(bases: &mut [C], coeff: C::Scalar) { let coeff = coeff.into_repr(); @@ -591,6 +591,146 @@ impl MPCParameters { self.params.vk.delta_g2 = 
self.params.vk.delta_g2.mul(privkey.delta).into_affine(); self.contributions.push(pubkey.clone()); + + { + let sink = io::sink(); + let mut sink = HashWriter::new(sink); + pubkey.write(&mut sink).unwrap(); + let h = sink.into_hash(); + let mut response = [0u8; 64]; + response.copy_from_slice(h.as_ref()); + response + } + } + + pub fn verify>( + &self, + circuit: C + ) -> Result, ()> + { + let initial_params = new_parameters(circuit).map_err(|_| ())?; + + // H/L will change, but should have same length + if initial_params.params.h.len() != self.params.h.len() { + return Err(()); + } + if initial_params.params.l.len() != self.params.l.len() { + return Err(()); + } + + // A/B_G1/B_G2 doesn't change at all + if initial_params.params.a != self.params.a { + return Err(()); + } + if initial_params.params.b_g1 != self.params.b_g1 { + return Err(()); + } + if initial_params.params.b_g2 != self.params.b_g2 { + return Err(()); + } + + // alpha/beta/gamma don't change + if initial_params.params.vk.alpha_g1 != self.params.vk.alpha_g1 { + return Err(()); + } + if initial_params.params.vk.beta_g1 != self.params.vk.beta_g1 { + return Err(()); + } + if initial_params.params.vk.beta_g2 != self.params.vk.beta_g2 { + return Err(()); + } + if initial_params.params.vk.gamma_g2 != self.params.vk.gamma_g2 { + return Err(()); + } + + // IC shouldn't change, as gamma doesn't change + if initial_params.params.vk.ic != self.params.vk.ic { + return Err(()); + } + + // cs_hash should be the same + if &initial_params.cs_hash[..] != &self.cs_hash[..] { + return Err(()); + } + + let sink = io::sink(); + let mut sink = HashWriter::new(sink); + sink.write_all(&initial_params.cs_hash[..]).unwrap(); + + let mut current_delta = G1Affine::one(); + let mut result = vec![]; + + for pubkey in &self.contributions { + let mut our_sink = sink.clone(); + our_sink.write_all(pubkey.s.into_uncompressed().as_ref()).unwrap(); + our_sink.write_all(pubkey.s_delta.into_uncompressed().as_ref()).unwrap(); + + sink.write_all(pubkey.delta_after.into_uncompressed().as_ref()).unwrap(); + + let h = our_sink.into_hash(); + + // The transcript must be consistent + if &pubkey.transcript[..] 
!= h.as_ref() { + return Err(()); + } + + let r = hash_to_g2(h.as_ref()).into_affine(); + + // Check the signature of knowledge + if !same_ratio((r, pubkey.r_delta), (pubkey.s, pubkey.s_delta)) { + return Err(()); + } + + // Check the change from the old delta is consistent + if !same_ratio( + (current_delta, pubkey.delta_after), + (r, pubkey.r_delta) + ) { + return Err(()); + } + + current_delta = pubkey.delta_after; + + { + let sink = io::sink(); + let mut sink = HashWriter::new(sink); + pubkey.write(&mut sink).unwrap(); + let h = sink.into_hash(); + let mut response = [0u8; 64]; + response.copy_from_slice(h.as_ref()); + result.push(response); + } + } + + // Current parameters should have consistent delta in G1 + if current_delta != self.params.vk.delta_g1 { + return Err(()); + } + + // Current parameters should have consistent delta in G2 + if !same_ratio( + (G1Affine::one(), current_delta), + (G2Affine::one(), self.params.vk.delta_g2) + ) { + return Err(()); + } + + // H and L queries should be updated with delta^-1 + if !same_ratio( + merge_pairs(&initial_params.params.h, &self.params.h), + (self.params.vk.delta_g2, G2Affine::one()) // reversed for inverse + ) { + return Err(()); + } + + if !same_ratio( + merge_pairs(&initial_params.params.l, &self.params.l), + (self.params.vk.delta_g2, G2Affine::one()) // reversed for inverse + ) { + return Err(()); + } + + Ok(result) } } @@ -683,122 +823,23 @@ impl PartialEq for PublicKey { } } -pub fn verify_transform( +pub fn verify_transform>( + circuit: C, before: &MPCParameters, after: &MPCParameters -) -> bool +) -> Result, ()> { - // Parameter size doesn't change! - if before.params.vk.ic.len() != after.params.vk.ic.len() { - return false; - } - - if before.params.h.len() != after.params.h.len() { - return false; - } - - if before.params.l.len() != after.params.l.len() { - return false; - } - - if before.params.a.len() != after.params.a.len() { - return false; - } - - if before.params.b_g1.len() != after.params.b_g1.len() { - return false; - } - - if before.params.b_g2.len() != after.params.b_g2.len() { - return false; - } - - // IC shouldn't change at all, since gamma = 1 - if before.params.vk.ic != after.params.vk.ic { - return false; - } - - // Transformations involve a single new contribution + // Transformation involves a single new object if after.contributions.len() != (before.contributions.len() + 1) { - return false; + return Err(()); } - // All of the previous pubkeys should be the same + // None of the previous transformations should change if &before.contributions[..] != &after.contributions[0..before.contributions.len()] { - return false; + return Err(()); } - let pubkey = after.contributions.last().unwrap(); - - // The new pubkey's claimed value of delta should match the - // parameters - if pubkey.delta_after != after.params.vk.delta_g1 { - return false; - } - - // The `cs_hash` should not change. It's initialized at the beginning - if &before.cs_hash[..] != &after.cs_hash[..] { - return false; - } - - // H(cs_hash | | s | s_delta) - let h = { - let sink = io::sink(); - let mut sink = HashWriter::new(sink); - - sink.write_all(&before.cs_hash[..]).unwrap(); - for pubkey in &before.contributions { - sink.write_all(pubkey.delta_after.into_uncompressed().as_ref()).unwrap(); - } - sink.write_all(pubkey.s.into_uncompressed().as_ref()).unwrap(); - sink.write_all(pubkey.s_delta.into_uncompressed().as_ref()).unwrap(); - - sink.into_hash() - }; - - // The transcript must be consistent - if &pubkey.transcript[..] 
!= h.as_ref() { - return false; - } - - let r = hash_to_g2(h.as_ref()).into_affine(); - - // Check the signature of knowledge - if !same_ratio((r, pubkey.r_delta), (pubkey.s, pubkey.s_delta)) { - return false; - } - - // Check the change from the old delta is consistent - if !same_ratio( - (before.params.vk.delta_g1, after.params.vk.delta_g1), - (r, pubkey.r_delta) - ) { - return false; - } - - if !same_ratio( - (before.params.vk.delta_g2, after.params.vk.delta_g2), - (pubkey.s, pubkey.s_delta) - ) { - return false; - } - - // H and L queries should be updated - if !same_ratio( - merge_pairs(&before.params.h, &after.params.h), - (pubkey.r_delta, r) // reversed for inverse - ) { - return false; - } - - if !same_ratio( - merge_pairs(&before.params.l, &after.params.l), - (pubkey.r_delta, r) // reversed for inverse - ) { - return false; - } - - true + after.verify(circuit) } /// Checks if pairs have the same ratio. @@ -946,6 +987,15 @@ pub struct HashWriter { hasher: Blake2b } +impl Clone for HashWriter { + fn clone(&self) -> HashWriter { + HashWriter { + writer: io::sink(), + hasher: self.hasher.clone() + } + } +} + impl HashWriter { /// Construct a new `HashWriter` given an existing `writer` by value. pub fn new(writer: W) -> Self { From 2196f97fb7e086875a6cef516d576e12407bfa68 Mon Sep 17 00:00:00 2001 From: Sean Bowe Date: Thu, 5 Apr 2018 18:41:15 -0600 Subject: [PATCH 05/18] Clean up API and add comments --- examples/mimc.rs | 34 +- src/lib.rs | 903 ++++++++++++++++++++++++++++++----------------- 2 files changed, 583 insertions(+), 354 deletions(-) diff --git a/examples/mimc.rs b/examples/mimc.rs index 0f0d2a4..620e6d9 100644 --- a/examples/mimc.rs +++ b/examples/mimc.rs @@ -184,46 +184,20 @@ fn main() { constants: &constants }; - phase2::new_parameters(c).unwrap() + phase2::MPCParameters::new(c).unwrap() }; let old_params = params.clone(); let (pubkey, privkey) = phase2::keypair(rng, ¶ms); - params.transform(&pubkey, &privkey); + params.contribute(&pubkey, &privkey); - { - let mut w = vec![]; - pubkey.write(&mut w).unwrap(); - - let deser = phase2::PublicKey::read(&w[..]).unwrap(); - assert!(pubkey == deser); - } - - phase2::verify_transform(MiMCDemo:: { + phase2::verify_contribution(MiMCDemo:: { xl: None, xr: None, constants: &constants }, &old_params, ¶ms).unwrap(); - let old_params = params.clone(); - let (pubkey, privkey) = phase2::keypair(rng, ¶ms); - params.transform(&pubkey, &privkey); - - phase2::verify_transform(MiMCDemo:: { - xl: None, - xr: None, - constants: &constants - }, &old_params, ¶ms).unwrap(); - - { - let mut w = vec![]; - params.write(&mut w).unwrap(); - - let deser = phase2::MPCParameters::read(&w[..], true).unwrap(); - assert!(params == deser); - } - - let params = params.params(); + let params = params.get_params(); // Prepare the verification key (for proof verification) let pvk = prepare_verifying_key(¶ms.vk); diff --git a/src/lib.rs b/src/lib.rs index b29c330..25f103a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,3 +1,206 @@ +//! # zk-SNARK MPCs, made easy. +//! +//! ## Make your circuit +//! +//! Grab the [`bellman`](https://github.com/ebfull/bellman) and +//! [`pairing`](https://github.com/ebfull/bellman) crates. Bellman +//! provides a trait called `Circuit`, which you must implement +//! for your computation. +//! +//! Here's a silly example: proving you know the cube root of +//! a field element. +//! +//! ```rust +//! extern crate pairing; +//! extern crate bellman; +//! +//! use pairing::{Engine, Field}; +//! use bellman::{ +//! Circuit, +//! 
ConstraintSystem, +//! SynthesisError, +//! }; +//! +//! struct CubeRoot { +//! cube_root: Option +//! } +//! +//! impl Circuit for CubeRoot { +//! fn synthesize>( +//! self, +//! cs: &mut CS +//! ) -> Result<(), SynthesisError> +//! { +//! // Witness the cube root +//! let root = cs.alloc(|| "root", || { +//! self.cube_root.ok_or(SynthesisError::AssignmentMissing) +//! })?; +//! +//! // Witness the square of the cube root +//! let square = cs.alloc(|| "square", || { +//! self.cube_root +//! .ok_or(SynthesisError::AssignmentMissing) +//! .map(|mut root| {root.square(); root }) +//! })?; +//! +//! // Enforce that `square` is root^2 +//! cs.enforce( +//! || "squaring", +//! |lc| lc + root, +//! |lc| lc + root, +//! |lc| lc + square +//! ); +//! +//! // Witness the cube, as a public input +//! let cube = cs.alloc_input(|| "cube", || { +//! self.cube_root +//! .ok_or(SynthesisError::AssignmentMissing) +//! .map(|root| { +//! let mut tmp = root; +//! tmp.square(); +//! tmp.mul_assign(&root); +//! tmp +//! }) +//! })?; +//! +//! // Enforce that `cube` is root^3 +//! // i.e. that `cube` is `root` * `square` +//! cs.enforce( +//! || "cubing", +//! |lc| lc + root, +//! |lc| lc + square, +//! |lc| lc + cube +//! ); +//! +//! Ok(()) +//! } +//! } +//! ``` +//! +//! ## Create some proofs +//! +//! Now that we have `CubeRoot` implementing `Circuit`, +//! let's create some parameters and make some proofs. +//! +//! ```rust,ignore +//! extern crate rand; +//! +//! use pairing::bls12_381::{Bls12, Fr}; +//! use bellman::groth16::{ +//! generate_random_parameters, +//! create_random_proof, +//! prepare_verifying_key, +//! verify_proof +//! }; +//! use rand::{OsRng, Rand}; +//! +//! let rng = &mut OsRng::new(); +//! +//! // Create public parameters for our circuit +//! let params = { +//! let circuit = CubeRoot:: { +//! cube_root: None +//! }; +//! +//! generate_random_parameters::( +//! circuit, +//! rng +//! ).unwrap() +//! }; +//! +//! // Prepare the verifying key for verification +//! let pvk = prepare_verifying_key(¶ms.vk); +//! +//! // Let's start making proofs! +//! for _ in 0..50 { +//! // Verifier picks a cube in the field. +//! // Let's just make a random one. +//! let root = Fr::rand(rng); +//! let mut cube = root; +//! cube.square(); +//! cube.mul_assign(&root); +//! +//! // Prover gets the cube, figures out the cube +//! // root, and makes the proof: +//! let proof = create_random_proof( +//! CubeRoot:: { +//! cube_root: Some(root) +//! }, ¶ms, rng +//! ).unwrap(); +//! +//! // Verifier checks the proof against the cube +//! assert!(verify_proof(&pvk, &proof, &[cube]).unwrap()); +//! } +//! ``` +//! ## Creating parameters +//! +//! Notice in the previous example that we created our zk-SNARK +//! parameters by calling `generate_random_parameters`. However, +//! if you wanted you could have called `generate_parameters` +//! with some secret numbers you chose, and kept them for +//! yourself. Given those numbers, you can create false proofs. +//! +//! In order to convince others you didn't, a multi-party +//! computation (MPC) can be used. The MPC has the property that +//! only one participant needs to be honest for the parameters to +//! be secure. This crate (`phase2`) is about creating parameters +//! securely using such an MPC. +//! +//! Let's start by using `phase2` to create some base parameters +//! for our circuit: +//! +//! ```rust,ignore +//! extern crate phase2; +//! +//! let mut params = phase2::MPCParameters::new(CubeRoot { +//! cube_root: None +//! }).unwrap(); +//! ``` +//! +//! 
The first time you try this, it will try to read a file like +//! `phase1radix2m2` from the current directory. You need to grab +//! that from the Powers of Tau. +//! +//! These parameters are not safe to use; false proofs can be +//! created for them. Let's contribute some randomness to these +//! parameters. +//! +//! ```rust,ignore +//! // Create a random keypair for our parameters +//! let (pubkey, privkey) = phase2::keypair(rng, ¶ms); +//! +//! // Contribute to the parameters. Remember this hash, it's +//! // how we know our contribution is in the parameters! +//! let hash = params.contribute(&pubkey, &privkey); +//! +//! // Throw away the private key! +//! drop(privkey); +//! ``` +//! +//! These parameters are now secure to use, so long as you destroyed +//! the privkey. That may not be convincing to others, so let them +//! contribute randomness too! `params` can be serialized and sent +//! elsewhere, where they can do the same thing and send new +//! parameters back to you. Only one person needs to destroy the +//! `privkey` for the final parameters to be secure. +//! +//! Once you're done setting up the parameters, you can verify the +//! parameters: +//! +//! ```rust,ignore +//! let contributions = params.verify(CubeRoot { +//! cube_root: None +//! }).expect("parameters should be valid!"); +//! +//! // We need to check the `contributions` to see if our `hash` +//! // is in it (see above, when we first contributed) +//! assert!(phase2::contains_contribution(&contributions, &hash)); +//! ``` +//! +//! Great, now if you're happy, grab the Groth16 `Parameters` with +//! `params.params()`, so that you can interact with the bellman APIs +//! just as before. + extern crate pairing; extern crate bellman; extern crate rand; @@ -181,294 +384,6 @@ impl ConstraintSystem for KeypairAssembly { } } -pub fn new_parameters( - circuit: C, -) -> Result - where C: Circuit -{ - let mut assembly = KeypairAssembly { - num_inputs: 0, - num_aux: 0, - num_constraints: 0, - at_inputs: vec![], - bt_inputs: vec![], - ct_inputs: vec![], - at_aux: vec![], - bt_aux: vec![], - ct_aux: vec![] - }; - - // Allocate the "one" input variable - assembly.alloc_input(|| "", || Ok(Fr::one()))?; - - // Synthesize the circuit. 
- circuit.synthesize(&mut assembly)?; - - // Input constraints to ensure full density of IC query - // x * 0 = 0 - for i in 0..assembly.num_inputs { - assembly.enforce(|| "", - |lc| lc + Variable::new_unchecked(Index::Input(i)), - |lc| lc, - |lc| lc, - ); - } - - // Compute the size of our evaluation domain - let mut m = 1; - let mut exp = 0; - while m < assembly.num_constraints { - m *= 2; - exp += 1; - - // Powers of Tau ceremony can't support more than 2^21 - if exp > 21 { - return Err(SynthesisError::PolynomialDegreeTooLarge) - } - } - - // Try to load "phase1radix2m{}" - let f = match File::open(format!("phase1radix2m{}", exp)) { - Ok(f) => f, - Err(e) => { - panic!("Couldn't load phase1radix2m{}: {:?}", exp, e); - } - }; - let f = &mut BufReader::with_capacity(1024 * 1024, f); - - let read_g1 = |reader: &mut BufReader| -> io::Result { - let mut repr = G1Uncompressed::empty(); - reader.read_exact(repr.as_mut())?; - - repr.into_affine_unchecked() - .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) - .and_then(|e| if e.is_zero() { - Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity")) - } else { - Ok(e) - }) - }; - - let read_g2 = |reader: &mut BufReader| -> io::Result { - let mut repr = G2Uncompressed::empty(); - reader.read_exact(repr.as_mut())?; - - repr.into_affine_unchecked() - .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) - .and_then(|e| if e.is_zero() { - Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity")) - } else { - Ok(e) - }) - }; - - let alpha = read_g1(f)?; - let beta_g1 = read_g1(f)?; - let beta_g2 = read_g2(f)?; - - let mut coeffs_g1 = Vec::with_capacity(m); - for _ in 0..m { - coeffs_g1.push(read_g1(f)?); - } - - let mut coeffs_g2 = Vec::with_capacity(m); - for _ in 0..m { - coeffs_g2.push(read_g2(f)?); - } - - let mut alpha_coeffs_g1 = Vec::with_capacity(m); - for _ in 0..m { - alpha_coeffs_g1.push(read_g1(f)?); - } - - let mut beta_coeffs_g1 = Vec::with_capacity(m); - for _ in 0..m { - beta_coeffs_g1.push(read_g1(f)?); - } - - // These are `Arc` so that later it'll be easier - // to use multiexp during QAP evaluation (which - // requires a futures-based API) - let coeffs_g1 = Arc::new(coeffs_g1); - let coeffs_g2 = Arc::new(coeffs_g2); - let alpha_coeffs_g1 = Arc::new(alpha_coeffs_g1); - let beta_coeffs_g1 = Arc::new(beta_coeffs_g1); - - let mut h = Vec::with_capacity(m - 1); - for _ in 0..(m - 1) { - h.push(read_g1(f)?); - } - - let mut ic = vec![G1::zero(); assembly.num_inputs]; - let mut l = vec![G1::zero(); assembly.num_aux]; - let mut a_g1 = vec![G1::zero(); assembly.num_inputs + assembly.num_aux]; - let mut b_g1 = vec![G1::zero(); assembly.num_inputs + assembly.num_aux]; - let mut b_g2 = vec![G2::zero(); assembly.num_inputs + assembly.num_aux]; - - fn eval( - // Lagrange coefficients for tau - coeffs_g1: Arc>, - coeffs_g2: Arc>, - alpha_coeffs_g1: Arc>, - beta_coeffs_g1: Arc>, - - // QAP polynomials - at: &[Vec<(Fr, usize)>], - bt: &[Vec<(Fr, usize)>], - ct: &[Vec<(Fr, usize)>], - - // Resulting evaluated QAP polynomials - a_g1: &mut [G1], - b_g1: &mut [G1], - b_g2: &mut [G2], - ext: &mut [G1], - - // Worker - worker: &Worker - ) - { - // Sanity check - assert_eq!(a_g1.len(), at.len()); - assert_eq!(a_g1.len(), bt.len()); - assert_eq!(a_g1.len(), ct.len()); - assert_eq!(a_g1.len(), b_g1.len()); - assert_eq!(a_g1.len(), b_g2.len()); - assert_eq!(a_g1.len(), ext.len()); - - // Evaluate polynomials in multiple threads - worker.scope(a_g1.len(), |scope, chunk| { - for ((((((a_g1, b_g1), b_g2), ext), at), bt), 
ct) in - a_g1.chunks_mut(chunk) - .zip(b_g1.chunks_mut(chunk)) - .zip(b_g2.chunks_mut(chunk)) - .zip(ext.chunks_mut(chunk)) - .zip(at.chunks(chunk)) - .zip(bt.chunks(chunk)) - .zip(ct.chunks(chunk)) - { - let coeffs_g1 = coeffs_g1.clone(); - let coeffs_g2 = coeffs_g2.clone(); - let alpha_coeffs_g1 = alpha_coeffs_g1.clone(); - let beta_coeffs_g1 = beta_coeffs_g1.clone(); - - scope.spawn(move || { - for ((((((a_g1, b_g1), b_g2), ext), at), bt), ct) in - a_g1.iter_mut() - .zip(b_g1.iter_mut()) - .zip(b_g2.iter_mut()) - .zip(ext.iter_mut()) - .zip(at.iter()) - .zip(bt.iter()) - .zip(ct.iter()) - { - for &(coeff, lag) in at { - a_g1.add_assign(&coeffs_g1[lag].mul(coeff)); - ext.add_assign(&beta_coeffs_g1[lag].mul(coeff)); - } - - for &(coeff, lag) in bt { - b_g1.add_assign(&coeffs_g1[lag].mul(coeff)); - b_g2.add_assign(&coeffs_g2[lag].mul(coeff)); - ext.add_assign(&alpha_coeffs_g1[lag].mul(coeff)); - } - - for &(coeff, lag) in ct { - ext.add_assign(&coeffs_g1[lag].mul(coeff)); - } - } - - // Batch normalize - G1::batch_normalization(a_g1); - G1::batch_normalization(b_g1); - G2::batch_normalization(b_g2); - G1::batch_normalization(ext); - }); - } - }); - } - - let worker = Worker::new(); - - // Evaluate for inputs. - eval( - coeffs_g1.clone(), - coeffs_g2.clone(), - alpha_coeffs_g1.clone(), - beta_coeffs_g1.clone(), - &assembly.at_inputs, - &assembly.bt_inputs, - &assembly.ct_inputs, - &mut a_g1[0..assembly.num_inputs], - &mut b_g1[0..assembly.num_inputs], - &mut b_g2[0..assembly.num_inputs], - &mut ic, - &worker - ); - - // Evaluate for auxillary variables. - eval( - coeffs_g1.clone(), - coeffs_g2.clone(), - alpha_coeffs_g1.clone(), - beta_coeffs_g1.clone(), - &assembly.at_aux, - &assembly.bt_aux, - &assembly.ct_aux, - &mut a_g1[assembly.num_inputs..], - &mut b_g1[assembly.num_inputs..], - &mut b_g2[assembly.num_inputs..], - &mut l, - &worker - ); - - // Don't allow any elements be unconstrained, so that - // the L query is always fully dense. - for e in l.iter() { - if e.is_zero() { - return Err(SynthesisError::UnconstrainedVariable); - } - } - - let vk = VerifyingKey { - alpha_g1: alpha, - beta_g1: beta_g1, - beta_g2: beta_g2, - gamma_g2: G2Affine::one(), - delta_g1: G1Affine::one(), - delta_g2: G2Affine::one(), - ic: ic.into_iter().map(|e| e.into_affine()).collect() - }; - - let params = Parameters { - vk: vk, - h: Arc::new(h), - l: Arc::new(l.into_iter().map(|e| e.into_affine()).collect()), - - // Filter points at infinity away from A/B queries - a: Arc::new(a_g1.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect()), - b_g1: Arc::new(b_g1.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect()), - b_g2: Arc::new(b_g2.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect()) - }; - - let h = { - let sink = io::sink(); - let mut sink = HashWriter::new(sink); - - params.write(&mut sink).unwrap(); - - sink.into_hash() - }; - - let mut cs_hash = [0; 64]; - cs_hash.copy_from_slice(h.as_ref()); - - Ok(MPCParameters { - params: params, - cs_hash: cs_hash, - contributions: vec![] - }) -} - /// MPC parameters are just like bellman `Parameters` except, when serialized, /// they contain a transcript of contributions at the end, which can be verified. #[derive(Clone)] @@ -487,49 +402,313 @@ impl PartialEq for MPCParameters { } impl MPCParameters { - pub fn write( - &self, - mut writer: W - ) -> io::Result<()> + /// Create new Groth16 parameters (compatible with bellman) for a + /// given circuit. 
The resulting parameters are unsafe to use + /// until there are contributions (see `transform`). + pub fn new( + circuit: C, + ) -> Result + where C: Circuit { - self.params.write(&mut writer)?; - writer.write_all(&self.cs_hash)?; + let mut assembly = KeypairAssembly { + num_inputs: 0, + num_aux: 0, + num_constraints: 0, + at_inputs: vec![], + bt_inputs: vec![], + ct_inputs: vec![], + at_aux: vec![], + bt_aux: vec![], + ct_aux: vec![] + }; - writer.write_u32::(self.contributions.len() as u32)?; - for pubkey in &self.contributions { - pubkey.write(&mut writer)?; + // Allocate the "one" input variable + assembly.alloc_input(|| "", || Ok(Fr::one()))?; + + // Synthesize the circuit. + circuit.synthesize(&mut assembly)?; + + // Input constraints to ensure full density of IC query + // x * 0 = 0 + for i in 0..assembly.num_inputs { + assembly.enforce(|| "", + |lc| lc + Variable::new_unchecked(Index::Input(i)), + |lc| lc, + |lc| lc, + ); } - Ok(()) - } + // Compute the size of our evaluation domain + let mut m = 1; + let mut exp = 0; + while m < assembly.num_constraints { + m *= 2; + exp += 1; - pub fn read( - mut reader: R, - checked: bool - ) -> io::Result - { - let params = Parameters::read(&mut reader, checked)?; - - let mut cs_hash = [0u8; 64]; - reader.read_exact(&mut cs_hash)?; - - let contributions_len = reader.read_u32::()? as usize; - - let mut contributions = vec![]; - for _ in 0..contributions_len { - contributions.push(PublicKey::read(&mut reader)?); + // Powers of Tau ceremony can't support more than 2^21 + if exp > 21 { + return Err(SynthesisError::PolynomialDegreeTooLarge) + } } + // Try to load "phase1radix2m{}" + let f = match File::open(format!("phase1radix2m{}", exp)) { + Ok(f) => f, + Err(e) => { + panic!("Couldn't load phase1radix2m{}: {:?}", exp, e); + } + }; + let f = &mut BufReader::with_capacity(1024 * 1024, f); + + let read_g1 = |reader: &mut BufReader| -> io::Result { + let mut repr = G1Uncompressed::empty(); + reader.read_exact(repr.as_mut())?; + + repr.into_affine_unchecked() + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + .and_then(|e| if e.is_zero() { + Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity")) + } else { + Ok(e) + }) + }; + + let read_g2 = |reader: &mut BufReader| -> io::Result { + let mut repr = G2Uncompressed::empty(); + reader.read_exact(repr.as_mut())?; + + repr.into_affine_unchecked() + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + .and_then(|e| if e.is_zero() { + Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity")) + } else { + Ok(e) + }) + }; + + let alpha = read_g1(f)?; + let beta_g1 = read_g1(f)?; + let beta_g2 = read_g2(f)?; + + let mut coeffs_g1 = Vec::with_capacity(m); + for _ in 0..m { + coeffs_g1.push(read_g1(f)?); + } + + let mut coeffs_g2 = Vec::with_capacity(m); + for _ in 0..m { + coeffs_g2.push(read_g2(f)?); + } + + let mut alpha_coeffs_g1 = Vec::with_capacity(m); + for _ in 0..m { + alpha_coeffs_g1.push(read_g1(f)?); + } + + let mut beta_coeffs_g1 = Vec::with_capacity(m); + for _ in 0..m { + beta_coeffs_g1.push(read_g1(f)?); + } + + // These are `Arc` so that later it'll be easier + // to use multiexp during QAP evaluation (which + // requires a futures-based API) + let coeffs_g1 = Arc::new(coeffs_g1); + let coeffs_g2 = Arc::new(coeffs_g2); + let alpha_coeffs_g1 = Arc::new(alpha_coeffs_g1); + let beta_coeffs_g1 = Arc::new(beta_coeffs_g1); + + let mut h = Vec::with_capacity(m - 1); + for _ in 0..(m - 1) { + h.push(read_g1(f)?); + } + + let mut ic = vec![G1::zero(); 
assembly.num_inputs]; + let mut l = vec![G1::zero(); assembly.num_aux]; + let mut a_g1 = vec![G1::zero(); assembly.num_inputs + assembly.num_aux]; + let mut b_g1 = vec![G1::zero(); assembly.num_inputs + assembly.num_aux]; + let mut b_g2 = vec![G2::zero(); assembly.num_inputs + assembly.num_aux]; + + fn eval( + // Lagrange coefficients for tau + coeffs_g1: Arc>, + coeffs_g2: Arc>, + alpha_coeffs_g1: Arc>, + beta_coeffs_g1: Arc>, + + // QAP polynomials + at: &[Vec<(Fr, usize)>], + bt: &[Vec<(Fr, usize)>], + ct: &[Vec<(Fr, usize)>], + + // Resulting evaluated QAP polynomials + a_g1: &mut [G1], + b_g1: &mut [G1], + b_g2: &mut [G2], + ext: &mut [G1], + + // Worker + worker: &Worker + ) + { + // Sanity check + assert_eq!(a_g1.len(), at.len()); + assert_eq!(a_g1.len(), bt.len()); + assert_eq!(a_g1.len(), ct.len()); + assert_eq!(a_g1.len(), b_g1.len()); + assert_eq!(a_g1.len(), b_g2.len()); + assert_eq!(a_g1.len(), ext.len()); + + // Evaluate polynomials in multiple threads + worker.scope(a_g1.len(), |scope, chunk| { + for ((((((a_g1, b_g1), b_g2), ext), at), bt), ct) in + a_g1.chunks_mut(chunk) + .zip(b_g1.chunks_mut(chunk)) + .zip(b_g2.chunks_mut(chunk)) + .zip(ext.chunks_mut(chunk)) + .zip(at.chunks(chunk)) + .zip(bt.chunks(chunk)) + .zip(ct.chunks(chunk)) + { + let coeffs_g1 = coeffs_g1.clone(); + let coeffs_g2 = coeffs_g2.clone(); + let alpha_coeffs_g1 = alpha_coeffs_g1.clone(); + let beta_coeffs_g1 = beta_coeffs_g1.clone(); + + scope.spawn(move || { + for ((((((a_g1, b_g1), b_g2), ext), at), bt), ct) in + a_g1.iter_mut() + .zip(b_g1.iter_mut()) + .zip(b_g2.iter_mut()) + .zip(ext.iter_mut()) + .zip(at.iter()) + .zip(bt.iter()) + .zip(ct.iter()) + { + for &(coeff, lag) in at { + a_g1.add_assign(&coeffs_g1[lag].mul(coeff)); + ext.add_assign(&beta_coeffs_g1[lag].mul(coeff)); + } + + for &(coeff, lag) in bt { + b_g1.add_assign(&coeffs_g1[lag].mul(coeff)); + b_g2.add_assign(&coeffs_g2[lag].mul(coeff)); + ext.add_assign(&alpha_coeffs_g1[lag].mul(coeff)); + } + + for &(coeff, lag) in ct { + ext.add_assign(&coeffs_g1[lag].mul(coeff)); + } + } + + // Batch normalize + G1::batch_normalization(a_g1); + G1::batch_normalization(b_g1); + G2::batch_normalization(b_g2); + G1::batch_normalization(ext); + }); + } + }); + } + + let worker = Worker::new(); + + // Evaluate for inputs. + eval( + coeffs_g1.clone(), + coeffs_g2.clone(), + alpha_coeffs_g1.clone(), + beta_coeffs_g1.clone(), + &assembly.at_inputs, + &assembly.bt_inputs, + &assembly.ct_inputs, + &mut a_g1[0..assembly.num_inputs], + &mut b_g1[0..assembly.num_inputs], + &mut b_g2[0..assembly.num_inputs], + &mut ic, + &worker + ); + + // Evaluate for auxillary variables. + eval( + coeffs_g1.clone(), + coeffs_g2.clone(), + alpha_coeffs_g1.clone(), + beta_coeffs_g1.clone(), + &assembly.at_aux, + &assembly.bt_aux, + &assembly.ct_aux, + &mut a_g1[assembly.num_inputs..], + &mut b_g1[assembly.num_inputs..], + &mut b_g2[assembly.num_inputs..], + &mut l, + &worker + ); + + // Don't allow any elements be unconstrained, so that + // the L query is always fully dense. 
+ for e in l.iter() { + if e.is_zero() { + return Err(SynthesisError::UnconstrainedVariable); + } + } + + let vk = VerifyingKey { + alpha_g1: alpha, + beta_g1: beta_g1, + beta_g2: beta_g2, + gamma_g2: G2Affine::one(), + delta_g1: G1Affine::one(), + delta_g2: G2Affine::one(), + ic: ic.into_iter().map(|e| e.into_affine()).collect() + }; + + let params = Parameters { + vk: vk, + h: Arc::new(h), + l: Arc::new(l.into_iter().map(|e| e.into_affine()).collect()), + + // Filter points at infinity away from A/B queries + a: Arc::new(a_g1.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect()), + b_g1: Arc::new(b_g1.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect()), + b_g2: Arc::new(b_g2.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect()) + }; + + let h = { + let sink = io::sink(); + let mut sink = HashWriter::new(sink); + + params.write(&mut sink).unwrap(); + + sink.into_hash() + }; + + let mut cs_hash = [0; 64]; + cs_hash.copy_from_slice(h.as_ref()); + Ok(MPCParameters { - params, cs_hash, contributions + params: params, + cs_hash: cs_hash, + contributions: vec![] }) } - pub fn params(&self) -> &Parameters { + /// Get the underlying Groth16 `Parameters` + pub fn get_params(&self) -> &Parameters { &self.params } - pub fn transform( + /// Contributes some randomness to the parameters. Only one + /// contributor needs to destroy their `PrivateKey` to keep + /// the parameters secure. See `keypair()` for creating + /// keypairs. + /// + /// This function returns a "hash" that is bound to the + /// contribution. Contributors can use this hash to make + /// sure their contribution is in the final parameters, by + /// checking to see if it appears in the output of + /// `MPCParameters::verify`. + pub fn contribute( &mut self, pubkey: &PublicKey, privkey: &PrivateKey @@ -592,6 +771,7 @@ impl MPCParameters { self.contributions.push(pubkey.clone()); + // Calculate the hash of the public key and return it { let sink = io::sink(); let mut sink = HashWriter::new(sink); @@ -603,12 +783,17 @@ impl MPCParameters { } } + /// Verify the correctness of the parameters, given a circuit + /// instance. This will return all of the hashes that + /// contributors obtained when they ran + /// `MPCParameters::contribute`, for ensuring that contributions + /// exist in the final parameters. pub fn verify>( &self, circuit: C ) -> Result, ()> { - let initial_params = new_parameters(circuit).map_err(|_| ())?; + let initial_params = MPCParameters::new(circuit).map_err(|_| ())?; // H/L will change, but should have same length if initial_params.params.h.len() != self.params.h.len() { @@ -732,8 +917,51 @@ impl MPCParameters { Ok(result) } + + /// Serialize these parameters. The serialized parameters + /// can be read by bellman as Groth16 `Parameters`. + pub fn write( + &self, + mut writer: W + ) -> io::Result<()> + { + self.params.write(&mut writer)?; + writer.write_all(&self.cs_hash)?; + + writer.write_u32::(self.contributions.len() as u32)?; + for pubkey in &self.contributions { + pubkey.write(&mut writer)?; + } + + Ok(()) + } + + /// Deserialize these parameters. + pub fn read( + mut reader: R, + checked: bool + ) -> io::Result + { + let params = Parameters::read(&mut reader, checked)?; + + let mut cs_hash = [0u8; 64]; + reader.read_exact(&mut cs_hash)?; + + let contributions_len = reader.read_u32::()? 
as usize; + + let mut contributions = vec![]; + for _ in 0..contributions_len { + contributions.push(PublicKey::read(&mut reader)?); + } + + Ok(MPCParameters { + params, cs_hash, contributions + }) + } } +/// This allows others to verify that you contributed. The hash produced +/// by `MPCParameters::contribute` is just a BLAKE2b hash of this object. #[derive(Clone)] pub struct PublicKey { /// This is the delta (in G1) after the transformation, kept so that we @@ -755,7 +983,7 @@ pub struct PublicKey { } impl PublicKey { - pub fn write( + fn write( &self, mut writer: W ) -> io::Result<()> @@ -769,7 +997,7 @@ impl PublicKey { Ok(()) } - pub fn read( + fn read( mut reader: R ) -> io::Result { @@ -823,11 +1051,16 @@ impl PartialEq for PublicKey { } } -pub fn verify_transform>( +/// Verify a contribution, given the old parameters and +/// the new parameters. This is basically a wrapper around +/// `MPCParameters::verify` which just checks that a new +/// contribution was added and none of the existing +/// contributions were changed. +pub fn verify_contribution>( circuit: C, before: &MPCParameters, after: &MPCParameters -) -> Result, ()> +) -> Result<[u8; 64], ()> { // Transformation involves a single new object if after.contributions.len() != (before.contributions.len() + 1) { @@ -839,7 +1072,7 @@ pub fn verify_transform>( return Err(()); } - after.verify(circuit) + after.verify(circuit).map(|v| *v.last().unwrap()) } /// Checks if pairs have the same ratio. @@ -912,10 +1145,15 @@ fn merge_pairs(v1: &[G], v2: &[G]) -> (G, G) (s, sx) } +/// This needs to be destroyed by at least one participant +/// for the final parameters to be secure. pub struct PrivateKey { delta: Fr } +/// Compute a keypair, given the current parameters. Keypairs +/// cannot be reused for multiple contributions or contributions +/// in different parameters. pub fn keypair( rng: &mut R, current: &MPCParameters, @@ -982,7 +1220,7 @@ fn hash_to_g2(mut digest: &[u8]) -> G2 } /// Abstraction over a writer which hashes the data being written. -pub struct HashWriter { +struct HashWriter { writer: W, hasher: Blake2b } @@ -1026,3 +1264,20 @@ impl Write for HashWriter { self.writer.flush() } } + +/// This is a cheap helper utility that exists purely +/// because Rust still doesn't have type-level integers +/// and so doesn't implement `PartialEq` for `[T; 64]` +pub fn contains_contribution( + contributions: &[[u8; 64]], + my_contribution: &[u8; 64] +) -> bool +{ + for contrib in contributions { + if &contrib[..] == &my_contribution[..] { + return true + } + } + + return false +} From 68c4bdb6ef8df4d3921cb5e0048c0e6c78bdc1e8 Mon Sep 17 00:00:00 2001 From: Sean Bowe Date: Thu, 5 Apr 2018 18:42:59 -0600 Subject: [PATCH 06/18] Bump version --- Cargo.toml | 2 +- README.md | 8 +++++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 8d74dfb..8cbfa95 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "phase2" -version = "0.0.1" +version = "0.1.0" authors = ["Sean Bowe "] description = "zk-SNARK MPC" documentation = "https://github.com/ebfull/phase2" diff --git a/README.md b/README.md index 74637de..828eb5c 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,12 @@ # phase2 [![Crates.io](https://img.shields.io/crates/v/phase2.svg)](https://crates.io/crates/phase2) # -Under construction. ;) +This library is still under development. 
+ +## [Documentation](https://docs.rs/phase2/) + +## Security Warnings + +This library does not make any guarantees about constant-time operations, memory access patterns, or resistance to side-channel attacks. ## License From d60a14b08b81a669b36a1f5f34563196e0648f12 Mon Sep 17 00:00:00 2001 From: Sean Bowe Date: Sat, 7 Apr 2018 20:56:28 -0600 Subject: [PATCH 07/18] Fix doc comment --- src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index 25f103a..c3f2c8a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -404,7 +404,7 @@ impl PartialEq for MPCParameters { impl MPCParameters { /// Create new Groth16 parameters (compatible with bellman) for a /// given circuit. The resulting parameters are unsafe to use - /// until there are contributions (see `transform`). + /// until there are contributions (see `contribute()`). pub fn new( circuit: C, ) -> Result From 4f80de43a2d2006aa8d0179eafe412ce9bf2da2f Mon Sep 17 00:00:00 2001 From: Sean Bowe Date: Sun, 8 Apr 2018 13:49:01 -0600 Subject: [PATCH 08/18] Simplify API and improve performance --- examples/mimc.rs | 17 ++++-- src/lib.rs | 136 ++++++++++++++++++++++++++++++++++++++++++----- 2 files changed, 136 insertions(+), 17 deletions(-) diff --git a/examples/mimc.rs b/examples/mimc.rs index 620e6d9..e2d42b3 100644 --- a/examples/mimc.rs +++ b/examples/mimc.rs @@ -188,14 +188,23 @@ fn main() { }; let old_params = params.clone(); - let (pubkey, privkey) = phase2::keypair(rng, ¶ms); - params.contribute(&pubkey, &privkey); + params.contribute(rng); - phase2::verify_contribution(MiMCDemo:: { + let first_contrib = phase2::verify_contribution(&old_params, ¶ms).unwrap(); + + let old_params = params.clone(); + params.contribute(rng); + + let second_contrib = phase2::verify_contribution(&old_params, ¶ms).unwrap(); + + let verification_result = params.verify(MiMCDemo:: { xl: None, xr: None, constants: &constants - }, &old_params, ¶ms).unwrap(); + }).unwrap(); + + assert!(phase2::contains_contribution(&verification_result, &first_contrib)); + assert!(phase2::contains_contribution(&verification_result, &second_contrib)); let params = params.get_params(); diff --git a/src/lib.rs b/src/lib.rs index c3f2c8a..345ff26 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -708,12 +708,14 @@ impl MPCParameters { /// sure their contribution is in the final parameters, by /// checking to see if it appears in the output of /// `MPCParameters::verify`. - pub fn contribute( + pub fn contribute( &mut self, - pubkey: &PublicKey, - privkey: &PrivateKey + rng: &mut R ) -> [u8; 64] { + // Generate a keypair + let (pubkey, privkey) = keypair(rng, self); + fn batch_exp(bases: &mut [C], coeff: C::Scalar) { let coeff = coeff.into_repr(); @@ -963,7 +965,7 @@ impl MPCParameters { /// This allows others to verify that you contributed. The hash produced /// by `MPCParameters::contribute` is just a BLAKE2b hash of this object. #[derive(Clone)] -pub struct PublicKey { +struct PublicKey { /// This is the delta (in G1) after the transformation, kept so that we /// can check correctness of the public keys without having the entire /// interstitial parameters for each contribution. @@ -1052,12 +1054,8 @@ impl PartialEq for PublicKey { } /// Verify a contribution, given the old parameters and -/// the new parameters. This is basically a wrapper around -/// `MPCParameters::verify` which just checks that a new -/// contribution was added and none of the existing -/// contributions were changed. 
-pub fn verify_contribution>( - circuit: C, +/// the new parameters. Returns the hash of the contribution. +pub fn verify_contribution( before: &MPCParameters, after: &MPCParameters ) -> Result<[u8; 64], ()> @@ -1072,7 +1070,119 @@ pub fn verify_contribution>( return Err(()); } - after.verify(circuit).map(|v| *v.last().unwrap()) + // H/L will change, but should have same length + if before.params.h.len() != after.params.h.len() { + return Err(()); + } + if before.params.l.len() != after.params.l.len() { + return Err(()); + } + + // A/B_G1/B_G2 doesn't change at all + if before.params.a != after.params.a { + return Err(()); + } + if before.params.b_g1 != after.params.b_g1 { + return Err(()); + } + if before.params.b_g2 != after.params.b_g2 { + return Err(()); + } + + // alpha/beta/gamma don't change + if before.params.vk.alpha_g1 != after.params.vk.alpha_g1 { + return Err(()); + } + if before.params.vk.beta_g1 != after.params.vk.beta_g1 { + return Err(()); + } + if before.params.vk.beta_g2 != after.params.vk.beta_g2 { + return Err(()); + } + if before.params.vk.gamma_g2 != after.params.vk.gamma_g2 { + return Err(()); + } + + // IC shouldn't change, as gamma doesn't change + if before.params.vk.ic != after.params.vk.ic { + return Err(()); + } + + // cs_hash should be the same + if &before.cs_hash[..] != &after.cs_hash[..] { + return Err(()); + } + + let sink = io::sink(); + let mut sink = HashWriter::new(sink); + sink.write_all(&before.cs_hash[..]).unwrap(); + + for pubkey in &before.contributions { + sink.write_all(pubkey.delta_after.into_uncompressed().as_ref()).unwrap(); + } + + let pubkey = after.contributions.last().unwrap(); + sink.write_all(pubkey.s.into_uncompressed().as_ref()).unwrap(); + sink.write_all(pubkey.s_delta.into_uncompressed().as_ref()).unwrap(); + + let h = sink.into_hash(); + + // The transcript must be consistent + if &pubkey.transcript[..] != h.as_ref() { + return Err(()); + } + + let r = hash_to_g2(h.as_ref()).into_affine(); + + // Check the signature of knowledge + if !same_ratio((r, pubkey.r_delta), (pubkey.s, pubkey.s_delta)) { + return Err(()); + } + + // Check the change from the old delta is consistent + if !same_ratio( + (before.params.vk.delta_g1, pubkey.delta_after), + (r, pubkey.r_delta) + ) { + return Err(()); + } + + // Current parameters should have consistent delta in G1 + if pubkey.delta_after != after.params.vk.delta_g1 { + return Err(()); + } + + // Current parameters should have consistent delta in G2 + if !same_ratio( + (G1Affine::one(), pubkey.delta_after), + (G2Affine::one(), after.params.vk.delta_g2) + ) { + return Err(()); + } + + // H and L queries should be updated with delta^-1 + if !same_ratio( + merge_pairs(&before.params.h, &after.params.h), + (after.params.vk.delta_g2, before.params.vk.delta_g2) // reversed for inverse + ) { + return Err(()); + } + + if !same_ratio( + merge_pairs(&before.params.l, &after.params.l), + (after.params.vk.delta_g2, before.params.vk.delta_g2) // reversed for inverse + ) { + return Err(()); + } + + let sink = io::sink(); + let mut sink = HashWriter::new(sink); + pubkey.write(&mut sink).unwrap(); + let h = sink.into_hash(); + let mut response = [0u8; 64]; + response.copy_from_slice(h.as_ref()); + + Ok(response) } /// Checks if pairs have the same ratio. @@ -1147,14 +1257,14 @@ fn merge_pairs(v1: &[G], v2: &[G]) -> (G, G) /// This needs to be destroyed by at least one participant /// for the final parameters to be secure. 
-pub struct PrivateKey { +struct PrivateKey { delta: Fr } /// Compute a keypair, given the current parameters. Keypairs /// cannot be reused for multiple contributions or contributions /// in different parameters. -pub fn keypair( +fn keypair( rng: &mut R, current: &MPCParameters, ) -> (PublicKey, PrivateKey) From 4fe95794c57b81b533332f105a83ece95fe49349 Mon Sep 17 00:00:00 2001 From: Sean Bowe Date: Sun, 8 Apr 2018 14:03:41 -0600 Subject: [PATCH 09/18] Add some `expect`s. --- examples/mimc.rs | 4 ++-- src/lib.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/mimc.rs b/examples/mimc.rs index e2d42b3..7000118 100644 --- a/examples/mimc.rs +++ b/examples/mimc.rs @@ -190,12 +190,12 @@ fn main() { let old_params = params.clone(); params.contribute(rng); - let first_contrib = phase2::verify_contribution(&old_params, ¶ms).unwrap(); + let first_contrib = phase2::verify_contribution(&old_params, ¶ms).expect("should verify"); let old_params = params.clone(); params.contribute(rng); - let second_contrib = phase2::verify_contribution(&old_params, ¶ms).unwrap(); + let second_contrib = phase2::verify_contribution(&old_params, ¶ms).expect("should verify"); let verification_result = params.verify(MiMCDemo:: { xl: None, diff --git a/src/lib.rs b/src/lib.rs index 345ff26..c76dd0a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -760,7 +760,7 @@ impl MPCParameters { } } - let delta_inv = privkey.delta.inverse().unwrap(); + let delta_inv = privkey.delta.inverse().expect("nonzero"); let mut l = (&self.params.l[..]).to_vec(); let mut h = (&self.params.h[..]).to_vec(); batch_exp(&mut l, delta_inv); From b0c2cebc9c85ec70780f12427ad206c8efb358c6 Mon Sep 17 00:00:00 2001 From: Sean Bowe Date: Sun, 8 Apr 2018 14:06:20 -0600 Subject: [PATCH 10/18] Write entire pubkeys for signatures. 
--- src/lib.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index c76dd0a..c7375d8 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -852,7 +852,7 @@ impl MPCParameters { our_sink.write_all(pubkey.s.into_uncompressed().as_ref()).unwrap(); our_sink.write_all(pubkey.s_delta.into_uncompressed().as_ref()).unwrap(); - sink.write_all(pubkey.delta_after.into_uncompressed().as_ref()).unwrap(); + pubkey.write(&mut sink).unwrap(); let h = our_sink.into_hash(); @@ -1118,7 +1118,7 @@ pub fn verify_contribution( sink.write_all(&before.cs_hash[..]).unwrap(); for pubkey in &before.contributions { - sink.write_all(pubkey.delta_after.into_uncompressed().as_ref()).unwrap(); + pubkey.write(&mut sink).unwrap(); } let pubkey = after.contributions.last().unwrap(); @@ -1276,14 +1276,14 @@ fn keypair( let s = G1::rand(rng).into_affine(); let s_delta = s.mul(delta).into_affine(); - // H(cs_hash | | s | s_delta) + // H(cs_hash | | s | s_delta) let h = { let sink = io::sink(); let mut sink = HashWriter::new(sink); sink.write_all(¤t.cs_hash[..]).unwrap(); for pubkey in ¤t.contributions { - sink.write_all(pubkey.delta_after.into_uncompressed().as_ref()).unwrap(); + pubkey.write(&mut sink).unwrap(); } sink.write_all(s.into_uncompressed().as_ref()).unwrap(); sink.write_all(s_delta.into_uncompressed().as_ref()).unwrap(); From c0009ae41172b0739149595f2fdc333803483bad Mon Sep 17 00:00:00 2001 From: Sean Bowe Date: Sun, 8 Apr 2018 14:12:43 -0600 Subject: [PATCH 11/18] Fix documentation --- src/lib.rs | 29 ++++++++++++----------------- 1 file changed, 12 insertions(+), 17 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index c7375d8..aae5552 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -166,23 +166,17 @@ //! parameters. //! //! ```rust,ignore -//! // Create a random keypair for our parameters -//! let (pubkey, privkey) = phase2::keypair(rng, ¶ms); -//! -//! // Contribute to the parameters. Remember this hash, it's -//! // how we know our contribution is in the parameters! -//! let hash = params.contribute(&pubkey, &privkey); -//! -//! // Throw away the private key! -//! drop(privkey); +//! // Contribute randomness to the parameters. Remember this hash, +//! // it's how we know our contribution is in the parameters! +//! let hash = params.contribute(rng); //! ``` //! -//! These parameters are now secure to use, so long as you destroyed -//! the privkey. That may not be convincing to others, so let them +//! These parameters are now secure to use, so long as you weren't +//! malicious. That may not be convincing to others, so let them //! contribute randomness too! `params` can be serialized and sent //! elsewhere, where they can do the same thing and send new -//! parameters back to you. Only one person needs to destroy the -//! `privkey` for the final parameters to be secure. +//! parameters back to you. Only one person needs to be honest for +//! the final parameters to be secure. //! //! Once you're done setting up the parameters, you can verify the //! parameters: @@ -699,9 +693,8 @@ impl MPCParameters { } /// Contributes some randomness to the parameters. Only one - /// contributor needs to destroy their `PrivateKey` to keep - /// the parameters secure. See `keypair()` for creating - /// keypairs. + /// contributor needs to be honest for the parameters to be + /// secure. /// /// This function returns a "hash" that is bound to the /// contribution. Contributors can use this hash to make @@ -938,7 +931,9 @@ impl MPCParameters { Ok(()) } - /// Deserialize these parameters. 
+ /// Deserialize these parameters. If `checked` is false, + /// we won't perform curve validity and group order + /// checks. pub fn read( mut reader: R, checked: bool From 23887cd35fe9345dae07af595041cc39bb55725b Mon Sep 17 00:00:00 2001 From: Sean Bowe Date: Sun, 8 Apr 2018 14:16:31 -0600 Subject: [PATCH 12/18] Version bump --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 8cbfa95..88b66cb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "phase2" -version = "0.1.0" +version = "0.2.0" authors = ["Sean Bowe "] description = "zk-SNARK MPC" documentation = "https://github.com/ebfull/phase2" From 3014f3c8bc12d3577e6d3daf74c0df588ea8c673 Mon Sep 17 00:00:00 2001 From: Sean Bowe Date: Sun, 8 Apr 2018 15:02:19 -0600 Subject: [PATCH 13/18] Switch to simpler BLAKE2b implementation. --- Cargo.toml | 4 +--- src/lib.rs | 18 ++++++++---------- 2 files changed, 9 insertions(+), 13 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 88b66cb..9540956 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,8 +13,6 @@ pairing = "0.14" rand = "0.4" bellman = "0.1" byteorder = "1" -blake2 = "0.6.1" num_cpus = "1" crossbeam = "0.3" -generic-array = "0.8.3" -typenum = "1.9.0" +blake2-rfc = "0.2" diff --git a/src/lib.rs b/src/lib.rs index aae5552..a9c7a52 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -199,15 +199,11 @@ extern crate pairing; extern crate bellman; extern crate rand; extern crate byteorder; -extern crate blake2; +extern crate blake2_rfc; extern crate num_cpus; extern crate crossbeam; -extern crate generic_array; -extern crate typenum; -use blake2::{Blake2b, Digest}; -use generic_array::GenericArray; -use typenum::consts::U64; +use blake2_rfc::blake2b::Blake2b; use byteorder::{ BigEndian, @@ -1344,13 +1340,15 @@ impl HashWriter { pub fn new(writer: W) -> Self { HashWriter { writer: writer, - hasher: Blake2b::default() + hasher: Blake2b::new(64) } } /// Destroy this writer and return the hash of what was written. 
- pub fn into_hash(self) -> GenericArray { - self.hasher.result() + pub fn into_hash(self) -> [u8; 64] { + let mut tmp = [0u8; 64]; + tmp.copy_from_slice(self.hasher.finalize().as_ref()); + tmp } } @@ -1359,7 +1357,7 @@ impl Write for HashWriter { let bytes = self.writer.write(buf)?; if bytes > 0 { - self.hasher.input(&buf[0..bytes]); + self.hasher.update(&buf[0..bytes]); } Ok(bytes) From 00b83471ab1905d57a4a151bd1d8658a827c3395 Mon Sep 17 00:00:00 2001 From: Sean Bowe Date: Sun, 8 Apr 2018 15:16:11 -0600 Subject: [PATCH 14/18] Bump version again --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 9540956..693e4a8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "phase2" -version = "0.2.0" +version = "0.2.1" authors = ["Sean Bowe "] description = "zk-SNARK MPC" documentation = "https://github.com/ebfull/phase2" From 80065497e07b9c307aefc9338ea75e6524525ff2 Mon Sep 17 00:00:00 2001 From: Sean Bowe Date: Sun, 8 Apr 2018 15:17:22 -0600 Subject: [PATCH 15/18] Update documentation --- Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 693e4a8..f4ffdc4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,8 +2,8 @@ name = "phase2" version = "0.2.1" authors = ["Sean Bowe "] -description = "zk-SNARK MPC" -documentation = "https://github.com/ebfull/phase2" +description = "Library for performing MPCs for creating zk-SNARK public parameters" +documentation = "https://docs.rs/phase2" homepage = "https://github.com/ebfull/phase2" license = "MIT/Apache-2.0" repository = "https://github.com/ebfull/phase2" From 9c45e44efd32e567bee93ae908415102366f6e58 Mon Sep 17 00:00:00 2001 From: Sean Bowe Date: Thu, 12 Apr 2018 22:27:49 -0600 Subject: [PATCH 16/18] Update link for Powers of Tau. --- src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index a9c7a52..e8de130 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -159,7 +159,7 @@ //! //! The first time you try this, it will try to read a file like //! `phase1radix2m2` from the current directory. You need to grab -//! that from the Powers of Tau. +//! that from the [Powers of Tau](https://lists.z.cash.foundation/pipermail/zapps-wg/2018/000362.html). //! //! These parameters are not safe to use; false proofs can be //! created for them. Let's contribute some randomness to these From 385f6222b43157de362c084f5bdabdc06b422067 Mon Sep 17 00:00:00 2001 From: Sean Bowe Date: Thu, 12 Apr 2018 22:28:02 -0600 Subject: [PATCH 17/18] Version bump --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index f4ffdc4..034af7a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "phase2" -version = "0.2.1" +version = "0.2.2" authors = ["Sean Bowe "] description = "Library for performing MPCs for creating zk-SNARK public parameters" documentation = "https://docs.rs/phase2" From 0c2b25fe8879deeb46530e91a5654d353a9bf94c Mon Sep 17 00:00:00 2001 From: Peter van Nostrand Date: Sun, 22 Apr 2018 14:49:22 -0400 Subject: [PATCH 18/18] Fixed link to pairing library in docs. --- src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index e8de130..e091603 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -3,7 +3,7 @@ //! ## Make your circuit //! //! Grab the [`bellman`](https://github.com/ebfull/bellman) and -//! [`pairing`](https://github.com/ebfull/bellman) crates. Bellman +//! 
[`pairing`](https://github.com/ebfull/pairing) crates. Bellman //! provides a trait called `Circuit`, which you must implement //! for your computation. //!
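
Taken together, the API changes in this series (PATCH 05 through PATCH 11) converge on the contribution workflow exercised by `examples/mimc.rs`. The sketch below restates that flow end to end under the final API; it assumes the `MiMCDemo` circuit and `MIMC_ROUNDS` constant from `examples/mimc.rs`, uses `thread_rng` purely for illustration, and expects the matching `phase1radix2m*` file from the Powers of Tau to be in the working directory — it is a summary sketch, not a drop-in replacement for the example.

```rust
// Illustrative sketch only: `MiMCDemo` and `MIMC_ROUNDS` are assumed to come
// from examples/mimc.rs, and the phase1radix2m{exp} file must be present.
extern crate rand;
extern crate pairing;
extern crate phase2;

use pairing::bls12_381::{Bls12, Fr};
use rand::{thread_rng, Rng};

fn main() {
    let rng = &mut thread_rng();

    // MiMC round constants, as in the example.
    let constants = (0..MIMC_ROUNDS).map(|_| rng.gen()).collect::<Vec<Fr>>();

    // Build the initial (unsafe-to-use) parameters for the circuit.
    let mut params = phase2::MPCParameters::new(MiMCDemo::<Bls12> {
        xl: None,
        xr: None,
        constants: &constants
    }).unwrap();

    // First contribution, verified against the previous parameters.
    let old_params = params.clone();
    params.contribute(rng);
    let first_contrib = phase2::verify_contribution(&old_params, &params)
        .expect("first contribution should verify");

    // Second contribution, verified the same way.
    let old_params = params.clone();
    params.contribute(rng);
    let second_contrib = phase2::verify_contribution(&old_params, &params)
        .expect("second contribution should verify");

    // Verify the full transcript against the circuit and confirm that
    // both contribution hashes appear in it.
    let all_contributions = params.verify(MiMCDemo::<Bls12> {
        xl: None,
        xr: None,
        constants: &constants
    }).expect("parameters should be valid");

    assert!(phase2::contains_contribution(&all_contributions, &first_contrib));
    assert!(phase2::contains_contribution(&all_contributions, &second_contrib));

    // The finished Groth16 parameters can now be handed to bellman.
    let _groth16_params = params.get_params();
}
```

Each `verify_contribution` call checks a single step of the chain of contributions, while the final `params.verify(...)` re-derives the initial parameters from the circuit and re-checks the whole transcript, so every participant can confirm their own contribution hash via `contains_contribution`.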