Add 'powersoftau/' from commit '34b4d09b4ccc459d757a77c94b391a1f64f66f76'

git-subtree-dir: powersoftau
git-subtree-mainline: 37bdc022d6ae906eb583746fea8784edc6bd7c6b
git-subtree-split: 34b4d09b4ccc459d757a77c94b391a1f64f66f76
Author: Kobi Gurkan
Date:   2019-09-02 15:50:38 +03:00
Commit: 5d6690f9b8

23 changed files with 5085 additions and 0 deletions

powersoftau/.gitignore (vendored, new file, 6 lines)

@@ -0,0 +1,6 @@
/target/
**/*.rs.bk
transcript*
challenge*
response*
new_challenge*

powersoftau/COPYRIGHT (new file, 14 lines)

@@ -0,0 +1,14 @@
Copyrights in the "powersoftau" project are retained by their contributors. No
copyright assignment is required to contribute to the "powersoftau" project.
The "powersoftau" project is licensed under either of
* Apache License, Version 2.0, (see ./LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license (see ./LICENSE-MIT or http://opensource.org/licenses/MIT)
at your option.
Unless you explicitly state otherwise, any contribution intentionally
submitted for inclusion in the work by you, as defined in the Apache-2.0
license, shall be dual licensed as above, without any additional terms or
conditions.

powersoftau/Cargo.lock (generated, new file, 603 lines)

@@ -0,0 +1,603 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
[[package]]
name = "arrayvec"
version = "0.4.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"nodrop 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "bellman"
version = "0.2.0"
source = "git+https://github.com/matterinc/bellman?tag=0.2.0#6e45a4b233e97a71f4a8a0565c8f8d753c04c08f"
dependencies = [
"bit-vec 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
"blake2-rfc 0.2.18 (git+https://github.com/gtank/blake2-rfc?rev=7a5b5fc99ae483a0043db7547fb79a6fa44b88a9)",
"byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"crossbeam 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.25 (registry+https://github.com/rust-lang/crates.io-index)",
"futures-cpupool 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
"num_cpus 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
"pairing 0.16.2 (git+https://github.com/matterinc/pairing?tag=0.16.2)",
"rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "bit-vec"
version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "blake2"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"byte-tools 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"crypto-mac 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"digest 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
"generic-array 0.8.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "blake2-rfc"
version = "0.2.18"
source = "git+https://github.com/gtank/blake2-rfc?rev=7a5b5fc99ae483a0043db7547fb79a6fa44b88a9#7a5b5fc99ae483a0043db7547fb79a6fa44b88a9"
dependencies = [
"arrayvec 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"constant_time_eq 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "byte-tools"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "byteorder"
version = "1.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "cfg-if"
version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "constant_time_eq"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "crossbeam"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "crossbeam"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"cfg-if 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
"crossbeam-channel 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"crossbeam-deque 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
"crossbeam-epoch 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
"crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"crossbeam-utils 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "crossbeam-channel"
version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"crossbeam-utils 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
"smallvec 0.6.9 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "crossbeam-deque"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"crossbeam-epoch 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
"crossbeam-utils 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "crossbeam-epoch"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"arrayvec 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)",
"cfg-if 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
"crossbeam-utils 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"memoffset 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"scopeguard 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "crossbeam-queue"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"crossbeam-utils 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "crossbeam-utils"
version = "0.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"cfg-if 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "crypto-mac"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"constant_time_eq 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"generic-array 0.8.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "digest"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"generic-array 0.8.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "either"
version = "1.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "ff"
version = "0.5.0"
source = "git+https://github.com/matterinc/ff?tag=0.5#056a13b95f4b971a9ae2c6fbb5fbc9f1e4f4828e"
dependencies = [
"byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"ff_derive 0.4.0 (git+https://github.com/matterinc/ff?tag=0.5)",
"rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "ff_derive"
version = "0.4.0"
source = "git+https://github.com/matterinc/ff?tag=0.5#056a13b95f4b971a9ae2c6fbb5fbc9f1e4f4828e"
dependencies = [
"num-bigint 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"num-integer 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)",
"num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"proc-macro2 0.4.27 (registry+https://github.com/rust-lang/crates.io-index)",
"quote 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.88 (registry+https://github.com/rust-lang/crates.io-index)",
"syn 0.14.9 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "fuchsia-cprng"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "futures"
version = "0.1.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "futures-cpupool"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"futures 0.1.25 (registry+https://github.com/rust-lang/crates.io-index)",
"num_cpus 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "gcc"
version = "0.3.55"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "generic-array"
version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"nodrop 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
"typenum 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "hex"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "hex-literal"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"hex-literal-impl 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"proc-macro-hack 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "hex-literal-impl"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"proc-macro-hack 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "itertools"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"either 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "itoa"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "lazy_static"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "libc"
version = "0.2.49"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "memmap"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"libc 0.2.49 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "memoffset"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "nodrop"
version = "0.1.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "num-bigint"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"num-integer 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)",
"num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "num-integer"
version = "0.1.39"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "num-traits"
version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "num_cpus"
version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"libc 0.2.49 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "pairing"
version = "0.16.2"
source = "git+https://github.com/matterinc/pairing?tag=0.16.2#c2af46cac3e6ebc8e1e1f37bb993e5e6c7f689d1"
dependencies = [
"byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"ff 0.5.0 (git+https://github.com/matterinc/ff?tag=0.5)",
"hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.88 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.88 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.38 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "powersoftau"
version = "0.2.0"
dependencies = [
"bellman 0.2.0 (git+https://github.com/matterinc/bellman?tag=0.2.0)",
"blake2 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"crossbeam 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
"generic-array 0.8.3 (registry+https://github.com/rust-lang/crates.io-index)",
"hex-literal 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
"memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"num_cpus 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
"typenum 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "proc-macro-hack"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"proc-macro-hack-impl 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "proc-macro-hack-impl"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "proc-macro2"
version = "0.4.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "quote"
version = "0.6.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"proc-macro2 0.4.27 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rand"
version = "0.3.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"libc 0.2.49 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rand"
version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.49 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rand_core"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rand_core"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "rdrand"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "redox_syscall"
version = "0.1.51"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "rust-crypto"
version = "0.2.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"gcc 0.3.55 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.49 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.23 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rustc-serialize"
version = "0.3.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "ryu"
version = "0.2.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "scopeguard"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "serde"
version = "1.0.88"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "serde_derive"
version = "1.0.88"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"proc-macro2 0.4.27 (registry+https://github.com/rust-lang/crates.io-index)",
"quote 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)",
"syn 0.15.26 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "serde_json"
version = "1.0.38"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"itoa 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
"ryu 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.88 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "smallvec"
version = "0.6.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "syn"
version = "0.14.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"proc-macro2 0.4.27 (registry+https://github.com/rust-lang/crates.io-index)",
"quote 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)",
"unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "syn"
version = "0.15.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"proc-macro2 0.4.27 (registry+https://github.com/rust-lang/crates.io-index)",
"quote 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)",
"unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "time"
version = "0.1.42"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"libc 0.2.49 (registry+https://github.com/rust-lang/crates.io-index)",
"redox_syscall 0.1.51 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "typenum"
version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "unicode-xid"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "winapi"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[metadata]
"checksum arrayvec 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)" = "92c7fb76bc8826a8b33b4ee5bb07a247a81e76764ab4d55e8f73e3a4d8808c71"
"checksum bellman 0.2.0 (git+https://github.com/matterinc/bellman?tag=0.2.0)" = "<none>"
"checksum bit-vec 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "02b4ff8b16e6076c3e14220b39fbc1fabb6737522281a388998046859400895f"
"checksum blake2 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "53bf612c0f2839b7e764ebac65d6cb985f7c6812de399d0728038f4b1da141bc"
"checksum blake2-rfc 0.2.18 (git+https://github.com/gtank/blake2-rfc?rev=7a5b5fc99ae483a0043db7547fb79a6fa44b88a9)" = "<none>"
"checksum byte-tools 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "560c32574a12a89ecd91f5e742165893f86e3ab98d21f8ea548658eb9eef5f40"
"checksum byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a019b10a2a7cdeb292db131fc8113e57ea2a908f6e7894b0c3c671893b65dbeb"
"checksum cfg-if 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "11d43355396e872eefb45ce6342e4374ed7bc2b3a502d1b28e36d6e23c05d1f4"
"checksum constant_time_eq 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8ff012e225ce166d4422e0e78419d901719760f62ae2b7969ca6b564d1b54a9e"
"checksum crossbeam 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "24ce9782d4d5c53674646a6a4c1863a21a8fc0cb649b3c94dfc16e45071dea19"
"checksum crossbeam 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b14492071ca110999a20bf90e3833406d5d66bfd93b4e52ec9539025ff43fe0d"
"checksum crossbeam-channel 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "0f0ed1a4de2235cabda8558ff5840bffb97fcb64c97827f354a451307df5f72b"
"checksum crossbeam-deque 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b18cd2e169ad86297e6bc0ad9aa679aee9daa4f19e8163860faf7c164e4f5a71"
"checksum crossbeam-epoch 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "04c9e3102cc2d69cd681412141b390abd55a362afc1540965dad0ad4d34280b4"
"checksum crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7c979cd6cfe72335896575c6b5688da489e420d36a27a0b9eb0c73db574b4a4b"
"checksum crossbeam-utils 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "f8306fcef4a7b563b76b7dd949ca48f52bc1141aa067d2ea09565f3e2652aa5c"
"checksum crypto-mac 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "779015233ac67d65098614aec748ac1c756ab6677fa2e14cf8b37c08dfed1198"
"checksum digest 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e5b29bf156f3f4b3c4f610a25ff69370616ae6e0657d416de22645483e72af0a"
"checksum either 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c67353c641dc847124ea1902d69bd753dee9bb3beff9aa3662ecf86c971d1fac"
"checksum ff 0.5.0 (git+https://github.com/matterinc/ff?tag=0.5)" = "<none>"
"checksum ff_derive 0.4.0 (git+https://github.com/matterinc/ff?tag=0.5)" = "<none>"
"checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"
"checksum futures 0.1.25 (registry+https://github.com/rust-lang/crates.io-index)" = "49e7653e374fe0d0c12de4250f0bdb60680b8c80eed558c5c7538eec9c89e21b"
"checksum futures-cpupool 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "ab90cde24b3319636588d0c35fe03b1333857621051837ed769faefb4c2162e4"
"checksum gcc 0.3.55 (registry+https://github.com/rust-lang/crates.io-index)" = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2"
"checksum generic-array 0.8.3 (registry+https://github.com/rust-lang/crates.io-index)" = "fceb69994e330afed50c93524be68c42fa898c2d9fd4ee8da03bd7363acd26f2"
"checksum hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "805026a5d0141ffc30abb3be3173848ad46a1b1664fe632428479619a3644d77"
"checksum hex-literal 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "27455ce8b4a6666c87220e4b59c9a83995476bdadc10197905e61dbe906e36fa"
"checksum hex-literal-impl 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1d340b6514f232f6db1bd16db65302a5278a04fef9ce867cb932e7e5fa21130a"
"checksum itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5b8467d9c1cebe26feb08c640139247fac215782d35371ade9a2136ed6085358"
"checksum itoa 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "1306f3464951f30e30d12373d31c79fbd52d236e5e896fd92f96ec7babbbe60b"
"checksum lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bc5729f27f159ddd61f4df6228e827e86643d4d3e7c32183cb30a1c08f604a14"
"checksum libc 0.2.49 (registry+https://github.com/rust-lang/crates.io-index)" = "413f3dfc802c5dc91dc570b05125b6cda9855edfaa9825c9849807876376e70e"
"checksum memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b"
"checksum memoffset 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0f9dc261e2b62d7a622bf416ea3c5245cdd5d9a7fcc428c0d06804dfce1775b3"
"checksum nodrop 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "2f9667ddcc6cc8a43afc9b7917599d7216aa09c463919ea32c59ed6cac8bc945"
"checksum num-bigint 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "57450397855d951f1a41305e54851b1a7b8f5d2e349543a02a2effe25459f718"
"checksum num-integer 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)" = "e83d528d2677f0518c570baf2b7abdcf0cd2d248860b68507bdcb3e91d4c0cea"
"checksum num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0b3a5d7cc97d6d30d8b9bc8fa19bf45349ffe46241e8816f50f62f6d6aaabee1"
"checksum num_cpus 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1a23f0ed30a54abaa0c7e83b1d2d87ada7c3c23078d1d87815af3e3b6385fbba"
"checksum pairing 0.16.2 (git+https://github.com/matterinc/pairing?tag=0.16.2)" = "<none>"
"checksum proc-macro-hack 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2c725b36c99df7af7bf9324e9c999b9e37d92c8f8caf106d82e1d7953218d2d8"
"checksum proc-macro-hack-impl 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2b753ad9ed99dd8efeaa7d2fb8453c8f6bc3e54b97966d35f1bc77ca6865254a"
"checksum proc-macro2 0.4.27 (registry+https://github.com/rust-lang/crates.io-index)" = "4d317f9caece796be1980837fd5cb3dfec5613ebdb04ad0956deea83ce168915"
"checksum quote 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)" = "cdd8e04bd9c52e0342b406469d494fcb033be4bdbe5c606016defbb1681411e1"
"checksum rand 0.3.23 (registry+https://github.com/rust-lang/crates.io-index)" = "64ac302d8f83c0c1974bf758f6b041c6c8ada916fbb44a609158ca8b064cc76c"
"checksum rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293"
"checksum rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b"
"checksum rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d0e7a549d590831370895ab7ba4ea0c1b6b011d106b5ff2da6eee112615e6dc0"
"checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2"
"checksum redox_syscall 0.1.51 (registry+https://github.com/rust-lang/crates.io-index)" = "423e376fffca3dfa06c9e9790a9ccd282fafb3cc6e6397d01dbf64f9bacc6b85"
"checksum rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)" = "f76d05d3993fd5f4af9434e8e436db163a12a9d40e1a58a726f27a01dfd12a2a"
"checksum rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)" = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda"
"checksum ryu 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "eb9e9b8cde282a9fe6a42dd4681319bfb63f121b8a8ee9439c6f4107e58a46f7"
"checksum scopeguard 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "94258f53601af11e6a49f722422f6e3425c52b06245a5cf9bc09908b174f5e27"
"checksum serde 1.0.88 (registry+https://github.com/rust-lang/crates.io-index)" = "9f301d728f2b94c9a7691c90f07b0b4e8a4517181d9461be94c04bddeb4bd850"
"checksum serde_derive 1.0.88 (registry+https://github.com/rust-lang/crates.io-index)" = "beed18e6f5175aef3ba670e57c60ef3b1b74d250d962a26604bff4c80e970dd4"
"checksum serde_json 1.0.38 (registry+https://github.com/rust-lang/crates.io-index)" = "27dce848e7467aa0e2fcaf0a413641499c0b745452aaca1194d24dedde9e13c9"
"checksum smallvec 0.6.9 (registry+https://github.com/rust-lang/crates.io-index)" = "c4488ae950c49d403731982257768f48fada354a5203fe81f9bb6f43ca9002be"
"checksum syn 0.14.9 (registry+https://github.com/rust-lang/crates.io-index)" = "261ae9ecaa397c42b960649561949d69311f08eeaea86a65696e6e46517cf741"
"checksum syn 0.15.26 (registry+https://github.com/rust-lang/crates.io-index)" = "f92e629aa1d9c827b2bb8297046c1ccffc57c99b947a680d3ccff1f136a3bee9"
"checksum time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)" = "db8dcfca086c1143c9270ac42a2bbd8a7ee477b78ac8e45b19abfb0cbede4b6f"
"checksum typenum 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "612d636f949607bdf9b123b4a6f6d966dedf3ff669f7f045890d3a4a73948169"
"checksum unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc"
"checksum winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "92c1eb33641e276cfa214a0522acad57be5c56b10cb348b3c5117db75f3ac4b0"
"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"

powersoftau/Cargo.toml (new file, 30 lines)

@@ -0,0 +1,30 @@
[package]
name = "powersoftau"
version = "0.2.0"
authors = ["Sean Bowe", "Alex Vlasov"]
license = "MIT/Apache-2.0"
edition = "2018"
description = "Communal zk-SNARK MPC for Public Parameters"
documentation = "https://docs.rs/powersoftau/"
homepage = "https://github.com/matter-labs/powersoftau"
repository = "https://github.com/matter-labs/powersoftau"
[dependencies]
rand = "0.4"
crossbeam = "0.3.0"
num_cpus = "1.7.0"
blake2 = "0.6.1"
generic-array = "0.8.3"
typenum = "1.9.0"
byteorder = "1.1.0"
hex-literal = "0.1"
rust-crypto = "0.2"
memmap = "0.7.0"
itertools = "0.8.0"
bellman = { git = 'https://github.com/matterinc/bellman', tag = "0.2.0"}
[features]

powersoftau/LICENSE-APACHE (new file, 201 lines)

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

powersoftau/LICENSE-MIT (new file, 23 lines)

@@ -0,0 +1,23 @@
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

powersoftau/README.md (new file, 85 lines)

@@ -0,0 +1,85 @@
# Powers of Tau
## Original story
This is a [multi-party computation](https://en.wikipedia.org/wiki/Secure_multi-party_computation) (MPC) ceremony which constructs partial zk-SNARK parameters for _all_ circuits up to a depth of 2<sup>21</sup>. It works by taking a step that is performed by all zk-SNARK MPCs and performing it in just one single ceremony. This makes individual zk-SNARK MPCs much cheaper and allows them to scale to practically unbounded numbers of participants.
This protocol is described in a [forthcoming paper](https://eprint.iacr.org/2017/1050). It produces parameters for an adaptation of [Jens Groth's 2016 pairing-based proving system](https://eprint.iacr.org/2016/260) using the [BLS12-381](https://github.com/ebfull/pairing/tree/master/src/bls12_381) elliptic curve construction. The security proof relies on a randomness beacon being applied at the end of the ceremony.
## Contributions
This fork extends the ceremony to support Ethereum's BN256 curve and makes it easier to change the size of the ceremony. In addition, the proof generation process can now run in memory-constrained environments. As a benchmark, a `2^26` powers-of-tau ceremony on the BN256 curve takes around `1.3 GB` of memory and roughly `3 hours` on a personal laptop.
## Instructions
Instructions for a planned ceremony will be posted when everything is tested and finalized.
---
## To run the ceremony on your laptop:
1. Preparation:
```
rustup update # tested on rustup 1.17.0
cargo build
```
2. Put the `response` file from the previous ceremony into the repository root directory.
3. To generate the `new_challenge` file, run:
```
cargo run --release --bin verify_transform_constrained # this will generate new_challenge from response file
```
4. Back up the old files and replace the `challenge` file:
```
mv challenge challenge_old
mv response response_old
mv new_challenge challenge
```
5. Run the ceremony:
```
cargo run --release --bin compute_constrained # generate response file
```
Post the hash of your `response` file (printed in the tool's output) to a private gist (example: https://gist.github.com/skywinder/c35ab03c66c6b200b33ea2f388a6df89); a way to recompute it is sketched after these steps.
6. Reboot the laptop to clean up the toxic waste (your secret randomness).
7. Save the `response` file and give it to the next participant.
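To double-check the hash you post in step 5, you can recompute the BLAKE2b hash of the `response` file yourself. A minimal sketch, assuming GNU coreutils' `b2sum` is available; the hash printed by `compute_constrained` is expected to be the BLAKE2b-512 digest of the file, so the two should match:
```
b2sum response   # prints the BLAKE2b-512 hash of the response file
```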
## Recommendations from original ceremony
Participants of the ceremony sample some randomness, perform a computation, and then destroy the randomness. **Only one participant needs to do this successfully to ensure the final parameters are secure.** In order to see that this randomness is truly destroyed, participants may take various kinds of precautions:
* putting the machine in a Faraday cage
* destroying the machine afterwards
* running the software on secure hardware
* not connecting the hardware to any networks
* using multiple machines and randomly picking the result of one of them to use
* using different code than what we have provided
* using a secure operating system
* using an operating system that nobody would expect you to use (Rust can compile to Mac OS X and Windows)
* using an unusual Rust toolchain or [alternate rust compiler](https://github.com/thepowersgang/mrustc)
* lots of other ideas we can't think of
It is totally up to the participants. In general, participants should beware of side-channel attacks and assume that remnants of the randomness will be in RAM after the computation has finished.
## License
Licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
### Contribution
Unless you explicitly state otherwise, any contribution intentionally
submitted for inclusion in the work by you, as defined in the Apache-2.0
license, shall be dual licensed as above, without any additional terms or
conditions.

@@ -0,0 +1,464 @@
//! This ceremony constructs the "powers of tau" for Jens Groth's 2016 zk-SNARK proving
//! system using the BLS12-381 pairing-friendly elliptic curve construction.
//!
//! # Overview
//!
//! Participants of the ceremony receive a "challenge" file containing:
//!
//! * the BLAKE2b hash of the last file entered into the transcript
//! * an `Accumulator` (with curve points encoded in uncompressed form for fast deserialization)
//!
//! The participant runs a tool which generates a random keypair (`PublicKey`, `PrivateKey`)
//! used for modifying the `Accumulator` from the "challenge" file. The keypair is then used to
//! transform the `Accumulator`, and a "response" file is generated containing:
//!
//! * the BLAKE2b hash of the "challenge" file (thus forming a hash chain over the entire transcript)
//! * an `Accumulator` (with curve points encoded in compressed form for fast uploading)
//! * the `PublicKey`
//!
//! This "challenge" file is entered into the protocol transcript. A given transcript is valid
//! if the transformations between consecutive `Accumulator`s verify with their respective
//! `PublicKey`s. Participants (and the public) can ensure that their contribution to the
//! `Accumulator` was accepted by ensuring the transcript contains their "response" file, ideally
//! by comparison of the BLAKE2b hash of the "response" file.
//!
//! After some time has elapsed for participants to contribute to the ceremony, a participant is
//! simulated with a randomness beacon. The resulting `Accumulator` contains partial zk-SNARK
//! public parameters for all circuits within a bounded size.
extern crate rand;
extern crate crossbeam;
extern crate num_cpus;
extern crate blake2;
extern crate generic_array;
extern crate typenum;
extern crate byteorder;
extern crate bellman;
extern crate memmap;
use memmap::{Mmap, MmapMut};
use bellman::pairing::ff::{Field, PrimeField};
use byteorder::{ReadBytesExt, BigEndian};
use rand::{SeedableRng, Rng, Rand};
use rand::chacha::ChaChaRng;
use bellman::pairing::bn256::{Bn256};
use bellman::pairing::*;
use std::io::{self, Read, Write};
use std::sync::{Arc, Mutex};
use generic_array::GenericArray;
use typenum::consts::U64;
use blake2::{Blake2b, Digest};
use std::fmt;
use super::keypair::*;
use super::utils::*;
use super::parameters::*;
/// The `Accumulator` is an object that participants of the ceremony contribute
/// randomness to. This object contains powers of trapdoor `tau` in G1 and in G2 over
/// fixed generators, and additionally in G1 over two other generators of exponents
/// `alpha` and `beta` over those fixed generators. In other words:
///
/// * (τ, τ<sup>2</sup>, ..., τ<sup>2<sup>22</sup> - 2</sup>, α, ατ, ατ<sup>2</sup>, ..., ατ<sup>2<sup>21</sup> - 1</sup>, β, βτ, βτ<sup>2</sup>, ..., βτ<sup>2<sup>21</sup> - 1</sup>)<sub>1</sub>
/// * (β, τ, τ<sup>2</sup>, ..., τ<sup>2<sup>21</sup> - 1</sup>)<sub>2</sub>
#[derive(Eq, Clone)]
pub struct Accumulator<E: Engine, P: PowersOfTauParameters> {
/// tau^0, tau^1, tau^2, ..., tau^{TAU_POWERS_G1_LENGTH - 1}
pub tau_powers_g1: Vec<E::G1Affine>,
/// tau^0, tau^1, tau^2, ..., tau^{TAU_POWERS_LENGTH - 1}
pub tau_powers_g2: Vec<E::G2Affine>,
/// alpha * tau^0, alpha * tau^1, alpha * tau^2, ..., alpha * tau^{TAU_POWERS_LENGTH - 1}
pub alpha_tau_powers_g1: Vec<E::G1Affine>,
/// beta * tau^0, beta * tau^1, beta * tau^2, ..., beta * tau^{TAU_POWERS_LENGTH - 1}
pub beta_tau_powers_g1: Vec<E::G1Affine>,
/// beta
pub beta_g2: E::G2Affine,
/// Keep parameters here
pub parameters: P
}
impl<E: Engine, P: PowersOfTauParameters> PartialEq for Accumulator<E, P> {
fn eq(&self, other: &Accumulator<E, P>) -> bool {
self.tau_powers_g1.eq(&other.tau_powers_g1) &&
self.tau_powers_g2.eq(&other.tau_powers_g2) &&
self.alpha_tau_powers_g1.eq(&other.alpha_tau_powers_g1) &&
self.beta_tau_powers_g1.eq(&other.beta_tau_powers_g1) &&
self.beta_g2 == other.beta_g2
}
}
impl<E:Engine, P: PowersOfTauParameters> Accumulator<E, P> {
/// Constructs an "initial" accumulator with τ = 1, α = 1, β = 1.
pub fn new(parameters: P) -> Self {
Accumulator {
tau_powers_g1: vec![E::G1Affine::one(); P::TAU_POWERS_G1_LENGTH],
tau_powers_g2: vec![E::G2Affine::one(); P::TAU_POWERS_LENGTH],
alpha_tau_powers_g1: vec![E::G1Affine::one(); P::TAU_POWERS_LENGTH],
beta_tau_powers_g1: vec![E::G1Affine::one(); P::TAU_POWERS_LENGTH],
beta_g2: E::G2Affine::one(),
parameters: parameters
}
}
/// Write the accumulator with some compression behavior.
pub fn serialize<W: Write>(
&self,
writer: &mut W,
compression: UseCompression
) -> io::Result<()>
{
fn write_all<W: Write, C: CurveAffine>(
writer: &mut W,
c: &[C],
compression: UseCompression
) -> io::Result<()>
{
for c in c {
write_point(writer, c, compression)?;
}
Ok(())
}
write_all(writer, &self.tau_powers_g1, compression)?;
write_all(writer, &self.tau_powers_g2, compression)?;
write_all(writer, &self.alpha_tau_powers_g1, compression)?;
write_all(writer, &self.beta_tau_powers_g1, compression)?;
write_all(writer, &[self.beta_g2], compression)?;
Ok(())
}
/// Read the accumulator from disk with some compression behavior. `checked`
/// indicates whether we should check it's a valid element of the group and
/// not the point at infinity.
pub fn deserialize<R: Read>(
reader: &mut R,
compression: UseCompression,
checked: CheckForCorrectness,
parameters: P
) -> Result<Self, DeserializationError>
{
fn read_all<EE: Engine, R: Read, C: CurveAffine<Engine = EE, Scalar = EE::Fr> > (
reader: &mut R,
size: usize,
compression: UseCompression,
checked: CheckForCorrectness
) -> Result<Vec<C>, DeserializationError>
{
fn decompress_all<R: Read, ENC: EncodedPoint>(
reader: &mut R,
size: usize,
checked: CheckForCorrectness
) -> Result<Vec<ENC::Affine>, DeserializationError>
{
// Read the encoded elements
let mut res = vec![ENC::empty(); size];
for encoded in &mut res {
reader.read_exact(encoded.as_mut())?;
}
// Allocate space for the deserialized elements
let mut res_affine = vec![ENC::Affine::zero(); size];
let mut chunk_size = res.len() / num_cpus::get();
if chunk_size == 0 {
chunk_size = 1;
}
// If any of our threads encounter a deserialization/IO error, catch
// it with this.
let decoding_error = Arc::new(Mutex::new(None));
crossbeam::scope(|scope| {
for (source, target) in res.chunks(chunk_size).zip(res_affine.chunks_mut(chunk_size)) {
let decoding_error = decoding_error.clone();
scope.spawn(move || {
for (source, target) in source.iter().zip(target.iter_mut()) {
match {
// If we're a participant, we don't need to check all of the
// elements in the accumulator, which saves a lot of time.
// The hash chain prevents this from being a problem: the
// transcript guarantees that the accumulator was properly
// formed.
match checked {
CheckForCorrectness::Yes => {
// Points at infinity are never expected in the accumulator
source.into_affine().map_err(|e| e.into()).and_then(|source| {
if source.is_zero() {
Err(DeserializationError::PointAtInfinity)
} else {
Ok(source)
}
})
},
CheckForCorrectness::No => source.into_affine_unchecked().map_err(|e| e.into())
}
}
{
Ok(source) => {
*target = source;
},
Err(e) => {
*decoding_error.lock().unwrap() = Some(e);
}
}
}
});
}
});
match Arc::try_unwrap(decoding_error).unwrap().into_inner().unwrap() {
Some(e) => {
Err(e)
},
None => {
Ok(res_affine)
}
}
}
match compression {
UseCompression::Yes => decompress_all::<_, C::Compressed>(reader, size, checked),
UseCompression::No => decompress_all::<_, C::Uncompressed>(reader, size, checked)
}
}
let tau_powers_g1 = read_all::<E, _, _>(reader, P::TAU_POWERS_G1_LENGTH, compression, checked)?;
let tau_powers_g2 = read_all::<E, _, _>(reader, P::TAU_POWERS_LENGTH, compression, checked)?;
let alpha_tau_powers_g1 = read_all::<E, _, _>(reader, P::TAU_POWERS_LENGTH, compression, checked)?;
let beta_tau_powers_g1 = read_all::<E, _, _>(reader, P::TAU_POWERS_LENGTH, compression, checked)?;
let beta_g2 = read_all::<E, _, _>(reader, 1, compression, checked)?[0];
Ok(Accumulator {
tau_powers_g1: tau_powers_g1,
tau_powers_g2: tau_powers_g2,
alpha_tau_powers_g1: alpha_tau_powers_g1,
beta_tau_powers_g1: beta_tau_powers_g1,
beta_g2: beta_g2,
parameters: parameters
})
}
/// Transforms the accumulator with a private key.
pub fn transform(&mut self, key: &PrivateKey<E>)
{
// Construct the powers of tau
let mut taupowers = vec![E::Fr::zero(); P::TAU_POWERS_G1_LENGTH];
let chunk_size = P::TAU_POWERS_G1_LENGTH / num_cpus::get();
// Construct exponents in parallel
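// Each worker starts its chunk at tau^(i * chunk_size) and repeatedly multiplies by tau,
// so the chunks stitch together into the consecutive powers tau^0 ... tau^{TAU_POWERS_G1_LENGTH - 1}.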
crossbeam::scope(|scope| {
for (i, taupowers) in taupowers.chunks_mut(chunk_size).enumerate() {
scope.spawn(move || {
let mut acc = key.tau.pow(&[(i * chunk_size) as u64]);
for t in taupowers {
*t = acc;
acc.mul_assign(&key.tau);
}
});
}
});
/// Exponentiate a large number of points, with an optional coefficient to be applied to the
/// exponent.
fn batch_exp<EE: Engine, C: CurveAffine<Engine = EE, Scalar = EE::Fr> >(bases: &mut [C], exp: &[C::Scalar], coeff: Option<&C::Scalar>) {
assert_eq!(bases.len(), exp.len());
let mut projective = vec![C::Projective::zero(); bases.len()];
let chunk_size = bases.len() / num_cpus::get();
// Perform wNAF over multiple cores, placing results into `projective`.
crossbeam::scope(|scope| {
for ((bases, exp), projective) in bases.chunks_mut(chunk_size)
.zip(exp.chunks(chunk_size))
.zip(projective.chunks_mut(chunk_size))
{
scope.spawn(move || {
let mut wnaf = Wnaf::new();
for ((base, exp), projective) in bases.iter_mut()
.zip(exp.iter())
.zip(projective.iter_mut())
{
let mut exp = *exp;
if let Some(coeff) = coeff {
exp.mul_assign(coeff);
}
*projective = wnaf.base(base.into_projective(), 1).scalar(exp.into_repr());
}
});
}
});
// Perform batch normalization
crossbeam::scope(|scope| {
for projective in projective.chunks_mut(chunk_size)
{
scope.spawn(move || {
C::Projective::batch_normalization(projective);
});
}
});
// Turn it all back into affine points
for (projective, affine) in projective.iter().zip(bases.iter_mut()) {
*affine = projective.into_affine();
}
}
batch_exp::<E, _>(&mut self.tau_powers_g1, &taupowers[0..], None);
batch_exp::<E, _>(&mut self.tau_powers_g2, &taupowers[0..P::TAU_POWERS_LENGTH], None);
batch_exp::<E, _>(&mut self.alpha_tau_powers_g1, &taupowers[0..P::TAU_POWERS_LENGTH], Some(&key.alpha));
batch_exp::<E, _>(&mut self.beta_tau_powers_g1, &taupowers[0..P::TAU_POWERS_LENGTH], Some(&key.beta));
self.beta_g2 = self.beta_g2.mul(key.beta).into_affine();
}
}
/// Verifies a transformation of the `Accumulator` with the `PublicKey`, given a 64-byte transcript `digest`.
pub fn verify_transform<E: Engine, P: PowersOfTauParameters>(before: &Accumulator<E, P>, after: &Accumulator<E, P>, key: &PublicKey<E>, digest: &[u8]) -> bool
{
assert_eq!(digest.len(), 64);
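// Each proof-of-knowledge below uses a G2 point derived, Fiat-Shamir style, from the transcript
// digest, the participant's pair of G1 points and a personalization byte, binding the key to
// this position in the transcript.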
let compute_g2_s = |g1_s: E::G1Affine, g1_s_x: E::G1Affine, personalization: u8| {
let mut h = Blake2b::default();
h.input(&[personalization]);
h.input(digest);
h.input(g1_s.into_uncompressed().as_ref());
h.input(g1_s_x.into_uncompressed().as_ref());
hash_to_g2::<E>(h.result().as_ref()).into_affine()
};
let tau_g2_s = compute_g2_s(key.tau_g1.0, key.tau_g1.1, 0);
let alpha_g2_s = compute_g2_s(key.alpha_g1.0, key.alpha_g1.1, 1);
let beta_g2_s = compute_g2_s(key.beta_g1.0, key.beta_g1.1, 2);
// Check the proofs-of-knowledge for tau/alpha/beta
// g1^s / g1^(s*x) = g2^s / g2^(s*x)
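// (Here same_ratio((a, b), (c, d)) is expected to hold exactly when b and d are a and c raised
// to the same secret exponent, which can be checked with a single pairing equation e(a, d) == e(b, c).)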
if !same_ratio(key.tau_g1, (tau_g2_s, key.tau_g2)) {
return false;
}
if !same_ratio(key.alpha_g1, (alpha_g2_s, key.alpha_g2)) {
return false;
}
if !same_ratio(key.beta_g1, (beta_g2_s, key.beta_g2)) {
return false;
}
// Check the correctness of the generators for tau powers
if after.tau_powers_g1[0] != E::G1Affine::one() {
return false;
}
if after.tau_powers_g2[0] != E::G2Affine::one() {
return false;
}
// Did the participant multiply the previous tau by the new one?
if !same_ratio((before.tau_powers_g1[1], after.tau_powers_g1[1]), (tau_g2_s, key.tau_g2)) {
return false;
}
// Did the participant multiply the previous alpha by the new one?
if !same_ratio((before.alpha_tau_powers_g1[0], after.alpha_tau_powers_g1[0]), (alpha_g2_s, key.alpha_g2)) {
return false;
}
// Did the participant multiply the previous beta by the new one?
if !same_ratio((before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (beta_g2_s, key.beta_g2)) {
return false;
}
if !same_ratio((before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (before.beta_g2, after.beta_g2)) {
return false;
}
// Are the powers of tau correct?
if !same_ratio(power_pairs(&after.tau_powers_g1), (after.tau_powers_g2[0], after.tau_powers_g2[1])) {
return false;
}
if !same_ratio(power_pairs(&after.tau_powers_g2), (after.tau_powers_g1[0], after.tau_powers_g1[1])) {
return false;
}
if !same_ratio(power_pairs(&after.alpha_tau_powers_g1), (after.tau_powers_g2[0], after.tau_powers_g2[1])) {
return false;
}
if !same_ratio(power_pairs(&after.beta_tau_powers_g1), (after.tau_powers_g2[0], after.tau_powers_g2[1])) {
return false;
}
true
}
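// A hedged, illustrative sketch (not part of the ceremony code) of the identity the
// `same_ratio` and `power_pairs` helpers from utils.rs are assumed to rely on:
// `same_ratio((a, b), (c, d))` should hold exactly when b = a^x and d = c^x for the same
// unknown exponent x, which a pairing makes checkable as e(a, d) == e(b, c).
//
// fn demo_same_ratio_identity<E: Engine>(x: E::Fr) {
//     // Hypothetical demonstration: both pairs are built from the same exponent `x`.
//     let g1 = E::G1Affine::one();
//     let g2 = E::G2Affine::one();
//     let g1_x = g1.mul(x).into_affine();
//     let g2_x = g2.mul(x).into_affine();
//     // e(g1, g2^x) == e(g1^x, g2) holds precisely because both ratios equal x.
//     assert!(g1.pairing_with(&g2_x) == g1_x.pairing_with(&g2));
// }
//
// `power_pairs(&v)` is assumed to compress all consecutive pairs (v[i], v[i+1]) into a single
// random linear combination, so one `same_ratio` call against (g, g^tau) in the other group
// checks every consecutive ratio in `v` with overwhelming probability.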
/// Abstraction over a reader which hashes the data being read.
pub struct HashReader<R: Read> {
reader: R,
hasher: Blake2b
}
impl<R: Read> HashReader<R> {
/// Construct a new `HashReader` given an existing `reader` by value.
pub fn new(reader: R) -> Self {
HashReader {
reader: reader,
hasher: Blake2b::default()
}
}
/// Destroy this reader and return the hash of what was read.
pub fn into_hash(self) -> GenericArray<u8, U64> {
self.hasher.result()
}
}
impl<R: Read> Read for HashReader<R> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let bytes = self.reader.read(buf)?;
if bytes > 0 {
self.hasher.input(&buf[0..bytes]);
}
Ok(bytes)
}
}
/// Abstraction over a writer which hashes the data being written.
pub struct HashWriter<W: Write> {
writer: W,
hasher: Blake2b
}
impl<W: Write> HashWriter<W> {
/// Construct a new `HashWriter` given an existing `writer` by value.
pub fn new(writer: W) -> Self {
HashWriter {
writer: writer,
hasher: Blake2b::default()
}
}
/// Destroy this writer and return the hash of what was written.
pub fn into_hash(self) -> GenericArray<u8, U64> {
self.hasher.result()
}
}
impl<W: Write> Write for HashWriter<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let bytes = self.writer.write(buf)?;
if bytes > 0 {
self.hasher.input(&buf[0..bytes]);
}
Ok(bytes)
}
fn flush(&mut self) -> io::Result<()> {
self.writer.flush()
}
}
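// Illustrative usage of the hashing wrappers above (hedged; the file name is hypothetical):
//
// fn hash_whole_file() -> std::io::Result<()> {
//     use std::fs::File;
//     use std::io::{copy, sink};
//     // Wrap a reader so that everything read through it is also fed into BLAKE2b.
//     let mut reader = HashReader::new(File::open("challenge")?);
//     copy(&mut reader, &mut sink())?;
//     let digest = reader.into_hash(); // 64-byte BLAKE2b hash of the bytes that were read
//     println!("{:02x?}", digest.as_slice());
//     Ok(())
// }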

@ -0,0 +1,843 @@
/// Memory-constrained accumulator that verifies the initial information in chunks that fit into memory,
/// and then contributes randomness in chunks as well.
extern crate rand;
extern crate crossbeam;
extern crate num_cpus;
extern crate blake2;
extern crate generic_array;
extern crate typenum;
extern crate byteorder;
extern crate bellman;
extern crate memmap;
extern crate itertools;
use itertools::Itertools;
use memmap::{Mmap, MmapMut};
use bellman::pairing::ff::{Field, PrimeField};
use byteorder::{ReadBytesExt, BigEndian};
use rand::{SeedableRng, Rng, Rand};
use rand::chacha::ChaChaRng;
use bellman::pairing::bn256::{Bn256};
use bellman::pairing::*;
use std::io::{self, Read, Write};
use std::sync::{Arc, Mutex};
use generic_array::GenericArray;
use typenum::consts::U64;
use blake2::{Blake2b, Digest};
use std::fmt;
use super::keypair::*;
use super::utils::*;
use super::parameters::*;
pub enum AccumulatorState {
Empty,
NonEmpty,
Transformed,
}
/// The `Accumulator` is an object that participants of the ceremony contribute
/// randomness to. This object contains powers of trapdoor `tau` in G1 and in G2 over
/// fixed generators, and additionally in G1 over two other generators of exponents
/// `alpha` and `beta` over those fixed generators. In other words:
///
/// * (τ, τ<sup>2</sup>, ..., τ<sup>2<sup>22</sup> - 2</sup>, α, ατ, ατ<sup>2</sup>, ..., ατ<sup>2<sup>21</sup> - 1</sup>, β, βτ, βτ<sup>2</sup>, ..., βτ<sup>2<sup>21</sup> - 1</sup>)<sub>1</sub>
/// * (β, τ, τ<sup>2</sup>, ..., τ<sup>2<sup>21</sup> - 1</sup>)<sub>2</sub>
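///
/// The exponent bounds above correspond to the original 2^21 setup; in general, under the
/// `PowersOfTauParameters` convention used in this crate, `TAU_POWERS_LENGTH` is presumably
/// `2^REQUIRED_POWER` and `TAU_POWERS_G1_LENGTH` is `2 * TAU_POWERS_LENGTH - 1` (enough G1
/// powers to cover the H query of a circuit with `TAU_POWERS_LENGTH` constraints).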
pub struct BachedAccumulator<E: Engine, P: PowersOfTauParameters> {
/// tau^0, tau^1, tau^2, ..., tau^{TAU_POWERS_G1_LENGTH - 1}
pub tau_powers_g1: Vec<E::G1Affine>,
/// tau^0, tau^1, tau^2, ..., tau^{TAU_POWERS_LENGTH - 1}
pub tau_powers_g2: Vec<E::G2Affine>,
/// alpha * tau^0, alpha * tau^1, alpha * tau^2, ..., alpha * tau^{TAU_POWERS_LENGTH - 1}
pub alpha_tau_powers_g1: Vec<E::G1Affine>,
/// beta * tau^0, beta * tau^1, beta * tau^2, ..., beta * tau^{TAU_POWERS_LENGTH - 1}
pub beta_tau_powers_g1: Vec<E::G1Affine>,
/// beta
pub beta_g2: E::G2Affine,
/// Hash chain hash
pub hash: GenericArray<u8, U64>,
/// Keep parameters here as a marker
marker: std::marker::PhantomData<P>,
}
impl<E:Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
/// Calculate the contribution hash from the resulting file. The original powers of tau implementation
/// used a specially formed writer to write to the file and calculate the hash on the fly, but the
/// memory-constrained implementation writes in no particular order, so a plain recalculation at the end
/// of the procedure is more efficient.
pub fn calculate_hash(
input_map: &Mmap
) -> GenericArray<u8, U64> {
let chunk_size = 1 << 30; // read by 1GB from map
let mut hasher = Blake2b::default();
for chunk in input_map.chunks(chunk_size) {
hasher.input(&chunk);
}
hasher.result()
}
}
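// Illustrative usage (hedged; the file name and parameter choice are hypothetical):
//
// let file = std::fs::File::open("challenge").expect("challenge file must exist");
// let map = unsafe { memmap::MmapOptions::new().map(&file).expect("must map the file") };
// let hash = BachedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&map);
//
// Because BLAKE2b is a streaming hash, feeding the map in 1 GB chunks yields the same digest
// as hashing the entire file in one call.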
impl<E:Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
pub fn empty() -> Self {
Self {
tau_powers_g1: vec![],
tau_powers_g2: vec![],
alpha_tau_powers_g1: vec![],
beta_tau_powers_g1: vec![],
beta_g2: E::G2Affine::zero(),
hash: blank_hash(),
marker: std::marker::PhantomData::<P>{}
}
}
}
impl<E:Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
fn g1_size(compression: UseCompression) -> usize {
match compression {
UseCompression::Yes => P::G1_COMPRESSED_BYTE_SIZE,
UseCompression::No => P::G1_UNCOMPRESSED_BYTE_SIZE,
}
}
fn g2_size(compression: UseCompression) -> usize {
match compression {
UseCompression::Yes => P::G2_COMPRESSED_BYTE_SIZE,
UseCompression::No => P::G2_UNCOMPRESSED_BYTE_SIZE,
}
}
fn get_size(element_type: ElementType, compression: UseCompression) -> usize {
match element_type {
ElementType::AlphaG1 | ElementType::BetaG1 | ElementType::TauG1 => Self::g1_size(compression),
ElementType::BetaG2 | ElementType::TauG2 => Self::g2_size(compression),
}
}
/// File expected structure
/// HASH_SIZE bytes for the hash of the contribution
/// TAU_POWERS_G1_LENGTH of G1 points
/// TAU_POWERS_LENGTH of G2 points
/// TAU_POWERS_LENGTH of G1 points for alpha
/// TAU_POWERS_LENGTH of G1 points for beta
/// One G2 point for beta
/// Public key appended to the end of file, but it's irrelevant for an accumulator itself
fn calculate_mmap_position(index: usize, element_type: ElementType, compression: UseCompression) -> usize {
let g1_size = Self::g1_size(compression);
let g2_size = Self::g2_size(compression);
let required_tau_g1_power = P::TAU_POWERS_G1_LENGTH;
let required_power = P::TAU_POWERS_LENGTH;
let position = match element_type {
ElementType::TauG1 => {
let mut position = 0;
position += g1_size * index;
assert!(index < P::TAU_POWERS_G1_LENGTH, format!("Index of TauG1 element written must not exceed {}, while it's {}", P::TAU_POWERS_G1_LENGTH, index));
position
},
ElementType::TauG2 => {
let mut position = 0;
position += g1_size * required_tau_g1_power;
assert!(index < P::TAU_POWERS_LENGTH, format!("Index of TauG2 element written must not exceed {}, while it's {}", P::TAU_POWERS_LENGTH, index));
position += g2_size * index;
position
},
ElementType::AlphaG1 => {
let mut position = 0;
position += g1_size * required_tau_g1_power;
position += g2_size * required_power;
assert!(index < P::TAU_POWERS_LENGTH, format!("Index of AlphaG1 element written must not exceed {}, while it's {}", P::TAU_POWERS_LENGTH, index));
position += g1_size * index;
position
},
ElementType::BetaG1 => {
let mut position = 0;
position += g1_size * required_tau_g1_power;
position += g2_size * required_power;
position += g1_size * required_power;
assert!(index < P::TAU_POWERS_LENGTH, format!("Index of BetaG1 element written must not exceed {}, while it's {}", P::TAU_POWERS_LENGTH, index));
position += g1_size * index;
position
},
ElementType::BetaG2 => {
let mut position = 0;
position += g1_size * required_tau_g1_power;
position += g2_size * required_power;
position += g1_size * required_power;
position += g1_size * required_power;
position
}
};
position + P::HASH_SIZE
}
}
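// Worked example of the layout above (hedged: the byte sizes are the usual uncompressed BN256
// sizes, G1 = 64 bytes and G2 = 128 bytes, and HASH_SIZE = 64; the actual values come from `P`):
//
//   TauG1 element i   starts at  64 + 64*i
//   TauG2 element i   starts at  64 + 64*TAU_POWERS_G1_LENGTH + 128*i
//   AlphaG1 element i starts at  64 + 64*TAU_POWERS_G1_LENGTH + 128*TAU_POWERS_LENGTH + 64*i
//   BetaG1 element i  starts at  the end of the AlphaG1 region + 64*i
//   BetaG2            starts at  the end of the BetaG1 region
//
// which is exactly the order in which `write_chunk` below emits the sections.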
impl<E:Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
/// Verifies a transformation of the `Accumulator` with the `PublicKey`, given a 64-byte transcript `digest`.
pub fn verify_transformation(
input_map: &Mmap,
output_map: &Mmap,
key: &PublicKey<E>,
digest: &[u8],
input_is_compressed: UseCompression,
output_is_compressed: UseCompression,
check_input_for_correctness: CheckForCorrectness,
check_output_for_correctness: CheckForCorrectness,
) -> bool
{
use itertools::MinMaxResult::{MinMax};
assert_eq!(digest.len(), 64);
let tau_g2_s = compute_g2_s::<E>(&digest, &key.tau_g1.0, &key.tau_g1.1, 0);
let alpha_g2_s = compute_g2_s::<E>(&digest, &key.alpha_g1.0, &key.alpha_g1.1, 1);
let beta_g2_s = compute_g2_s::<E>(&digest, &key.beta_g1.0, &key.beta_g1.1, 2);
// Check the proofs-of-knowledge for tau/alpha/beta
// g1^s / g1^(s*x) = g2^s / g2^(s*x)
if !same_ratio(key.tau_g1, (tau_g2_s, key.tau_g2)) {
println!("Invalid ratio key.tau_g1, (tau_g2_s, key.tau_g2)");
return false;
}
if !same_ratio(key.alpha_g1, (alpha_g2_s, key.alpha_g2)) {
println!("Invalid ratio key.alpha_g1, (alpha_g2_s, key.alpha_g2)");
return false;
}
if !same_ratio(key.beta_g1, (beta_g2_s, key.beta_g2)) {
println!("Invalid ratio key.beta_g1, (beta_g2_s, key.beta_g2)");
return false;
}
// Load accumulators AND perform computations
let mut before = Self::empty();
let mut after = Self::empty();
// these checks only touch a part of the accumulator, so read two elements
{
let chunk_size = 2;
before.read_chunk(0, chunk_size, input_is_compressed, check_input_for_correctness, &input_map).expect("must read a first chunk from `challenge`");
after.read_chunk(0, chunk_size, output_is_compressed, check_output_for_correctness, &output_map).expect("must read a first chunk from `response`");
// Check the correctness of the generators for tau powers
if after.tau_powers_g1[0] != E::G1Affine::one() {
println!("tau_powers_g1[0] != 1");
return false;
}
if after.tau_powers_g2[0] != E::G2Affine::one() {
println!("tau_powers_g2[0] != 1");
return false;
}
// Did the participant multiply the previous tau by the new one?
if !same_ratio((before.tau_powers_g1[1], after.tau_powers_g1[1]), (tau_g2_s, key.tau_g2)) {
println!("Invalid ratio (before.tau_powers_g1[1], after.tau_powers_g1[1]), (tau_g2_s, key.tau_g2)");
return false;
}
// Did the participant multiply the previous alpha by the new one?
if !same_ratio((before.alpha_tau_powers_g1[0], after.alpha_tau_powers_g1[0]), (alpha_g2_s, key.alpha_g2)) {
println!("Invalid ratio (before.alpha_tau_powers_g1[0], after.alpha_tau_powers_g1[0]), (alpha_g2_s, key.alpha_g2)");
return false;
}
// Did the participant multiply the previous beta by the new one?
if !same_ratio((before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (beta_g2_s, key.beta_g2)) {
println!("Invalid ratio (before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (beta_g2_s, key.beta_g2)");
return false;
}
if !same_ratio((before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (before.beta_g2, after.beta_g2)) {
println!("Invalid ratio (before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (before.beta_g2, after.beta_g2)");
return false;
}
}
let tau_powers_g2_0 = after.tau_powers_g2[0].clone();
let tau_powers_g2_1 = after.tau_powers_g2[1].clone();
let tau_powers_g1_0 = after.tau_powers_g1[0].clone();
let tau_powers_g1_1 = after.tau_powers_g1[1].clone();
// Read in chunks and just verify the same ratios. Because the two fixed reference points above satisfy
// tau_powers_g2_1 = tau_powers_g2_0 ^ tau, chunks do not need to overlap.
for chunk in &(0..P::TAU_POWERS_LENGTH).into_iter().chunks(P::EMPIRICAL_BATCH_SIZE) {
if let MinMax(start, end) = chunk.minmax() {
let size = end - start + 1;
before.read_chunk(start, size, input_is_compressed, check_input_for_correctness, &input_map).expect(&format!("must read a chunk from {} to {} from `challenge`", start, end));
after.read_chunk(start, size, output_is_compressed, check_output_for_correctness, &output_map).expect(&format!("must read a chunk from {} to {} from `response`", start, end));
// Are the powers of tau correct?
if !same_ratio(power_pairs(&after.tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)) {
println!("Invalid ratio power_pairs(&after.tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)");
return false;
}
if !same_ratio(power_pairs(&after.tau_powers_g2), (tau_powers_g1_0, tau_powers_g1_1)) {
println!("Invalid ratio power_pairs(&after.tau_powers_g2), (tau_powers_g1_0, tau_powers_g1_1)");
return false;
}
if !same_ratio(power_pairs(&after.alpha_tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)) {
println!("Invalid ratio power_pairs(&after.alpha_tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)");
return false;
}
if !same_ratio(power_pairs(&after.beta_tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)) {
println!("Invalid ratio power_pairs(&after.beta_tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)");
return false;
}
println!("Done processing {} powers of tau", end);
} else {
panic!("Chunk does not have a min and max");
}
}
for chunk in &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).into_iter().chunks(P::EMPIRICAL_BATCH_SIZE) {
if let MinMax(start, end) = chunk.minmax() {
let size = end - start + 1;
before.read_chunk(start, size, input_is_compressed, check_input_for_correctness, &input_map).expect(&format!("must read a chunk from {} to {} from `challenge`", start, end));
after.read_chunk(start, size, output_is_compressed, check_output_for_correctness, &output_map).expect(&format!("must read a chunk from {} to {} from `response`", start, end));
assert_eq!(before.tau_powers_g2.len(), 0, "during the rest of tau g1 generation, tau g2 must be empty");
assert_eq!(after.tau_powers_g2.len(), 0, "during the rest of tau g1 generation, tau g2 must be empty");
// Are the powers of tau correct?
if !same_ratio(power_pairs(&after.tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)) {
println!("Invalid ratio power_pairs(&after.tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1) in extra TauG1 contribution");
return false;
}
println!("Done processing {} powers of tau", end);
} else {
panic!("Chunk does not have a min and max");
}
}
true
}
pub fn decompress(
input_map: &Mmap,
output_map: &mut MmapMut,
check_input_for_correctness: CheckForCorrectness,
) -> io::Result<()>
{
use itertools::MinMaxResult::{MinMax};
let mut accumulator = Self::empty();
for chunk in &(0..P::TAU_POWERS_LENGTH).into_iter().chunks(P::EMPIRICAL_BATCH_SIZE) {
if let MinMax(start, end) = chunk.minmax() {
let size = end - start + 1;
accumulator.read_chunk(start, size, UseCompression::Yes, check_input_for_correctness, &input_map).expect(&format!("must read a chunk from {} to {} from source of decompression", start, end));
accumulator.write_chunk(start, UseCompression::No, output_map)?;
} else {
panic!("Chunk does not have a min and max");
}
}
for chunk in &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).into_iter().chunks(P::EMPIRICAL_BATCH_SIZE) {
if let MinMax(start, end) = chunk.minmax() {
let size = end - start + 1;
accumulator.read_chunk(start, size, UseCompression::Yes, check_input_for_correctness, &input_map).expect(&format!("must read a chunk from {} to {} from source of decompression", start, end));
assert_eq!(accumulator.tau_powers_g2.len(), 0, "during the rest of tau g1 generation, tau g2 must be empty");
assert_eq!(accumulator.alpha_tau_powers_g1.len(), 0, "during the rest of tau g1 generation, alpha*tau in g1 must be empty");
assert_eq!(accumulator.beta_tau_powers_g1.len(), 0, "during the rest of tau g1 generation, beta*tau in g1 must be empty");
accumulator.write_chunk(start, UseCompression::No, output_map)?;
} else {
panic!("Chunk does not have a min and max");
}
}
Ok(())
}
}
impl<E:Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
pub fn read_chunk (
&mut self,
from: usize,
size: usize,
compression: UseCompression,
checked: CheckForCorrectness,
input_map: &Mmap,
) -> Result<(), DeserializationError>
{
self.tau_powers_g1 = match compression {
UseCompression::Yes => {
self.read_points_chunk::<<E::G1Affine as CurveAffine>::Compressed>(from, size, ElementType::TauG1, compression, checked, &input_map)?
},
UseCompression::No => {
self.read_points_chunk::<<E::G1Affine as CurveAffine>::Uncompressed>(from, size, ElementType::TauG1, compression, checked, &input_map)?
},
};
self.tau_powers_g2 = match compression {
UseCompression::Yes => {
self.read_points_chunk::<<E::G2Affine as CurveAffine>::Compressed>(from, size, ElementType::TauG2, compression, checked, &input_map)?
},
UseCompression::No => {
self.read_points_chunk::<<E::G2Affine as CurveAffine>::Uncompressed>(from, size, ElementType::TauG2, compression, checked, &input_map)?
},
};
self.alpha_tau_powers_g1 = match compression {
UseCompression::Yes => {
self.read_points_chunk::<<E::G1Affine as CurveAffine>::Compressed>(from, size, ElementType::AlphaG1, compression, checked, &input_map)?
},
UseCompression::No => {
self.read_points_chunk::<<E::G1Affine as CurveAffine>::Uncompressed>(from, size, ElementType::AlphaG1, compression, checked, &input_map)?
},
};
self.beta_tau_powers_g1 = match compression {
UseCompression::Yes => {
self.read_points_chunk::<<E::G1Affine as CurveAffine>::Compressed>(from, size, ElementType::BetaG1, compression, checked, &input_map)?
},
UseCompression::No => {
self.read_points_chunk::<<E::G1Affine as CurveAffine>::Uncompressed>(from, size, ElementType::BetaG1, compression, checked, &input_map)?
},
};
self.beta_g2 = match compression {
UseCompression::Yes => {
let points = self.read_points_chunk::<<E::G2Affine as CurveAffine>::Compressed>(0, 1, ElementType::BetaG2, compression, checked, &input_map)?;
points[0]
},
UseCompression::No => {
let points = self.read_points_chunk::<<E::G2Affine as CurveAffine>::Uncompressed>(0, 1, ElementType::BetaG2, compression, checked, &input_map)?;
points[0]
},
};
Ok(())
}
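/// Reads `size` consecutive encoded points of `element_type` starting at index `from` out of the
/// memory map and decodes them into affine form in parallel. If the requested range runs past the
/// number of elements of this type in the file, an empty vector is returned, which is how
/// `read_chunk` above ends up with empty TauG2/AlphaG1/BetaG1 vectors in the extra TauG1 region.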
fn read_points_chunk<ENC: EncodedPoint>(
&mut self,
from: usize,
size: usize,
element_type: ElementType,
compression: UseCompression,
checked: CheckForCorrectness,
input_map: &Mmap,
) -> Result<Vec<ENC::Affine>, DeserializationError>
{
// Read the encoded elements
let mut res = vec![ENC::empty(); size];
for (i, encoded) in res.iter_mut().enumerate() {
let index = from + i;
match element_type {
ElementType::TauG1 => {
if index >= P::TAU_POWERS_G1_LENGTH {
return Ok(vec![]);
}
},
ElementType::AlphaG1 | ElementType::BetaG1 | ElementType::BetaG2 | ElementType::TauG2 => {
if index >= P::TAU_POWERS_LENGTH {
return Ok(vec![]);
}
}
};
let position = Self::calculate_mmap_position(index, element_type, compression);
let element_size = Self::get_size(element_type, compression);
let memory_slice = input_map.get(position..position+element_size).expect("must read point data from file");
memory_slice.clone().read_exact(encoded.as_mut())?;
}
// Allocate space for the deserialized elements
let mut res_affine = vec![ENC::Affine::zero(); size];
let mut chunk_size = res.len() / num_cpus::get();
if chunk_size == 0 {
chunk_size = 1;
}
// If any of our threads encounter a deserialization/IO error, catch
// it with this.
let decoding_error = Arc::new(Mutex::new(None));
crossbeam::scope(|scope| {
for (source, target) in res.chunks(chunk_size).zip(res_affine.chunks_mut(chunk_size)) {
let decoding_error = decoding_error.clone();
scope.spawn(move || {
assert_eq!(source.len(), target.len());
for (source, target) in source.iter().zip(target.iter_mut()) {
match {
// If we're a participant, we don't need to check all of the
// elements in the accumulator, which saves a lot of time.
// The hash chain prevents this from being a problem: the
// transcript guarantees that the accumulator was properly
// formed.
match checked {
CheckForCorrectness::Yes => {
// Points at infinity are never expected in the accumulator
source.into_affine().map_err(|e| e.into()).and_then(|source| {
if source.is_zero() {
Err(DeserializationError::PointAtInfinity)
} else {
Ok(source)
}
})
},
CheckForCorrectness::No => source.into_affine_unchecked().map_err(|e| e.into())
}
}
{
Ok(source) => {
*target = source;
},
Err(e) => {
*decoding_error.lock().unwrap() = Some(e);
}
}
}
});
}
});
// Extra check that during decompression all of the points initially initialized to infinity
// were replaced with actual values.
for decoded in res_affine.iter() {
if decoded.is_zero() {
return Err(DeserializationError::PointAtInfinity);
}
}
match Arc::try_unwrap(decoding_error).unwrap().into_inner().unwrap() {
Some(e) => {
Err(e)
},
None => {
Ok(res_affine)
}
}
}
}
impl<E:Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
fn write_all(
&mut self,
chunk_start: usize,
compression: UseCompression,
element_type: ElementType,
output_map: &mut MmapMut,
) -> io::Result<()>
{
match element_type {
ElementType::TauG1 => {
for (i, c) in self.tau_powers_g1.clone().iter().enumerate() {
let index = chunk_start + i;
self.write_point(index, c, compression, element_type.clone(), output_map)?;
}
},
ElementType::TauG2 => {
for (i, c) in self.tau_powers_g2.clone().iter().enumerate() {
let index = chunk_start + i;
self.write_point(index, c, compression, element_type.clone(), output_map)?;
}
},
ElementType::AlphaG1 => {
for (i, c) in self.alpha_tau_powers_g1.clone().iter().enumerate() {
let index = chunk_start + i;
self.write_point(index, c, compression, element_type.clone(), output_map)?;
}
},
ElementType::BetaG1 => {
for (i, c) in self.beta_tau_powers_g1.clone().iter().enumerate() {
let index = chunk_start + i;
self.write_point(index, c, compression, element_type.clone(), output_map)?;
}
},
ElementType::BetaG2 => {
let index = chunk_start;
self.write_point(index, &self.beta_g2.clone(), compression, element_type.clone(), output_map)?
}
};
output_map.flush()?;
Ok(())
}
fn write_point<C>(
&mut self,
index: usize,
p: &C,
compression: UseCompression,
element_type: ElementType,
output_map: &mut MmapMut,
) -> io::Result<()>
where C: CurveAffine<Engine = E, Scalar = E::Fr>
{
match element_type {
ElementType::TauG1 => {
if index >= P::TAU_POWERS_G1_LENGTH {
return Ok(());
}
},
ElementType::AlphaG1 | ElementType::BetaG1 | ElementType::BetaG2 | ElementType::TauG2 => {
if index >= P::TAU_POWERS_LENGTH {
return Ok(());
}
}
};
match compression {
UseCompression::Yes => {
let position = Self::calculate_mmap_position(index, element_type, compression);
// let size = self.get_size(element_type, compression);
(&mut output_map[position..]).write(p.into_compressed().as_ref())?;
},
UseCompression::No => {
let position = Self::calculate_mmap_position(index, element_type, compression);
// let size = self.get_size(element_type, compression);
(&mut output_map[position..]).write(p.into_uncompressed().as_ref())?;
},
};
Ok(())
}
/// Write the accumulator with some compression behavior.
pub fn write_chunk(
&mut self,
chunk_start: usize,
compression: UseCompression,
output_map: &mut MmapMut
) -> io::Result<()>
{
self.write_all(chunk_start, compression, ElementType::TauG1, output_map)?;
if chunk_start < P::TAU_POWERS_LENGTH {
self.write_all(chunk_start, compression, ElementType::TauG2, output_map)?;
self.write_all(chunk_start, compression, ElementType::AlphaG1, output_map)?;
self.write_all(chunk_start, compression, ElementType::BetaG1, output_map)?;
self.write_all(chunk_start, compression, ElementType::BetaG2, output_map)?;
}
Ok(())
}
}
impl<E:Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
/// Transforms the accumulator with a private key.
/// Due to the large amount of data in the previous accumulator, even in compressed form,
/// this function can work directly on compressed input. The output can be produced in either form.
/// WARNING: The contributor is not required to check that values from the challenge file were serialized
/// correctly, but we may want to enforce it if a ceremony coordinator does not recompress the previous
/// contribution into the new challenge file.
pub fn transform(
input_map: &Mmap,
output_map: &mut MmapMut,
input_is_compressed: UseCompression,
compress_the_output: UseCompression,
check_input_for_correctness: CheckForCorrectness,
key: &PrivateKey<E>
) -> io::Result<()>
{
/// Exponentiate a large number of points, with an optional coefficient to be applied to the
/// exponent.
fn batch_exp<EE: Engine, C: CurveAffine<Engine = EE, Scalar = EE::Fr> >(bases: &mut [C], exp: &[C::Scalar], coeff: Option<&C::Scalar>) {
assert_eq!(bases.len(), exp.len());
let mut projective = vec![C::Projective::zero(); bases.len()];
let chunk_size = bases.len() / num_cpus::get();
// Perform wNAF over multiple cores, placing results into `projective`.
crossbeam::scope(|scope| {
for ((bases, exp), projective) in bases.chunks_mut(chunk_size)
.zip(exp.chunks(chunk_size))
.zip(projective.chunks_mut(chunk_size))
{
scope.spawn(move || {
let mut wnaf = Wnaf::new();
for ((base, exp), projective) in bases.iter_mut()
.zip(exp.iter())
.zip(projective.iter_mut())
{
let mut exp = *exp;
if let Some(coeff) = coeff {
exp.mul_assign(coeff);
}
*projective = wnaf.base(base.into_projective(), 1).scalar(exp.into_repr());
}
});
}
});
// Perform batch normalization
crossbeam::scope(|scope| {
for projective in projective.chunks_mut(chunk_size)
{
scope.spawn(move || {
C::Projective::batch_normalization(projective);
});
}
});
// Turn it all back into affine points
for (projective, affine) in projective.iter().zip(bases.iter_mut()) {
*affine = projective.into_affine();
assert!(!affine.is_zero(), "your contribution happened to produce a point at infinity, please re-run");
}
}
let mut accumulator = Self::empty();
use itertools::MinMaxResult::{MinMax};
for chunk in &(0..P::TAU_POWERS_LENGTH).into_iter().chunks(P::EMPIRICAL_BATCH_SIZE) {
if let MinMax(start, end) = chunk.minmax() {
let size = end - start + 1;
accumulator.read_chunk(start, size, input_is_compressed, check_input_for_correctness, &input_map).expect("must read a first chunk");
// Construct the powers of tau
let mut taupowers = vec![E::Fr::zero(); size];
let chunk_size = size / num_cpus::get();
// Construct exponents in parallel
crossbeam::scope(|scope| {
for (i, taupowers) in taupowers.chunks_mut(chunk_size).enumerate() {
scope.spawn(move || {
let mut acc = key.tau.pow(&[(start + i * chunk_size) as u64]);
for t in taupowers {
*t = acc;
acc.mul_assign(&key.tau);
}
});
}
});
batch_exp::<E, _>(&mut accumulator.tau_powers_g1, &taupowers[0..], None);
batch_exp::<E, _>(&mut accumulator.tau_powers_g2, &taupowers[0..], None);
batch_exp::<E, _>(&mut accumulator.alpha_tau_powers_g1, &taupowers[0..], Some(&key.alpha));
batch_exp::<E, _>(&mut accumulator.beta_tau_powers_g1, &taupowers[0..], Some(&key.beta));
accumulator.beta_g2 = accumulator.beta_g2.mul(key.beta).into_affine();
assert!(!accumulator.beta_g2.is_zero(), "your contribution happened to produce a point at infinity, please re-run");
accumulator.write_chunk(start, compress_the_output, output_map)?;
println!("Done processing {} powers of tau", end);
} else {
panic!("Chunk does not have a min and max");
}
}
for chunk in &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).into_iter().chunks(P::EMPIRICAL_BATCH_SIZE) {
if let MinMax(start, end) = chunk.minmax() {
let size = end - start + 1;
accumulator.read_chunk(start, size, input_is_compressed, check_input_for_correctness, &input_map).expect("must read a first chunk");
assert_eq!(accumulator.tau_powers_g2.len(), 0, "during the rest of tau g1 generation, tau g2 must be empty");
// Construct the powers of tau
let mut taupowers = vec![E::Fr::zero(); size];
let chunk_size = size / num_cpus::get();
// Construct exponents in parallel
crossbeam::scope(|scope| {
for (i, taupowers) in taupowers.chunks_mut(chunk_size).enumerate() {
scope.spawn(move || {
let mut acc = key.tau.pow(&[(start + i * chunk_size) as u64]);
for t in taupowers {
*t = acc;
acc.mul_assign(&key.tau);
}
});
}
});
batch_exp::<E, _>(&mut accumulator.tau_powers_g1, &taupowers[0..], None);
accumulator.beta_g2 = accumulator.beta_g2.mul(key.beta).into_affine();
assert!(!accumulator.beta_g2.is_zero(), "your contribution happened to produce a point at infinity, please re-run");
accumulator.write_chunk(start, compress_the_output, output_map)?;
println!("Done processing {} powers of tau", end);
} else {
panic!("Chunk does not have a min and max");
}
}
Ok(())
}
}
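// Descriptive summary of the chunked `transform` above (no additional logic):
//  1. `read_chunk` loads one batch of the previous accumulator, optionally compressed.
//  2. The exponents of the fresh tau for that batch are built in parallel.
//  3. `batch_exp` applies those exponents (scaled by alpha or beta where relevant) using wNAF.
//  4. `write_chunk` stores the batch at the matching offsets of the output map.
// The second loop only extends the TauG1 vector, because `TAU_POWERS_G1_LENGTH` exceeds
// `TAU_POWERS_LENGTH`; the other vectors are empty there by construction.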
impl<E:Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
/// Transforms the accumulator with a private key.
pub fn generate_initial(
output_map: &mut MmapMut,
compress_the_output: UseCompression,
) -> io::Result<()>
{
use itertools::MinMaxResult::{MinMax};
for chunk in &(0..P::TAU_POWERS_LENGTH).into_iter().chunks(P::EMPIRICAL_BATCH_SIZE) {
if let MinMax(start, end) = chunk.minmax() {
let size = end - start + 1;
let mut accumulator = Self {
tau_powers_g1: vec![E::G1Affine::one(); size],
tau_powers_g2: vec![E::G2Affine::one(); size],
alpha_tau_powers_g1: vec![E::G1Affine::one(); size],
beta_tau_powers_g1: vec![E::G1Affine::one(); size],
beta_g2: E::G2Affine::one(),
hash: blank_hash(),
marker: std::marker::PhantomData::<P>{}
};
accumulator.write_chunk(start, compress_the_output, output_map)?;
println!("Done processing {} powers of tau", end);
} else {
panic!("Chunk does not have a min and max");
}
}
for chunk in &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).into_iter().chunks(P::EMPIRICAL_BATCH_SIZE) {
if let MinMax(start, end) = chunk.minmax() {
let size = end - start + 1;
let mut accumulator = Self {
tau_powers_g1: vec![E::G1Affine::one(); size],
tau_powers_g2: vec![],
alpha_tau_powers_g1: vec![],
beta_tau_powers_g1: vec![],
beta_g2: E::G2Affine::one(),
hash: blank_hash(),
marker: std::marker::PhantomData::<P>{}
};
accumulator.write_chunk(start, compress_the_output, output_map)?;
println!("Done processing {} powers of tau", end);
} else {
panic!("Chunk does not have a min and max");
}
}
Ok(())
}
}

@ -0,0 +1,190 @@
extern crate powersoftau;
extern crate bellman;
extern crate memmap;
extern crate rand;
extern crate blake2;
extern crate byteorder;
extern crate crypto;
// use powersoftau::bn256::{Bn256CeremonyParameters};
use powersoftau::small_bn256::{Bn256CeremonyParameters};
use powersoftau::batched_accumulator::{BachedAccumulator};
use powersoftau::keypair::{keypair};
use powersoftau::parameters::{UseCompression, CheckForCorrectness};
use std::fs::OpenOptions;
use bellman::pairing::bn256::Bn256;
use memmap::*;
use std::io::Write;
use powersoftau::parameters::PowersOfTauParameters;
#[macro_use]
extern crate hex_literal;
const INPUT_IS_COMPRESSED: UseCompression = UseCompression::No;
const COMPRESS_THE_OUTPUT: UseCompression = UseCompression::Yes;
const CHECK_INPUT_CORRECTNESS: CheckForCorrectness = CheckForCorrectness::No;
fn main() {
println!("Will contribute a random beacon to accumulator for 2^{} powers of tau", Bn256CeremonyParameters::REQUIRED_POWER);
println!("In total will generate up to {} powers", Bn256CeremonyParameters::TAU_POWERS_G1_LENGTH);
// Create an RNG based on the outcome of the random beacon
let mut rng = {
use byteorder::{ReadBytesExt, BigEndian};
use rand::{SeedableRng};
use rand::chacha::ChaChaRng;
use crypto::sha2::Sha256;
use crypto::digest::Digest;
// Place block hash here (block number #564321)
let mut cur_hash: [u8; 32] = hex!("0000000000000000000a558a61ddc8ee4e488d647a747fe4dcc362fe2026c620");
// Performs 2^n hash iterations over it
const N: usize = 31;
for i in 0..(1u64<<N) {
// Print 1024 of the interstitial states
// so that verification can be
// parallelized
if i % (1u64<<(N-10)) == 0 {
print!("{}: ", i);
for b in cur_hash.iter() {
print!("{:02x}", b);
}
println!("");
}
let mut h = Sha256::new();
h.input(&cur_hash);
h.result(&mut cur_hash);
}
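// Illustrative (hypothetical) spot-check a verifier could run on one printed segment:
// starting from the state printed at iteration k, applying SHA-256 another 2^(N-10) times
// should reproduce the state printed at iteration k + 2^(N-10). Checking all 1024 segments
// independently re-derives the final beacon output in parallel.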
print!("Final result of beacon: ");
for b in cur_hash.iter() {
print!("{:02x}", b);
}
println!("");
let mut digest = &cur_hash[..];
let mut seed = [0u32; 8];
for i in 0..8 {
seed[i] = digest.read_u32::<BigEndian>().expect("digest is large enough for this to work");
}
ChaChaRng::from_seed(&seed)
};
println!("Done creating a beacon RNG");
// Try to load `./challenge` from disk.
let reader = OpenOptions::new()
.read(true)
.open("challenge").expect("unable open `./challenge` in this directory");
{
let metadata = reader.metadata().expect("unable to get filesystem metadata for `./challenge`");
let expected_challenge_length = match INPUT_IS_COMPRESSED {
UseCompression::Yes => {
Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE
},
UseCompression::No => {
Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE
}
};
if metadata.len() != (expected_challenge_length as u64) {
panic!("The size of `./challenge` should be {}, but it's {}, so something isn't right.", expected_challenge_length, metadata.len());
}
}
let readable_map = unsafe { MmapOptions::new().map(&reader).expect("unable to create a memory map for input") };
// Create `./response` in this directory
let writer = OpenOptions::new()
.read(true)
.write(true)
.create_new(true)
.open("response").expect("unable to create `./response` in this directory");
let required_output_length = match COMPRESS_THE_OUTPUT {
UseCompression::Yes => {
Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE
},
UseCompression::No => {
Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE + Bn256CeremonyParameters::PUBLIC_KEY_SIZE
}
};
writer.set_len(required_output_length as u64).expect("must make output file large enough");
let mut writable_map = unsafe { MmapOptions::new().map_mut(&writer).expect("unable to create a memory map for output") };
println!("Calculating previous contribution hash...");
let current_accumulator_hash = BachedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&readable_map);
{
println!("Contributing on top of the hash:");
for line in current_accumulator_hash.as_slice().chunks(16) {
print!("\t");
for section in line.chunks(4) {
for b in section {
print!("{:02x}", b);
}
print!(" ");
}
println!("");
}
(&mut writable_map[0..]).write(current_accumulator_hash.as_slice()).expect("unable to write a challenge hash to mmap");
writable_map.flush().expect("unable to write hash to `./response`");
}
// Construct our keypair using the RNG we created above
let (pubkey, privkey) = keypair(&mut rng, current_accumulator_hash.as_ref());
// Perform the transformation
println!("Computing and writing your contribution, this could take a while...");
// this computes a transformation and writes it
BachedAccumulator::<Bn256, Bn256CeremonyParameters>::transform(
&readable_map,
&mut writable_map,
INPUT_IS_COMPRESSED,
COMPRESS_THE_OUTPUT,
CHECK_INPUT_CORRECTNESS,
&privkey
).expect("must transform with the key");
println!("Finihsing writing your contribution to `./response`...");
// Write the public key
pubkey.write::<Bn256CeremonyParameters>(&mut writable_map, COMPRESS_THE_OUTPUT).expect("unable to write public key");
// Get the hash of the contribution, so the user can compare later
let output_readonly = writable_map.make_read_only().expect("must make a map readonly");
let contribution_hash = BachedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&output_readonly);
print!("Done!\n\n\
Your contribution has been written to `./response`\n\n\
The BLAKE2b hash of `./response` is:\n");
for line in contribution_hash.as_slice().chunks(16) {
print!("\t");
for section in line.chunks(4) {
for b in section {
print!("{:02x}", b);
}
print!(" ");
}
println!("");
}
println!("Thank you for your participation, much appreciated! :)");
}

@ -0,0 +1,195 @@
extern crate powersoftau;
extern crate bellman;
extern crate memmap;
extern crate rand;
extern crate blake2;
extern crate byteorder;
// use powersoftau::bn256::{Bn256CeremonyParameters};
use powersoftau::small_bn256::{Bn256CeremonyParameters};
use powersoftau::batched_accumulator::{BachedAccumulator};
use powersoftau::keypair::{keypair};
use powersoftau::parameters::{UseCompression, CheckForCorrectness};
use std::fs::OpenOptions;
use bellman::pairing::bn256::Bn256;
use memmap::*;
use std::io::{Read, Write};
use powersoftau::parameters::PowersOfTauParameters;
const INPUT_IS_COMPRESSED: UseCompression = UseCompression::No;
const COMPRESS_THE_OUTPUT: UseCompression = UseCompression::Yes;
const CHECK_INPUT_CORRECTNESS: CheckForCorrectness = CheckForCorrectness::No;
fn main() {
println!("Will contribute to accumulator for 2^{} powers of tau", Bn256CeremonyParameters::REQUIRED_POWER);
println!("In total will generate up to {} powers", Bn256CeremonyParameters::TAU_POWERS_G1_LENGTH);
// Create an RNG based on a mixture of system randomness and user provided randomness
let mut rng = {
use byteorder::{ReadBytesExt, BigEndian};
use blake2::{Blake2b, Digest};
use rand::{SeedableRng, Rng, OsRng};
use rand::chacha::ChaChaRng;
let h = {
let mut system_rng = OsRng::new().unwrap();
let mut h = Blake2b::default();
// Gather 1024 bytes of entropy from the system
for _ in 0..1024 {
let r: u8 = system_rng.gen();
h.input(&[r]);
}
// Ask the user to provide some information for additional entropy
let mut user_input = String::new();
println!("Type some random text and press [ENTER] to provide additional entropy...");
std::io::stdin().read_line(&mut user_input).expect("expected to read some random text from the user");
// Hash it all up to make a seed
h.input(&user_input.as_bytes());
h.result()
};
let mut digest = &h[..];
// Interpret the first 32 bytes of the digest as 8 32-bit words
let mut seed = [0u32; 8];
for i in 0..8 {
seed[i] = digest.read_u32::<BigEndian>().expect("digest is large enough for this to work");
}
ChaChaRng::from_seed(&seed)
};
// Try to load `./challenge` from disk.
let reader = OpenOptions::new()
.read(true)
.open("challenge").expect("unable open `./challenge` in this directory");
{
let metadata = reader.metadata().expect("unable to get filesystem metadata for `./challenge`");
let expected_challenge_length = match INPUT_IS_COMPRESSED {
UseCompression::Yes => {
Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE
},
UseCompression::No => {
Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE
}
};
if metadata.len() != (expected_challenge_length as u64) {
panic!("The size of `./challenge` should be {}, but it's {}, so something isn't right.", expected_challenge_length, metadata.len());
}
}
let readable_map = unsafe { MmapOptions::new().map(&reader).expect("unable to create a memory map for input") };
// Create `./response` in this directory
let writer = OpenOptions::new()
.read(true)
.write(true)
.create_new(true)
.open("response").expect("unable to create `./response` in this directory");
let required_output_length = match COMPRESS_THE_OUTPUT {
UseCompression::Yes => {
Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE
},
UseCompression::No => {
Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE + Bn256CeremonyParameters::PUBLIC_KEY_SIZE
}
};
writer.set_len(required_output_length as u64).expect("must make output file large enough");
let mut writable_map = unsafe { MmapOptions::new().map_mut(&writer).expect("unable to create a memory map for output") };
println!("Calculating previous contribution hash...");
assert!(UseCompression::No == INPUT_IS_COMPRESSED, "Hashing the compressed file is not yet defined");
let current_accumulator_hash = BachedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&readable_map);
{
println!("`challenge` file contains decompressed points and has a hash:");
for line in current_accumulator_hash.as_slice().chunks(16) {
print!("\t");
for section in line.chunks(4) {
for b in section {
print!("{:02x}", b);
}
print!(" ");
}
println!("");
}
(&mut writable_map[0..]).write(current_accumulator_hash.as_slice()).expect("unable to write a challenge hash to mmap");
writable_map.flush().expect("unable to write hash to `./response`");
}
{
let mut challenge_hash = [0; 64];
let memory_slice = readable_map.get(0..64).expect("must read point data from file");
memory_slice.clone().read_exact(&mut challenge_hash).expect("couldn't read hash of challenge file from response file");
println!("`challenge` file claims (!!! Must not be blindly trusted) that it was based on the original contribution with a hash:");
for line in challenge_hash.chunks(16) {
print!("\t");
for section in line.chunks(4) {
for b in section {
print!("{:02x}", b);
}
print!(" ");
}
println!("");
}
}
// Construct our keypair using the RNG we created above
let (pubkey, privkey) = keypair(&mut rng, current_accumulator_hash.as_ref());
// Perform the transformation
println!("Computing and writing your contribution, this could take a while...");
// this computes a transformation and writes it
BachedAccumulator::<Bn256, Bn256CeremonyParameters>::transform(
&readable_map,
&mut writable_map,
INPUT_IS_COMPRESSED,
COMPRESS_THE_OUTPUT,
CHECK_INPUT_CORRECTNESS,
&privkey
).expect("must transform with the key");
println!("Finihsing writing your contribution to `./response`...");
// Write the public key
pubkey.write::<Bn256CeremonyParameters>(&mut writable_map, COMPRESS_THE_OUTPUT).expect("unable to write public key");
writable_map.flush().expect("must flush a memory map");
// Get the hash of the contribution, so the user can compare later
let output_readonly = writable_map.make_read_only().expect("must make a map readonly");
let contribution_hash = BachedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&output_readonly);
print!("Done!\n\n\
Your contribution has been written to `./response`\n\n\
The BLAKE2b hash of `./response` is:\n");
for line in contribution_hash.as_slice().chunks(16) {
print!("\t");
for section in line.chunks(4) {
for b in section {
print!("{:02x}", b);
}
print!(" ");
}
println!("");
}
println!("Thank you for your participation, much appreciated! :)");
}

@ -0,0 +1,34 @@
extern crate powersoftau;
extern crate bellman;
// use powersoftau::bn256::{Bn256CeremonyParameters};
use powersoftau::small_bn256::{Bn256CeremonyParameters};
use powersoftau::accumulator::{Accumulator};
use powersoftau::utils::{blank_hash};
use powersoftau::parameters::{UseCompression};
use std::fs::OpenOptions;
use std::io::{Write, BufWriter};
use bellman::pairing::bn256::Bn256;
fn main() {
let writer = OpenOptions::new()
.read(false)
.write(true)
.create_new(true)
.open("challenge").expect("unable to create `./challenge`");
let mut writer = BufWriter::new(writer);
// Write a blank BLAKE2b hash:
writer.write_all(&blank_hash().as_slice()).expect("unable to write blank hash to `./challenge`");
let parameters = Bn256CeremonyParameters{};
let acc: Accumulator<Bn256, _> = Accumulator::new(parameters);
println!("Writing an empty accumulator to disk");
acc.serialize(&mut writer, UseCompression::No).expect("unable to write fresh accumulator to `./challenge`");
writer.flush().expect("unable to flush accumulator to disk");
println!("Wrote a fresh accumulator to `./challenge`");
}

@ -0,0 +1,81 @@
extern crate powersoftau;
extern crate bellman;
extern crate memmap;
// use powersoftau::bn256::{Bn256CeremonyParameters};
use powersoftau::small_bn256::{Bn256CeremonyParameters};
use powersoftau::batched_accumulator::{BachedAccumulator};
use powersoftau::parameters::{UseCompression};
use powersoftau::utils::{blank_hash};
use std::fs::OpenOptions;
use std::io::{Write};
use bellman::pairing::bn256::Bn256;
use memmap::*;
use powersoftau::parameters::PowersOfTauParameters;
const COMPRESS_NEW_CHALLENGE: UseCompression = UseCompression::No;
fn main() {
println!("Will generate an empty accumulator for 2^{} powers of tau", Bn256CeremonyParameters::REQUIRED_POWER);
println!("In total will generate up to {} powers", Bn256CeremonyParameters::TAU_POWERS_G1_LENGTH);
let file = OpenOptions::new()
.read(true)
.write(true)
.create_new(true)
.open("challenge").expect("unable to create `./challenge`");
let expected_challenge_length = match COMPRESS_NEW_CHALLENGE {
UseCompression::Yes => {
Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE - Bn256CeremonyParameters::PUBLIC_KEY_SIZE
},
UseCompression::No => {
Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE
}
};
file.set_len(expected_challenge_length as u64).expect("unable to allocate large enough file");
let mut writable_map = unsafe { MmapOptions::new().map_mut(&file).expect("unable to create a memory map") };
// Write a blank BLAKE2b hash:
let hash = blank_hash();
(&mut writable_map[0..]).write(hash.as_slice()).expect("unable to write a default hash to mmap");
writable_map.flush().expect("unable to write blank hash to `./challenge`");
println!("Blank hash for an empty challenge:");
for line in hash.as_slice().chunks(16) {
print!("\t");
for section in line.chunks(4) {
for b in section {
print!("{:02x}", b);
}
print!(" ");
}
println!("");
}
BachedAccumulator::<Bn256, Bn256CeremonyParameters>::generate_initial(&mut writable_map, COMPRESS_NEW_CHALLENGE).expect("generation of initial accumulator is successful");
writable_map.flush().expect("unable to flush memmap to disk");
// Get the hash of the contribution, so the user can compare later
let output_readonly = writable_map.make_read_only().expect("must make a map readonly");
let contribution_hash = BachedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&output_readonly);
println!("Empty contribution is formed with a hash:");
for line in contribution_hash.as_slice().chunks(16) {
print!("\t");
for section in line.chunks(4) {
for b in section {
print!("{:02x}", b);
}
print!(" ");
}
println!("");
}
println!("Wrote a fresh accumulator to `./challenge`");
}

@ -0,0 +1,339 @@
extern crate pairing;
extern crate powersoftau;
extern crate rand;
extern crate blake2;
extern crate byteorder;
extern crate bellman;
use bellman::pairing::{CurveAffine, CurveProjective};
use bellman::pairing::bls12_381::{G1, G2};
use powersoftau::*;
use bellman::multicore::Worker;
use bellman::domain::{EvaluationDomain, Point};
use std::fs::OpenOptions;
use std::io::{self, BufReader, BufWriter, Write};
fn into_hex(h: &[u8]) -> String {
let mut f = String::new();
for byte in &h[..] {
f += &format!("{:02x}", byte);
}
f
}
// Computes the hash of the challenge file for the player,
// given the current state of the accumulator and the last
// response file hash.
fn get_challenge_file_hash(
acc: &Accumulator,
last_response_file_hash: &[u8; 64]
) -> [u8; 64]
{
let sink = io::sink();
let mut sink = HashWriter::new(sink);
sink.write_all(last_response_file_hash)
.unwrap();
acc.serialize(
&mut sink,
UseCompression::No
).unwrap();
let mut tmp = [0; 64];
tmp.copy_from_slice(sink.into_hash().as_slice());
tmp
}
// Computes the hash of the response file, given the new
// accumulator, the player's public key, and the challenge
// file's hash.
fn get_response_file_hash(
acc: &Accumulator,
pubkey: &PublicKey,
last_challenge_file_hash: &[u8; 64]
) -> [u8; 64]
{
let sink = io::sink();
let mut sink = HashWriter::new(sink);
sink.write_all(last_challenge_file_hash)
.unwrap();
acc.serialize(
&mut sink,
UseCompression::Yes
).unwrap();
pubkey.serialize(&mut sink).unwrap();
let mut tmp = [0; 64];
tmp.copy_from_slice(sink.into_hash().as_slice());
tmp
}
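// Taken together, the two helpers above define the ceremony's hash chain: each challenge hash
// commits to the previous response hash plus the current accumulator (uncompressed), and each
// response hash commits to that challenge hash plus the participant's new accumulator
// (compressed) and public key. The chain is seeded with the blank BLAKE2b hash below.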
fn main() {
// Try to load `./transcript` from disk.
let reader = OpenOptions::new()
.read(true)
.open("transcript")
.expect("unable open `./transcript` in this directory");
let mut reader = BufReader::with_capacity(1024 * 1024, reader);
// Initialize the accumulator
let mut current_accumulator = Accumulator::new();
// The "last response file hash" is just a blank BLAKE2b hash
// at the beginning of the hash chain.
let mut last_response_file_hash = [0; 64];
last_response_file_hash.copy_from_slice(blank_hash().as_slice());
// There were 89 rounds.
for _ in 0..89 {
// Compute the hash of the challenge file that the player
// should have received.
let last_challenge_file_hash = get_challenge_file_hash(
&current_accumulator,
&last_response_file_hash
);
// Deserialize the accumulator provided by the player in
// their response file. It's stored in the transcript in
// uncompressed form so that we can more efficiently
// deserialize it.
let response_file_accumulator = Accumulator::deserialize(
&mut reader,
UseCompression::No,
CheckForCorrectness::Yes
).expect("unable to read uncompressed accumulator");
// Deserialize the public key provided by the player.
let response_file_pubkey = PublicKey::deserialize(&mut reader)
.expect("wasn't able to deserialize the response file's public key");
// Compute the hash of the response file. (we had it in uncompressed
// form in the transcript, but the response file is compressed to save
// participants bandwidth.)
last_response_file_hash = get_response_file_hash(
&response_file_accumulator,
&response_file_pubkey,
&last_challenge_file_hash
);
print!("{}", into_hex(&last_response_file_hash));
// Verify the transformation from the previous accumulator to the new
// one. This also verifies the correctness of the accumulators and the
// public keys, with respect to the transcript so far.
if !verify_transform(
&current_accumulator,
&response_file_accumulator,
&response_file_pubkey,
&last_challenge_file_hash
)
{
println!(" ... FAILED");
panic!("INVALID RESPONSE FILE!");
} else {
println!("");
}
current_accumulator = response_file_accumulator;
}
println!("Transcript OK!");
let worker = &Worker::new();
// Create the parameters for various 2^m circuit depths.
for m in 0..22 {
let paramname = format!("phase1radix2m{}", m);
println!("Creating {}", paramname);
let degree = 1 << m;
let mut g1_coeffs = EvaluationDomain::from_coeffs(
current_accumulator.tau_powers_g1[0..degree].iter()
.map(|e| Point(e.into_projective()))
.collect()
).unwrap();
let mut g2_coeffs = EvaluationDomain::from_coeffs(
current_accumulator.tau_powers_g2[0..degree].iter()
.map(|e| Point(e.into_projective()))
.collect()
).unwrap();
let mut g1_alpha_coeffs = EvaluationDomain::from_coeffs(
current_accumulator.alpha_tau_powers_g1[0..degree].iter()
.map(|e| Point(e.into_projective()))
.collect()
).unwrap();
let mut g1_beta_coeffs = EvaluationDomain::from_coeffs(
current_accumulator.beta_tau_powers_g1[0..degree].iter()
.map(|e| Point(e.into_projective()))
.collect()
).unwrap();
// This converts all of the elements into Lagrange coefficients
// for later construction of interpolation polynomials
g1_coeffs.ifft(&worker);
g2_coeffs.ifft(&worker);
g1_alpha_coeffs.ifft(&worker);
g1_beta_coeffs.ifft(&worker);
let g1_coeffs = g1_coeffs.into_coeffs();
let g2_coeffs = g2_coeffs.into_coeffs();
let g1_alpha_coeffs = g1_alpha_coeffs.into_coeffs();
let g1_beta_coeffs = g1_beta_coeffs.into_coeffs();
assert_eq!(g1_coeffs.len(), degree);
assert_eq!(g2_coeffs.len(), degree);
assert_eq!(g1_alpha_coeffs.len(), degree);
assert_eq!(g1_beta_coeffs.len(), degree);
// Remove the Point() wrappers
let mut g1_coeffs = g1_coeffs.into_iter()
.map(|e| e.0)
.collect::<Vec<_>>();
let mut g2_coeffs = g2_coeffs.into_iter()
.map(|e| e.0)
.collect::<Vec<_>>();
let mut g1_alpha_coeffs = g1_alpha_coeffs.into_iter()
.map(|e| e.0)
.collect::<Vec<_>>();
let mut g1_beta_coeffs = g1_beta_coeffs.into_iter()
.map(|e| e.0)
.collect::<Vec<_>>();
// Batch normalize
G1::batch_normalization(&mut g1_coeffs);
G2::batch_normalization(&mut g2_coeffs);
G1::batch_normalization(&mut g1_alpha_coeffs);
G1::batch_normalization(&mut g1_beta_coeffs);
// H query of Groth16 needs...
// x^i * (x^m - 1) for i in 0..=(m-2) a.k.a.
// x^(i + m) - x^i for i in 0..=(m-2)
// for radix2 evaluation domains
let mut h = Vec::with_capacity(degree - 1);
for i in 0..(degree-1) {
let mut tmp = current_accumulator.tau_powers_g1[i + degree].into_projective();
let mut tmp2 = current_accumulator.tau_powers_g1[i].into_projective();
tmp2.negate();
tmp.add_assign(&tmp2);
h.push(tmp);
}
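// The loop above relies on the identity tau^(i + m) - tau^i = tau^i * (tau^m - 1) with m = degree,
// so subtracting the two stored powers yields exactly the x^i * (x^m - 1) bases required by the
// Groth16 H query, without ever knowing tau itself.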
// Batch normalize this as well
G1::batch_normalization(&mut h);
// Create the parameter file
let writer = OpenOptions::new()
.read(false)
.write(true)
.create_new(true)
.open(paramname)
.expect("unable to create parameter file in this directory");
let mut writer = BufWriter::new(writer);
// Write alpha (in g1)
// Needed by verifier for e(alpha, beta)
// Needed by prover for A and C elements of proof
writer.write_all(
current_accumulator.alpha_tau_powers_g1[0]
.into_uncompressed()
.as_ref()
).unwrap();
// Write beta (in g1)
// Needed by prover for C element of proof
writer.write_all(
current_accumulator.beta_tau_powers_g1[0]
.into_uncompressed()
.as_ref()
).unwrap();
// Write beta (in g2)
// Needed by verifier for e(alpha, beta)
// Needed by prover for B element of proof
writer.write_all(
current_accumulator.beta_g2
.into_uncompressed()
.as_ref()
).unwrap();
// Lagrange coefficients in G1 (for constructing
// LC/IC queries and precomputing polynomials for A)
for coeff in g1_coeffs {
// Was normalized earlier in parallel
let coeff = coeff.into_affine();
writer.write_all(
coeff.into_uncompressed()
.as_ref()
).unwrap();
}
// Lagrange coefficients in G2 (for precomputing
// polynomials for B)
for coeff in g2_coeffs {
// Was normalized earlier in parallel
let coeff = coeff.into_affine();
writer.write_all(
coeff.into_uncompressed()
.as_ref()
).unwrap();
}
// Lagrange coefficients in G1 with alpha (for
// LC/IC queries)
for coeff in g1_alpha_coeffs {
// Was normalized earlier in parallel
let coeff = coeff.into_affine();
writer.write_all(
coeff.into_uncompressed()
.as_ref()
).unwrap();
}
// Lagrange coefficients in G1 with beta (for
// LC/IC queries)
for coeff in g1_beta_coeffs {
// Was normalized earlier in parallel
let coeff = coeff.into_affine();
writer.write_all(
coeff.into_uncompressed()
.as_ref()
).unwrap();
}
// Bases for H polynomial computation
for coeff in h {
// Was normalized earlier in parallel
let coeff = coeff.into_affine();
writer.write_all(
coeff.into_uncompressed()
.as_ref()
).unwrap();
}
}
}

@ -0,0 +1,207 @@
extern crate powersoftau;
extern crate bellman;
extern crate memmap;
extern crate rand;
extern crate blake2;
extern crate byteorder;
// use powersoftau::bn256::{Bn256CeremonyParameters};
use powersoftau::small_bn256::{Bn256CeremonyParameters};
use powersoftau::batched_accumulator::{BachedAccumulator};
use powersoftau::keypair::{PublicKey};
use powersoftau::parameters::{UseCompression, CheckForCorrectness};
use std::fs::OpenOptions;
use bellman::pairing::bn256::Bn256;
use memmap::*;
use std::io::{Read, Write};
use powersoftau::parameters::PowersOfTauParameters;
const PREVIOUS_CHALLENGE_IS_COMPRESSED: UseCompression = UseCompression::No;
const CONTRIBUTION_IS_COMPRESSED: UseCompression = UseCompression::Yes;
const COMPRESS_NEW_CHALLENGE: UseCompression = UseCompression::No;
fn main() {
println!("Will verify and decompress a contribution to accumulator for 2^{} powers of tau", Bn256CeremonyParameters::REQUIRED_POWER);
// Try to load `./challenge` from disk.
let challenge_reader = OpenOptions::new()
.read(true)
.open("challenge").expect("unable open `./challenge` in this directory");
{
let metadata = challenge_reader.metadata().expect("unable to get filesystem metadata for `./challenge`");
let expected_challenge_length = match PREVIOUS_CHALLENGE_IS_COMPRESSED {
UseCompression::Yes => {
Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE
},
UseCompression::No => {
Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE
}
};
if metadata.len() != (expected_challenge_length as u64) {
panic!("The size of `./challenge` should be {}, but it's {}, so something isn't right.", expected_challenge_length, metadata.len());
}
}
let challenge_readable_map = unsafe { MmapOptions::new().map(&challenge_reader).expect("unable to create a memory map for input") };
// Try to load `./response` from disk.
let response_reader = OpenOptions::new()
.read(true)
.open("response").expect("unable open `./response` in this directory");
{
let metadata = response_reader.metadata().expect("unable to get filesystem metadata for `./response`");
let expected_response_length = match CONTRIBUTION_IS_COMPRESSED {
UseCompression::Yes => {
Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE
},
UseCompression::No => {
Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE + Bn256CeremonyParameters::PUBLIC_KEY_SIZE
}
};
if metadata.len() != (expected_response_length as u64) {
panic!("The size of `./response` should be {}, but it's {}, so something isn't right.", expected_response_length, metadata.len());
}
}
let response_readable_map = unsafe { MmapOptions::new().map(&response_reader).expect("unable to create a memory map for input") };
println!("Calculating previous challenge hash...");
// Check that contribution is correct
let current_accumulator_hash = BachedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&challenge_readable_map);
println!("Hash of the `challenge` file for verification:");
for line in current_accumulator_hash.as_slice().chunks(16) {
print!("\t");
for section in line.chunks(4) {
for b in section {
print!("{:02x}", b);
}
print!(" ");
}
println!("");
}
// Check the hash chain - a new response must be based on the previous challenge!
{
let mut response_challenge_hash = [0; 64];
let memory_slice = response_readable_map.get(0..64).expect("must read point data from file");
memory_slice.clone().read_exact(&mut response_challenge_hash).expect("couldn't read hash of challenge file from response file");
println!("`response` was based on the hash:");
for line in response_challenge_hash.chunks(16) {
print!("\t");
for section in line.chunks(4) {
for b in section {
print!("{:02x}", b);
}
print!(" ");
}
println!("");
}
if &response_challenge_hash[..] != current_accumulator_hash.as_slice() {
panic!("Hash chain failure. This is not the right response.");
}
}
let response_hash = BachedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&response_readable_map);
println!("Hash of the `response` file for verification:");
for line in response_hash.as_slice().chunks(16) {
print!("\t");
for section in line.chunks(4) {
for b in section {
print!("{:02x}", b);
}
print!(" ");
}
println!("");
}
// get the contributor's public key
let public_key = PublicKey::<Bn256>::read::<Bn256CeremonyParameters>(&response_readable_map, CONTRIBUTION_IS_COMPRESSED)
.expect("wasn't able to deserialize the response file's public key");
// check that it follows the protocol
println!("Verifying a contribution to contain proper powers and correspond to the public key...");
let valid = BachedAccumulator::<Bn256, Bn256CeremonyParameters>::verify_transformation(
&challenge_readable_map,
&response_readable_map,
&public_key,
current_accumulator_hash.as_slice(),
PREVIOUS_CHALLENGE_IS_COMPRESSED,
CONTRIBUTION_IS_COMPRESSED,
CheckForCorrectness::No,
CheckForCorrectness::Yes,
);
if !valid {
println!("Verification failed, contribution was invalid somehow.");
panic!("INVALID CONTRIBUTION!!!");
} else {
println!("Verification succeeded!");
}
if COMPRESS_NEW_CHALLENGE == UseCompression::Yes {
println!("Don't need to recompress the contribution, please copy `./response` as `./new_challenge`");
} else {
println!("Verification succeeded! Writing to `./new_challenge`...");
// Create `./new_challenge` in this directory
let writer = OpenOptions::new()
.read(true)
.write(true)
.create_new(true)
.open("new_challenge").expect("unable to create `./new_challenge` in this directory");
        // Recomputation strips the public key and uses hashing to link with the previous contribution after decompression
writer.set_len(Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE as u64).expect("must make output file large enough");
let mut writable_map = unsafe { MmapOptions::new().map_mut(&writer).expect("unable to create a memory map for output") };
{
(&mut writable_map[0..]).write(response_hash.as_slice()).expect("unable to write a default hash to mmap");
writable_map.flush().expect("unable to write hash to `./new_challenge`");
}
BachedAccumulator::<Bn256, Bn256CeremonyParameters>::decompress(
&response_readable_map,
&mut writable_map,
CheckForCorrectness::No).expect("must decompress a response for a new challenge");
writable_map.flush().expect("must flush the memory map");
let new_challenge_readable_map = writable_map.make_read_only().expect("must make a map readonly");
let recompressed_hash = BachedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&new_challenge_readable_map);
println!("Here's the BLAKE2b hash of the decompressed participant's response as `new_challenge` file:");
for line in recompressed_hash.as_slice().chunks(16) {
print!("\t");
for section in line.chunks(4) {
for b in section {
print!("{:02x}", b);
}
print!(" ");
}
println!("");
}
println!("Done! `./new_challenge` contains the new challenge file. The other files");
println!("were left alone.");
}
}

@ -0,0 +1,860 @@
//! This ceremony constructs the "powers of tau" for Jens Groth's 2016 zk-SNARK proving
//! system using the BLS12-381 pairing-friendly elliptic curve construction.
//!
//! # Overview
//!
//! Participants of the ceremony receive a "challenge" file containing:
//!
//! * the BLAKE2b hash of the last file entered into the transcript
//! * an `Accumulator` (with curve points encoded in uncompressed form for fast deserialization)
//!
//! The participant runs a tool which generates a random keypair (`PublicKey`, `PrivateKey`)
//! used for modifying the `Accumulator` from the "challenge" file. The keypair is then used to
//! transform the `Accumulator`, and a "response" file is generated containing:
//!
//! * the BLAKE2b hash of the "challenge" file (thus forming a hash chain over the entire transcript)
//! * an `Accumulator` (with curve points encoded in compressed form for fast uploading)
//! * the `PublicKey`
//!
//! This "challenge" file is entered into the protocol transcript. A given transcript is valid
//! if the transformations between consecutive `Accumulator`s verify with their respective
//! `PublicKey`s. Participants (and the public) can ensure that their contribution to the
//! `Accumulator` was accepted by ensuring the transcript contains their "response" file, ideally
//! by comparison of the BLAKE2b hash of the "response" file.
//!
//! After some time has elapsed for participants to contribute to the ceremony, a participant is
//! simulated with a randomness beacon. The resulting `Accumulator` contains partial zk-SNARK
//! public parameters for all circuits within a bounded size.
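//!
//! As a rough sketch (not part of this module's API; file names and the plain-BLAKE2b hashing
//! of whole files mirror the verifier binary above), checking the hash chain between a
//! challenge file and the response built on top of it amounts to:
//!
//! ```ignore
//! use std::fs;
//! use blake2::{Blake2b, Digest};
//!
//! // Hash the previous challenge file...
//! let challenge = fs::read("challenge").expect("challenge file must exist");
//! let mut hasher = Blake2b::default();
//! hasher.input(&challenge);
//! let challenge_hash = hasher.result();
//!
//! // ...and compare it against the first 64 bytes of the response, which echo that hash.
//! let response = fs::read("response").expect("response file must exist");
//! assert_eq!(&response[..64], challenge_hash.as_slice());
//! ```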
extern crate pairing;
extern crate rand;
extern crate crossbeam;
extern crate num_cpus;
extern crate blake2;
extern crate generic_array;
extern crate typenum;
extern crate byteorder;
extern crate bellman;
use byteorder::{ReadBytesExt, BigEndian};
use rand::{SeedableRng, Rng, Rand};
use rand::chacha::ChaChaRng;
use bellman::pairing::bls12_381::*;
use bellman::pairing::*;
use std::io::{self, Read, Write};
use std::sync::{Arc, Mutex};
use generic_array::GenericArray;
use typenum::consts::U64;
use blake2::{Blake2b, Digest};
use std::fmt;
// This ceremony is based on the BLS12-381 elliptic curve construction.
const G1_UNCOMPRESSED_BYTE_SIZE: usize = 96;
const G2_UNCOMPRESSED_BYTE_SIZE: usize = 192;
const G1_COMPRESSED_BYTE_SIZE: usize = 48;
const G2_COMPRESSED_BYTE_SIZE: usize = 96;
/// The accumulator supports circuits with 2^21 multiplication gates.
const TAU_POWERS_LENGTH: usize = (1 << 21);
/// More tau powers are needed in G1 because the Groth16 H query
/// includes terms of the form tau^i * (tau^m - 1) = tau^(i+m) - tau^i
/// where the largest i = m - 2, requiring the computation of tau^(2m - 2)
/// and thus giving us a vector length of 2^22 - 1.
const TAU_POWERS_G1_LENGTH: usize = (TAU_POWERS_LENGTH << 1) - 1;
/// The size of the accumulator on disk.
pub const ACCUMULATOR_BYTE_SIZE: usize = (TAU_POWERS_G1_LENGTH * G1_UNCOMPRESSED_BYTE_SIZE) + // g1 tau powers
(TAU_POWERS_LENGTH * G2_UNCOMPRESSED_BYTE_SIZE) + // g2 tau powers
(TAU_POWERS_LENGTH * G1_UNCOMPRESSED_BYTE_SIZE) + // alpha tau powers
(TAU_POWERS_LENGTH * G1_UNCOMPRESSED_BYTE_SIZE) // beta tau powers
+ G2_UNCOMPRESSED_BYTE_SIZE // beta in g2
+ 64; // blake2b hash of previous contribution
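// Sanity check (not used by the code): with TAU_POWERS_LENGTH = 2^21 and
// TAU_POWERS_G1_LENGTH = 2^22 - 1, this evaluates to
//   (2^22 - 1)*96 + 2^21*192 + 2*(2^21*96) + 192 + 64 = 1_207_959_712 bytes,
// i.e. a challenge file of roughly 1.2 GB.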
/// The "public key" is used to verify a contribution was correctly
/// computed.
pub const PUBLIC_KEY_SIZE: usize = 3 * G2_UNCOMPRESSED_BYTE_SIZE + // tau, alpha, and beta in g2
6 * G1_UNCOMPRESSED_BYTE_SIZE; // (s1, s1*tau), (s2, s2*alpha), (s3, s3*beta) in g1
/// The size of the contribution on disk.
pub const CONTRIBUTION_BYTE_SIZE: usize = (TAU_POWERS_G1_LENGTH * G1_COMPRESSED_BYTE_SIZE) + // g1 tau powers
(TAU_POWERS_LENGTH * G2_COMPRESSED_BYTE_SIZE) + // g2 tau powers
(TAU_POWERS_LENGTH * G1_COMPRESSED_BYTE_SIZE) + // alpha tau powers
(TAU_POWERS_LENGTH * G1_COMPRESSED_BYTE_SIZE) // beta tau powers
+ G2_COMPRESSED_BYTE_SIZE // beta in g2
+ 64 // blake2b hash of input accumulator
+ PUBLIC_KEY_SIZE; // public key
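// Sanity check (not used by the code): with the same parameters this evaluates to
//   (2^22 - 1)*48 + 2^21*96 + 2*(2^21*48) + 96 + 64 + 1152 = 603_981_040 bytes,
// i.e. a response file of roughly 604 MB.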
/// Hashes to G2 using the first 32 bytes of `digest`. Panics if `digest` is less
/// than 32 bytes.
fn hash_to_g2(mut digest: &[u8]) -> G2
{
assert!(digest.len() >= 32);
let mut seed = Vec::with_capacity(8);
for _ in 0..8 {
seed.push(digest.read_u32::<BigEndian>().expect("assertion above guarantees this to work"));
}
ChaChaRng::from_seed(&seed).gen()
}
#[test]
fn test_hash_to_g2() {
assert!(
hash_to_g2(&[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33])
==
hash_to_g2(&[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,34])
);
assert!(
hash_to_g2(&[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32])
!=
hash_to_g2(&[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,33])
);
}
/// Contains terms of the form (s<sub>1</sub>, s<sub>1</sub><sup>x</sup>, H(s<sub>1</sub><sup>x</sup>)<sub>2</sub>, H(s<sub>1</sub><sup>x</sup>)<sub>2</sub><sup>x</sup>)
/// for all x in τ, α and β, and some s chosen randomly by its creator. The function H "hashes into" the group G2. No points in the public key may be the identity.
///
/// The elements in G2 are used to verify transformations of the accumulator. By its nature, the public key proves
/// knowledge of τ, α and β.
///
/// It is necessary to verify `same_ratio`((s<sub>1</sub>, s<sub>1</sub><sup>x</sup>), (H(s<sub>1</sub><sup>x</sup>)<sub>2</sub>, H(s<sub>1</sub><sup>x</sup>)<sub>2</sub><sup>x</sup>)).
#[derive(PartialEq, Eq)]
pub struct PublicKey {
tau_g1: (G1Affine, G1Affine),
alpha_g1: (G1Affine, G1Affine),
beta_g1: (G1Affine, G1Affine),
tau_g2: G2Affine,
alpha_g2: G2Affine,
beta_g2: G2Affine
}
/// Contains the secrets τ, α and β that the participant of the ceremony must destroy.
pub struct PrivateKey {
tau: Fr,
alpha: Fr,
beta: Fr
}
/// Constructs a keypair given an RNG and a 64-byte transcript `digest`.
pub fn keypair<R: Rng>(rng: &mut R, digest: &[u8]) -> (PublicKey, PrivateKey)
{
assert_eq!(digest.len(), 64);
let tau = Fr::rand(rng);
let alpha = Fr::rand(rng);
let beta = Fr::rand(rng);
let mut op = |x, personalization: u8| {
// Sample random g^s
let g1_s = G1::rand(rng).into_affine();
// Compute g^{s*x}
let g1_s_x = g1_s.mul(x).into_affine();
// Compute BLAKE2b(personalization | transcript | g^s | g^{s*x})
let h = {
let mut h = Blake2b::default();
h.input(&[personalization]);
h.input(digest);
h.input(g1_s.into_uncompressed().as_ref());
h.input(g1_s_x.into_uncompressed().as_ref());
h.result()
};
// Hash into G2 as g^{s'}
let g2_s = hash_to_g2(h.as_ref()).into_affine();
// Compute g^{s'*x}
let g2_s_x = g2_s.mul(x).into_affine();
((g1_s, g1_s_x), g2_s_x)
};
let pk_tau = op(tau, 0);
let pk_alpha = op(alpha, 1);
let pk_beta = op(beta, 2);
(
PublicKey {
tau_g1: pk_tau.0,
alpha_g1: pk_alpha.0,
beta_g1: pk_beta.0,
tau_g2: pk_tau.1,
alpha_g2: pk_alpha.1,
beta_g2: pk_beta.1,
},
PrivateKey {
tau: tau,
alpha: alpha,
beta: beta
}
)
}
/// Determines if point compression should be used.
#[derive(Copy, Clone)]
pub enum UseCompression {
Yes,
No
}
/// Determines if points should be checked for correctness during deserialization.
/// This is not necessary for participants, because a transcript verifier can
/// check this themselves.
#[derive(Copy, Clone)]
pub enum CheckForCorrectness {
Yes,
No
}
fn write_point<W, G>(
writer: &mut W,
p: &G,
compression: UseCompression
) -> io::Result<()>
where W: Write,
G: CurveAffine
{
match compression {
UseCompression::Yes => writer.write_all(p.into_compressed().as_ref()),
UseCompression::No => writer.write_all(p.into_uncompressed().as_ref()),
}
}
/// Errors that might occur during deserialization.
#[derive(Debug)]
pub enum DeserializationError {
IoError(io::Error),
DecodingError(GroupDecodingError),
PointAtInfinity
}
impl fmt::Display for DeserializationError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
DeserializationError::IoError(ref e) => write!(f, "Disk IO error: {}", e),
DeserializationError::DecodingError(ref e) => write!(f, "Decoding error: {}", e),
DeserializationError::PointAtInfinity => write!(f, "Point at infinity found")
}
}
}
impl From<io::Error> for DeserializationError {
fn from(err: io::Error) -> DeserializationError {
DeserializationError::IoError(err)
}
}
impl From<GroupDecodingError> for DeserializationError {
fn from(err: GroupDecodingError) -> DeserializationError {
DeserializationError::DecodingError(err)
}
}
impl PublicKey {
/// Serialize the public key. Points are always in uncompressed form.
pub fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()>
{
write_point(writer, &self.tau_g1.0, UseCompression::No)?;
write_point(writer, &self.tau_g1.1, UseCompression::No)?;
write_point(writer, &self.alpha_g1.0, UseCompression::No)?;
write_point(writer, &self.alpha_g1.1, UseCompression::No)?;
write_point(writer, &self.beta_g1.0, UseCompression::No)?;
write_point(writer, &self.beta_g1.1, UseCompression::No)?;
write_point(writer, &self.tau_g2, UseCompression::No)?;
write_point(writer, &self.alpha_g2, UseCompression::No)?;
write_point(writer, &self.beta_g2, UseCompression::No)?;
Ok(())
}
/// Deserialize the public key. Points are always in uncompressed form, and
/// always checked, since there aren't very many of them. Does not allow any
/// points at infinity.
pub fn deserialize<R: Read>(reader: &mut R) -> Result<PublicKey, DeserializationError>
{
fn read_uncompressed<C: CurveAffine, R: Read>(reader: &mut R) -> Result<C, DeserializationError> {
let mut repr = C::Uncompressed::empty();
reader.read_exact(repr.as_mut())?;
let v = repr.into_affine()?;
if v.is_zero() {
Err(DeserializationError::PointAtInfinity)
} else {
Ok(v)
}
}
let tau_g1_s = read_uncompressed(reader)?;
let tau_g1_s_tau = read_uncompressed(reader)?;
let alpha_g1_s = read_uncompressed(reader)?;
let alpha_g1_s_alpha = read_uncompressed(reader)?;
let beta_g1_s = read_uncompressed(reader)?;
let beta_g1_s_beta = read_uncompressed(reader)?;
let tau_g2 = read_uncompressed(reader)?;
let alpha_g2 = read_uncompressed(reader)?;
let beta_g2 = read_uncompressed(reader)?;
Ok(PublicKey {
tau_g1: (tau_g1_s, tau_g1_s_tau),
alpha_g1: (alpha_g1_s, alpha_g1_s_alpha),
beta_g1: (beta_g1_s, beta_g1_s_beta),
tau_g2: tau_g2,
alpha_g2: alpha_g2,
beta_g2: beta_g2
})
}
}
#[test]
fn test_pubkey_serialization() {
use rand::thread_rng;
let rng = &mut thread_rng();
let digest = (0..64).map(|_| rng.gen()).collect::<Vec<_>>();
let (pk, _) = keypair(rng, &digest);
let mut v = vec![];
pk.serialize(&mut v).unwrap();
assert_eq!(v.len(), PUBLIC_KEY_SIZE);
let deserialized = PublicKey::deserialize(&mut &v[..]).unwrap();
assert!(pk == deserialized);
}
/// The `Accumulator` is an object that participants of the ceremony contribute
/// randomness to. This object contains powers of trapdoor `tau` in G1 and in G2 over
/// fixed generators, and additionally in G1 over two other generators of exponents
/// `alpha` and `beta` over those fixed generators. In other words:
///
/// * (τ, τ<sup>2</sup>, ..., τ<sup>2<sup>22</sup> - 2</sup>, α, ατ, ατ<sup>2</sup>, ..., ατ<sup>2<sup>21</sup> - 1</sup>, β, βτ, βτ<sup>2</sup>, ..., βτ<sup>2<sup>21</sup> - 1</sup>)<sub>1</sub>
/// * (β, τ, τ<sup>2</sup>, ..., τ<sup>2<sup>21</sup> - 1</sup>)<sub>2</sub>
#[derive(PartialEq, Eq, Clone)]
pub struct Accumulator {
/// tau^0, tau^1, tau^2, ..., tau^{TAU_POWERS_G1_LENGTH - 1}
pub tau_powers_g1: Vec<G1Affine>,
/// tau^0, tau^1, tau^2, ..., tau^{TAU_POWERS_LENGTH - 1}
pub tau_powers_g2: Vec<G2Affine>,
/// alpha * tau^0, alpha * tau^1, alpha * tau^2, ..., alpha * tau^{TAU_POWERS_LENGTH - 1}
pub alpha_tau_powers_g1: Vec<G1Affine>,
/// beta * tau^0, beta * tau^1, beta * tau^2, ..., beta * tau^{TAU_POWERS_LENGTH - 1}
pub beta_tau_powers_g1: Vec<G1Affine>,
/// beta
pub beta_g2: G2Affine
}
impl Accumulator {
/// Constructs an "initial" accumulator with τ = 1, α = 1, β = 1.
pub fn new() -> Self {
Accumulator {
tau_powers_g1: vec![G1Affine::one(); TAU_POWERS_G1_LENGTH],
tau_powers_g2: vec![G2Affine::one(); TAU_POWERS_LENGTH],
alpha_tau_powers_g1: vec![G1Affine::one(); TAU_POWERS_LENGTH],
beta_tau_powers_g1: vec![G1Affine::one(); TAU_POWERS_LENGTH],
beta_g2: G2Affine::one()
}
}
/// Write the accumulator with some compression behavior.
pub fn serialize<W: Write>(
&self,
writer: &mut W,
compression: UseCompression
) -> io::Result<()>
{
fn write_all<W: Write, C: CurveAffine>(
writer: &mut W,
c: &[C],
compression: UseCompression
) -> io::Result<()>
{
for c in c {
write_point(writer, c, compression)?;
}
Ok(())
}
write_all(writer, &self.tau_powers_g1, compression)?;
write_all(writer, &self.tau_powers_g2, compression)?;
write_all(writer, &self.alpha_tau_powers_g1, compression)?;
write_all(writer, &self.beta_tau_powers_g1, compression)?;
write_all(writer, &[self.beta_g2], compression)?;
Ok(())
}
/// Read the accumulator from disk with some compression behavior. `checked`
/// indicates whether we should check it's a valid element of the group and
/// not the point at infinity.
pub fn deserialize<R: Read>(
reader: &mut R,
compression: UseCompression,
checked: CheckForCorrectness
) -> Result<Self, DeserializationError>
{
fn read_all<R: Read, C: CurveAffine>(
reader: &mut R,
size: usize,
compression: UseCompression,
checked: CheckForCorrectness
) -> Result<Vec<C>, DeserializationError>
{
fn decompress_all<R: Read, E: EncodedPoint>(
reader: &mut R,
size: usize,
checked: CheckForCorrectness
) -> Result<Vec<E::Affine>, DeserializationError>
{
// Read the encoded elements
let mut res = vec![E::empty(); size];
for encoded in &mut res {
reader.read_exact(encoded.as_mut())?;
}
// Allocate space for the deserialized elements
let mut res_affine = vec![E::Affine::zero(); size];
let mut chunk_size = res.len() / num_cpus::get();
if chunk_size == 0 {
chunk_size = 1;
}
// If any of our threads encounter a deserialization/IO error, catch
// it with this.
let decoding_error = Arc::new(Mutex::new(None));
crossbeam::scope(|scope| {
for (source, target) in res.chunks(chunk_size).zip(res_affine.chunks_mut(chunk_size)) {
let decoding_error = decoding_error.clone();
scope.spawn(move || {
for (source, target) in source.iter().zip(target.iter_mut()) {
match {
// If we're a participant, we don't need to check all of the
// elements in the accumulator, which saves a lot of time.
// The hash chain prevents this from being a problem: the
// transcript guarantees that the accumulator was properly
// formed.
match checked {
CheckForCorrectness::Yes => {
// Points at infinity are never expected in the accumulator
source.into_affine().map_err(|e| e.into()).and_then(|source| {
if source.is_zero() {
Err(DeserializationError::PointAtInfinity)
} else {
Ok(source)
}
})
},
CheckForCorrectness::No => source.into_affine_unchecked().map_err(|e| e.into())
}
}
{
Ok(source) => {
*target = source;
},
Err(e) => {
*decoding_error.lock().unwrap() = Some(e);
}
}
}
});
}
});
match Arc::try_unwrap(decoding_error).unwrap().into_inner().unwrap() {
Some(e) => {
Err(e)
},
None => {
Ok(res_affine)
}
}
}
match compression {
UseCompression::Yes => decompress_all::<_, C::Compressed>(reader, size, checked),
UseCompression::No => decompress_all::<_, C::Uncompressed>(reader, size, checked)
}
}
let tau_powers_g1 = read_all(reader, TAU_POWERS_G1_LENGTH, compression, checked)?;
let tau_powers_g2 = read_all(reader, TAU_POWERS_LENGTH, compression, checked)?;
let alpha_tau_powers_g1 = read_all(reader, TAU_POWERS_LENGTH, compression, checked)?;
let beta_tau_powers_g1 = read_all(reader, TAU_POWERS_LENGTH, compression, checked)?;
let beta_g2 = read_all(reader, 1, compression, checked)?[0];
Ok(Accumulator {
tau_powers_g1: tau_powers_g1,
tau_powers_g2: tau_powers_g2,
alpha_tau_powers_g1: alpha_tau_powers_g1,
beta_tau_powers_g1: beta_tau_powers_g1,
beta_g2: beta_g2
})
}
/// Transforms the accumulator with a private key.
pub fn transform(&mut self, key: &PrivateKey)
{
// Construct the powers of tau
let mut taupowers = vec![Fr::zero(); TAU_POWERS_G1_LENGTH];
let chunk_size = TAU_POWERS_G1_LENGTH / num_cpus::get();
// Construct exponents in parallel
crossbeam::scope(|scope| {
for (i, taupowers) in taupowers.chunks_mut(chunk_size).enumerate() {
scope.spawn(move || {
let mut acc = key.tau.pow(&[(i * chunk_size) as u64]);
for t in taupowers {
*t = acc;
acc.mul_assign(&key.tau);
}
});
}
});
/// Exponentiate a large number of points, with an optional coefficient to be applied to the
/// exponent.
fn batch_exp<C: CurveAffine>(bases: &mut [C], exp: &[C::Scalar], coeff: Option<&C::Scalar>) {
assert_eq!(bases.len(), exp.len());
let mut projective = vec![C::Projective::zero(); bases.len()];
let chunk_size = bases.len() / num_cpus::get();
// Perform wNAF over multiple cores, placing results into `projective`.
crossbeam::scope(|scope| {
for ((bases, exp), projective) in bases.chunks_mut(chunk_size)
.zip(exp.chunks(chunk_size))
.zip(projective.chunks_mut(chunk_size))
{
scope.spawn(move || {
let mut wnaf = Wnaf::new();
for ((base, exp), projective) in bases.iter_mut()
.zip(exp.iter())
.zip(projective.iter_mut())
{
let mut exp = *exp;
if let Some(coeff) = coeff {
exp.mul_assign(coeff);
}
*projective = wnaf.base(base.into_projective(), 1).scalar(exp.into_repr());
}
});
}
});
// Perform batch normalization
crossbeam::scope(|scope| {
for projective in projective.chunks_mut(chunk_size)
{
scope.spawn(move || {
C::Projective::batch_normalization(projective);
});
}
});
// Turn it all back into affine points
for (projective, affine) in projective.iter().zip(bases.iter_mut()) {
*affine = projective.into_affine();
}
}
batch_exp(&mut self.tau_powers_g1, &taupowers[0..], None);
batch_exp(&mut self.tau_powers_g2, &taupowers[0..TAU_POWERS_LENGTH], None);
batch_exp(&mut self.alpha_tau_powers_g1, &taupowers[0..TAU_POWERS_LENGTH], Some(&key.alpha));
batch_exp(&mut self.beta_tau_powers_g1, &taupowers[0..TAU_POWERS_LENGTH], Some(&key.beta));
self.beta_g2 = self.beta_g2.mul(key.beta).into_affine();
}
}
/// Verifies a transformation of the `Accumulator` with the `PublicKey`, given a 64-byte transcript `digest`.
pub fn verify_transform(before: &Accumulator, after: &Accumulator, key: &PublicKey, digest: &[u8]) -> bool
{
assert_eq!(digest.len(), 64);
let compute_g2_s = |g1_s: G1Affine, g1_s_x: G1Affine, personalization: u8| {
let mut h = Blake2b::default();
h.input(&[personalization]);
h.input(digest);
h.input(g1_s.into_uncompressed().as_ref());
h.input(g1_s_x.into_uncompressed().as_ref());
hash_to_g2(h.result().as_ref()).into_affine()
};
let tau_g2_s = compute_g2_s(key.tau_g1.0, key.tau_g1.1, 0);
let alpha_g2_s = compute_g2_s(key.alpha_g1.0, key.alpha_g1.1, 1);
let beta_g2_s = compute_g2_s(key.beta_g1.0, key.beta_g1.1, 2);
// Check the proofs-of-knowledge for tau/alpha/beta
if !same_ratio(key.tau_g1, (tau_g2_s, key.tau_g2)) {
return false;
}
if !same_ratio(key.alpha_g1, (alpha_g2_s, key.alpha_g2)) {
return false;
}
if !same_ratio(key.beta_g1, (beta_g2_s, key.beta_g2)) {
return false;
}
// Check the correctness of the generators for tau powers
if after.tau_powers_g1[0] != G1Affine::one() {
return false;
}
if after.tau_powers_g2[0] != G2Affine::one() {
return false;
}
// Did the participant multiply the previous tau by the new one?
if !same_ratio((before.tau_powers_g1[1], after.tau_powers_g1[1]), (tau_g2_s, key.tau_g2)) {
return false;
}
// Did the participant multiply the previous alpha by the new one?
if !same_ratio((before.alpha_tau_powers_g1[0], after.alpha_tau_powers_g1[0]), (alpha_g2_s, key.alpha_g2)) {
return false;
}
// Did the participant multiply the previous beta by the new one?
if !same_ratio((before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (beta_g2_s, key.beta_g2)) {
return false;
}
if !same_ratio((before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (before.beta_g2, after.beta_g2)) {
return false;
}
// Are the powers of tau correct?
if !same_ratio(power_pairs(&after.tau_powers_g1), (after.tau_powers_g2[0], after.tau_powers_g2[1])) {
return false;
}
if !same_ratio(power_pairs(&after.tau_powers_g2), (after.tau_powers_g1[0], after.tau_powers_g1[1])) {
return false;
}
if !same_ratio(power_pairs(&after.alpha_tau_powers_g1), (after.tau_powers_g2[0], after.tau_powers_g2[1])) {
return false;
}
if !same_ratio(power_pairs(&after.beta_tau_powers_g1), (after.tau_powers_g2[0], after.tau_powers_g2[1])) {
return false;
}
true
}
/// Computes a random linear combination over v1/v2.
///
/// Checking that many pairs of elements are exponentiated by
/// the same `x` can be achieved (with high probability) with
/// the following technique:
///
/// Given v1 = [a, b, c] and v2 = [as, bs, cs], compute
/// (a*r1 + b*r2 + c*r3, (as)*r1 + (bs)*r2 + (cs)*r3) for some
/// random r1, r2, r3. Given (g, g^s)...
///
/// e(g, (as)*r1 + (bs)*r2 + (cs)*r3) = e(g^s, a*r1 + b*r2 + c*r3)
///
/// ... with high probability.
fn merge_pairs<G: CurveAffine>(v1: &[G], v2: &[G]) -> (G, G)
{
use std::sync::{Arc, Mutex};
use rand::{thread_rng};
assert_eq!(v1.len(), v2.len());
let chunk = (v1.len() / num_cpus::get()) + 1;
let s = Arc::new(Mutex::new(G::Projective::zero()));
let sx = Arc::new(Mutex::new(G::Projective::zero()));
crossbeam::scope(|scope| {
for (v1, v2) in v1.chunks(chunk).zip(v2.chunks(chunk)) {
let s = s.clone();
let sx = sx.clone();
scope.spawn(move || {
// We do not need to be overly cautious of the RNG
// used for this check.
let rng = &mut thread_rng();
let mut wnaf = Wnaf::new();
let mut local_s = G::Projective::zero();
let mut local_sx = G::Projective::zero();
for (v1, v2) in v1.iter().zip(v2.iter()) {
let rho = G::Scalar::rand(rng);
let mut wnaf = wnaf.scalar(rho.into_repr());
let v1 = wnaf.base(v1.into_projective());
let v2 = wnaf.base(v2.into_projective());
local_s.add_assign(&v1);
local_sx.add_assign(&v2);
}
s.lock().unwrap().add_assign(&local_s);
sx.lock().unwrap().add_assign(&local_sx);
});
}
});
let s = s.lock().unwrap().into_affine();
let sx = sx.lock().unwrap().into_affine();
(s, sx)
}
/// Construct a single pair (s, s^x) for a vector of
/// the form [1, x, x^2, x^3, ...].
fn power_pairs<G: CurveAffine>(v: &[G]) -> (G, G)
{
merge_pairs(&v[0..(v.len()-1)], &v[1..])
}
#[test]
fn test_power_pairs() {
use rand::thread_rng;
let rng = &mut thread_rng();
let mut v = vec![];
let x = Fr::rand(rng);
let mut acc = Fr::one();
for _ in 0..100 {
v.push(G1Affine::one().mul(acc).into_affine());
acc.mul_assign(&x);
}
let gx = G2Affine::one().mul(x).into_affine();
assert!(same_ratio(power_pairs(&v), (G2Affine::one(), gx)));
v[1] = v[1].mul(Fr::rand(rng)).into_affine();
assert!(!same_ratio(power_pairs(&v), (G2Affine::one(), gx)));
}
/// Checks if pairs have the same ratio.
fn same_ratio<G1: CurveAffine>(
g1: (G1, G1),
g2: (G1::Pair, G1::Pair)
) -> bool
{
g1.0.pairing_with(&g2.1) == g1.1.pairing_with(&g2.0)
}
#[test]
fn test_same_ratio() {
use rand::thread_rng;
let rng = &mut thread_rng();
let s = Fr::rand(rng);
let g1 = G1Affine::one();
let g2 = G2Affine::one();
let g1_s = g1.mul(s).into_affine();
let g2_s = g2.mul(s).into_affine();
assert!(same_ratio((g1, g1_s), (g2, g2_s)));
assert!(!same_ratio((g1_s, g1), (g2, g2_s)));
}
#[test]
fn test_accumulator_serialization() {
use rand::thread_rng;
let rng = &mut thread_rng();
let mut digest = (0..64).map(|_| rng.gen()).collect::<Vec<_>>();
let mut acc = Accumulator::new();
let before = acc.clone();
let (pk, sk) = keypair(rng, &digest);
acc.transform(&sk);
assert!(verify_transform(&before, &acc, &pk, &digest));
digest[0] = !digest[0];
assert!(!verify_transform(&before, &acc, &pk, &digest));
let mut v = Vec::with_capacity(ACCUMULATOR_BYTE_SIZE - 64);
acc.serialize(&mut v, UseCompression::No).unwrap();
assert_eq!(v.len(), ACCUMULATOR_BYTE_SIZE - 64);
let deserialized = Accumulator::deserialize(&mut &v[..], UseCompression::No, CheckForCorrectness::No).unwrap();
assert!(acc == deserialized);
}
/// Compute BLAKE2b("")
pub fn blank_hash() -> GenericArray<u8, U64> {
Blake2b::new().result()
}
/// Abstraction over a reader which hashes the data being read.
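///
/// A rough usage sketch (the file name is hypothetical; `File` comes from `std::fs`):
///
/// ```ignore
/// let mut reader = HashReader::new(File::open("challenge")?);
/// let mut contents = Vec::new();
/// reader.read_to_end(&mut contents)?;
/// let hash = reader.into_hash(); // BLAKE2b of everything that was read
/// ```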
pub struct HashReader<R: Read> {
reader: R,
hasher: Blake2b
}
impl<R: Read> HashReader<R> {
/// Construct a new `HashReader` given an existing `reader` by value.
pub fn new(reader: R) -> Self {
HashReader {
reader: reader,
hasher: Blake2b::default()
}
}
/// Destroy this reader and return the hash of what was read.
pub fn into_hash(self) -> GenericArray<u8, U64> {
self.hasher.result()
}
}
impl<R: Read> Read for HashReader<R> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let bytes = self.reader.read(buf)?;
if bytes > 0 {
self.hasher.input(&buf[0..bytes]);
}
Ok(bytes)
}
}
/// Abstraction over a writer which hashes the data being written.
pub struct HashWriter<W: Write> {
writer: W,
hasher: Blake2b
}
impl<W: Write> HashWriter<W> {
/// Construct a new `HashWriter` given an existing `writer` by value.
pub fn new(writer: W) -> Self {
HashWriter {
writer: writer,
hasher: Blake2b::default()
}
}
/// Destroy this writer and return the hash of what was written.
pub fn into_hash(self) -> GenericArray<u8, U64> {
self.hasher.result()
}
}
impl<W: Write> Write for HashWriter<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let bytes = self.writer.write(buf)?;
if bytes > 0 {
self.hasher.input(&buf[0..bytes]);
}
Ok(bytes)
}
fn flush(&mut self) -> io::Result<()> {
self.writer.flush()
}
}

@ -0,0 +1,119 @@
extern crate rand;
extern crate crossbeam;
extern crate num_cpus;
extern crate blake2;
extern crate generic_array;
extern crate typenum;
extern crate byteorder;
extern crate bellman;
use self::bellman::pairing::ff::{Field, PrimeField};
use self::byteorder::{ReadBytesExt, BigEndian};
use self::rand::{SeedableRng, Rng, Rand};
use self::rand::chacha::ChaChaRng;
use self::bellman::pairing::bn256::{Bn256};
use self::bellman::pairing::*;
use std::io::{self, Read, Write};
use std::sync::{Arc, Mutex};
use self::generic_array::GenericArray;
use self::typenum::consts::U64;
use self::blake2::{Blake2b, Digest};
use std::fmt;
use crate::parameters::*;
use crate::keypair::*;
use crate::utils::*;
#[derive(Clone)]
pub struct Bn256CeremonyParameters {
}
impl PowersOfTauParameters for Bn256CeremonyParameters {
const REQUIRED_POWER: usize = 26; // generate to have roughly 64 million constraints
// This ceremony is based on the BN256 elliptic curve construction.
const G1_UNCOMPRESSED_BYTE_SIZE: usize = 64;
const G2_UNCOMPRESSED_BYTE_SIZE: usize = 128;
const G1_COMPRESSED_BYTE_SIZE: usize = 32;
const G2_COMPRESSED_BYTE_SIZE: usize = 64;
}
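// Back-of-the-envelope figures (added for clarity, not used by the code): with
// REQUIRED_POWER = 26 the derived constants give TAU_POWERS_LENGTH = 2^26 and
// TAU_POWERS_G1_LENGTH = 2^27 - 1, so an uncompressed accumulator (challenge file)
// comes to roughly 26 GB and a compressed contribution (response file) to roughly 13 GB.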
#[test]
fn test_pubkey_serialization() {
use self::rand::thread_rng;
let rng = &mut thread_rng();
let digest = (0..64).map(|_| rng.gen()).collect::<Vec<_>>();
let (pk, _) = keypair::<_, Bn256>(rng, &digest);
let mut v = vec![];
pk.serialize(&mut v).unwrap();
assert_eq!(v.len(), Bn256CeremonyParameters::PUBLIC_KEY_SIZE);
let deserialized = PublicKey::<Bn256>::deserialize(&mut &v[..]).unwrap();
assert!(pk == deserialized);
}
#[test]
fn test_power_pairs() {
use self::rand::thread_rng;
use self::bellman::pairing::bn256::{Fr, G1Affine, G2Affine};
let rng = &mut thread_rng();
let mut v = vec![];
let x = Fr::rand(rng);
let mut acc = Fr::one();
for _ in 0..100 {
v.push(G1Affine::one().mul(acc).into_affine());
acc.mul_assign(&x);
}
let gx = G2Affine::one().mul(x).into_affine();
assert!(same_ratio(power_pairs(&v), (G2Affine::one(), gx)));
v[1] = v[1].mul(Fr::rand(rng)).into_affine();
assert!(!same_ratio(power_pairs(&v), (G2Affine::one(), gx)));
}
#[test]
fn test_same_ratio() {
use self::rand::thread_rng;
use self::bellman::pairing::bn256::{Fr, G1Affine, G2Affine};
let rng = &mut thread_rng();
let s = Fr::rand(rng);
let g1 = G1Affine::one();
let g2 = G2Affine::one();
let g1_s = g1.mul(s).into_affine();
let g2_s = g2.mul(s).into_affine();
assert!(same_ratio((g1, g1_s), (g2, g2_s)));
assert!(!same_ratio((g1_s, g1), (g2, g2_s)));
}
#[test]
fn test_accumulator_serialization() {
use crate::accumulator::*;
use self::rand::thread_rng;
use self::bellman::pairing::bn256::{Bn256, Fr, G1Affine, G2Affine};
use self::PowersOfTauParameters;
let rng = &mut thread_rng();
let mut digest = (0..64).map(|_| rng.gen()).collect::<Vec<_>>();
let params = Bn256CeremonyParameters{};
let mut acc = Accumulator::<Bn256, _>::new(params.clone());
let before = acc.clone();
let (pk, sk) = keypair::<_, Bn256>(rng, &digest);
acc.transform(&sk);
assert!(verify_transform(&before, &acc, &pk, &digest));
digest[0] = !digest[0];
assert!(!verify_transform(&before, &acc, &pk, &digest));
let mut v = Vec::with_capacity(Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE - 64);
acc.serialize(&mut v, UseCompression::No).unwrap();
assert_eq!(v.len(), Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE - 64);
let deserialized = Accumulator::deserialize(&mut &v[..], UseCompression::No, CheckForCorrectness::No, params).unwrap();
assert!(acc == deserialized);
}

306
powersoftau/src/keypair.rs Normal file

@ -0,0 +1,306 @@
extern crate rand;
extern crate crossbeam;
extern crate num_cpus;
extern crate blake2;
extern crate generic_array;
extern crate typenum;
extern crate byteorder;
extern crate bellman;
extern crate memmap;
extern crate itertools;
use itertools::Itertools;
use memmap::{Mmap, MmapMut};
use self::bellman::pairing::ff::{Field, PrimeField};
use self::byteorder::{ReadBytesExt, BigEndian};
use self::rand::{SeedableRng, Rng, Rand};
use self::rand::chacha::ChaChaRng;
use self::bellman::pairing::bn256::{Bn256};
use self::bellman::pairing::*;
use std::io::{self, Read, Write};
use std::sync::{Arc, Mutex};
use self::generic_array::GenericArray;
use self::typenum::consts::U64;
use self::blake2::{Blake2b, Digest};
use std::fmt;
use super::utils::*;
use super::parameters::*;
/// Contains terms of the form (s<sub>1</sub>, s<sub>1</sub><sup>x</sup>, H(s<sub>1</sub><sup>x</sup>)<sub>2</sub>, H(s<sub>1</sub><sup>x</sup>)<sub>2</sub><sup>x</sup>)
/// for all x in τ, α and β, and some s chosen randomly by its creator. The function H "hashes into" the group G2. No points in the public key may be the identity.
///
/// The elements in G2 are used to verify transformations of the accumulator. By its nature, the public key proves
/// knowledge of τ, α and β.
///
/// It is necessary to verify `same_ratio`((s<sub>1</sub>, s<sub>1</sub><sup>x</sup>), (H(s<sub>1</sub><sup>x</sup>)<sub>2</sub>, H(s<sub>1</sub><sup>x</sup>)<sub>2</sub><sup>x</sup>)).
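///
/// As a rough sketch (using the helpers from `utils`, with `pk` an already-deserialized
/// `PublicKey<E>` and `digest` the 64-byte transcript hash), the proof-of-knowledge check
/// for τ looks like:
///
/// ```ignore
/// let tau_g2_s = compute_g2_s::<E>(&digest, &pk.tau_g1.0, &pk.tau_g1.1, 0);
/// assert!(same_ratio(pk.tau_g1, (tau_g2_s, pk.tau_g2)));
/// ```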
#[derive(Eq)]
pub struct PublicKey<E: Engine> {
pub tau_g1: (E::G1Affine, E::G1Affine),
pub alpha_g1: (E::G1Affine, E::G1Affine),
pub beta_g1: (E::G1Affine, E::G1Affine),
pub tau_g2: E::G2Affine,
pub alpha_g2: E::G2Affine,
pub beta_g2: E::G2Affine
}
impl<E: Engine> PartialEq for PublicKey<E> {
fn eq(&self, other: &PublicKey<E>) -> bool {
self.tau_g1.0 == other.tau_g1.0 &&
self.tau_g1.1 == other.tau_g1.1 &&
self.alpha_g1.0 == other.alpha_g1.0 &&
self.alpha_g1.1 == other.alpha_g1.1 &&
self.beta_g1.0 == other.beta_g1.0 &&
self.beta_g1.1 == other.beta_g1.1 &&
self.tau_g2 == other.tau_g2 &&
self.alpha_g2 == other.alpha_g2 &&
self.beta_g2 == other.beta_g2
}
}
/// Contains the secrets τ, α and β that the participant of the ceremony must destroy.
pub struct PrivateKey<E: Engine> {
pub tau: E::Fr,
pub alpha: E::Fr,
pub beta: E::Fr
}
/// Constructs a keypair given an RNG and a 64-byte transcript `digest`.
pub fn keypair<R: Rng, E: Engine>(rng: &mut R, digest: &[u8]) -> (PublicKey<E>, PrivateKey<E>)
{
assert_eq!(digest.len(), 64);
    // tau is a contribution to the "powers of tau", in a set of points of the form "tau^i * G"
let tau = E::Fr::rand(rng);
    // alpha and beta are contributions of the form "alpha * tau^i * G" that are required
    // for the construction of the polynomials
let alpha = E::Fr::rand(rng);
let beta = E::Fr::rand(rng);
let mut op = |x: E::Fr, personalization: u8| {
// Sample random g^s
let g1_s = E::G1::rand(rng).into_affine();
// Compute g^{s*x}
let g1_s_x = g1_s.mul(x).into_affine();
// Compute BLAKE2b(personalization | transcript | g^s | g^{s*x})
let h: generic_array::GenericArray<u8, U64> = {
let mut h = Blake2b::default();
h.input(&[personalization]);
h.input(digest);
h.input(g1_s.into_uncompressed().as_ref());
h.input(g1_s_x.into_uncompressed().as_ref());
h.result()
};
// Hash into G2 as g^{s'}
let g2_s: E::G2Affine = hash_to_g2::<E>(h.as_ref()).into_affine();
// Compute g^{s'*x}
let g2_s_x = g2_s.mul(x).into_affine();
((g1_s, g1_s_x), g2_s_x)
};
// these "public keys" are requried for for next participants to check that points are in fact
// sequential powers
let pk_tau = op(tau, 0);
let pk_alpha = op(alpha, 1);
let pk_beta = op(beta, 2);
(
PublicKey {
tau_g1: pk_tau.0,
alpha_g1: pk_alpha.0,
beta_g1: pk_beta.0,
tau_g2: pk_tau.1,
alpha_g2: pk_alpha.1,
beta_g2: pk_beta.1,
},
PrivateKey {
tau: tau,
alpha: alpha,
beta: beta
}
)
}
impl<E: Engine> PublicKey<E> {
/// Serialize the public key. Points are always in uncompressed form.
pub fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()>
{
write_point(writer, &self.tau_g1.0, UseCompression::No)?;
write_point(writer, &self.tau_g1.1, UseCompression::No)?;
write_point(writer, &self.alpha_g1.0, UseCompression::No)?;
write_point(writer, &self.alpha_g1.1, UseCompression::No)?;
write_point(writer, &self.beta_g1.0, UseCompression::No)?;
write_point(writer, &self.beta_g1.1, UseCompression::No)?;
write_point(writer, &self.tau_g2, UseCompression::No)?;
write_point(writer, &self.alpha_g2, UseCompression::No)?;
write_point(writer, &self.beta_g2, UseCompression::No)?;
Ok(())
}
/// Deserialize the public key. Points are always in uncompressed form, and
/// always checked, since there aren't very many of them. Does not allow any
/// points at infinity.
pub fn deserialize<R: Read>(reader: &mut R) -> Result<PublicKey<E>, DeserializationError>
{
fn read_uncompressed<EE: Engine, C: CurveAffine<Engine = EE, Scalar = EE::Fr>, R: Read>(reader: &mut R) -> Result<C, DeserializationError> {
let mut repr = C::Uncompressed::empty();
reader.read_exact(repr.as_mut())?;
let v = repr.into_affine()?;
if v.is_zero() {
Err(DeserializationError::PointAtInfinity)
} else {
Ok(v)
}
}
let tau_g1_s = read_uncompressed::<E, _, _>(reader)?;
let tau_g1_s_tau = read_uncompressed::<E, _, _>(reader)?;
let alpha_g1_s = read_uncompressed::<E, _, _>(reader)?;
let alpha_g1_s_alpha = read_uncompressed::<E, _, _>(reader)?;
let beta_g1_s = read_uncompressed::<E, _, _>(reader)?;
let beta_g1_s_beta = read_uncompressed::<E, _, _>(reader)?;
let tau_g2 = read_uncompressed::<E, _, _>(reader)?;
let alpha_g2 = read_uncompressed::<E, _, _>(reader)?;
let beta_g2 = read_uncompressed::<E, _, _>(reader)?;
Ok(PublicKey {
tau_g1: (tau_g1_s, tau_g1_s_tau),
alpha_g1: (alpha_g1_s, alpha_g1_s_alpha),
beta_g1: (beta_g1_s, beta_g1_s_beta),
tau_g2: tau_g2,
alpha_g2: alpha_g2,
beta_g2: beta_g2
})
}
}
impl<E: Engine> PublicKey<E> {
    /// Writes the key to the memory map, calculating the position to write at within
    /// the file based on whether the contribution was output in compressed or
    /// uncompressed form
pub fn write<P>(
&self,
output_map: &mut MmapMut,
accumulator_was_compressed: UseCompression
)
-> io::Result<()>
where P: PowersOfTauParameters
{
let mut position = match accumulator_was_compressed {
UseCompression::Yes => {
P::CONTRIBUTION_BYTE_SIZE - P::PUBLIC_KEY_SIZE
},
UseCompression::No => {
P::ACCUMULATOR_BYTE_SIZE
}
};
(&mut output_map[position..]).write(&self.tau_g1.0.into_uncompressed().as_ref())?;
position += P::G1_UNCOMPRESSED_BYTE_SIZE;
(&mut output_map[position..]).write(&self.tau_g1.1.into_uncompressed().as_ref())?;
position += P::G1_UNCOMPRESSED_BYTE_SIZE;
(&mut output_map[position..]).write(&self.alpha_g1.0.into_uncompressed().as_ref())?;
position += P::G1_UNCOMPRESSED_BYTE_SIZE;
(&mut output_map[position..]).write(&self.alpha_g1.1.into_uncompressed().as_ref())?;
position += P::G1_UNCOMPRESSED_BYTE_SIZE;
(&mut output_map[position..]).write(&self.beta_g1.0.into_uncompressed().as_ref())?;
position += P::G1_UNCOMPRESSED_BYTE_SIZE;
(&mut output_map[position..]).write(&self.beta_g1.1.into_uncompressed().as_ref())?;
position += P::G1_UNCOMPRESSED_BYTE_SIZE;
(&mut output_map[position..]).write(&self.tau_g2.into_uncompressed().as_ref())?;
position += P::G2_UNCOMPRESSED_BYTE_SIZE;
(&mut output_map[position..]).write(&self.alpha_g2.into_uncompressed().as_ref())?;
position += P::G2_UNCOMPRESSED_BYTE_SIZE;
(&mut output_map[position..]).write(&self.beta_g2.into_uncompressed().as_ref())?;
output_map.flush()?;
Ok(())
}
/// Deserialize the public key. Points are always in uncompressed form, and
/// always checked, since there aren't very many of them. Does not allow any
/// points at infinity.
pub fn read<P>(
input_map: &Mmap,
accumulator_was_compressed: UseCompression
) -> Result<Self, DeserializationError>
where P: PowersOfTauParameters
{
fn read_uncompressed<EE: Engine, C: CurveAffine<Engine = EE, Scalar = EE::Fr>>(input_map: &Mmap, position: usize) -> Result<C, DeserializationError> {
let mut repr = C::Uncompressed::empty();
let element_size = C::Uncompressed::size();
let memory_slice = input_map.get(position..position+element_size).expect("must read point data from file");
memory_slice.clone().read_exact(repr.as_mut())?;
let v = repr.into_affine()?;
if v.is_zero() {
Err(DeserializationError::PointAtInfinity)
} else {
Ok(v)
}
}
let mut position = match accumulator_was_compressed {
UseCompression::Yes => {
P::CONTRIBUTION_BYTE_SIZE - P::PUBLIC_KEY_SIZE
},
UseCompression::No => {
P::ACCUMULATOR_BYTE_SIZE
}
};
let tau_g1_s = read_uncompressed::<E, _>(input_map, position)?;
position += P::G1_UNCOMPRESSED_BYTE_SIZE;
let tau_g1_s_tau = read_uncompressed::<E, _>(input_map, position)?;
position += P::G1_UNCOMPRESSED_BYTE_SIZE;
let alpha_g1_s = read_uncompressed::<E, _>(input_map, position)?;
position += P::G1_UNCOMPRESSED_BYTE_SIZE;
let alpha_g1_s_alpha = read_uncompressed::<E, _>(input_map, position)?;
position += P::G1_UNCOMPRESSED_BYTE_SIZE;
let beta_g1_s = read_uncompressed::<E, _>(input_map, position)?;
position += P::G1_UNCOMPRESSED_BYTE_SIZE;
let beta_g1_s_beta = read_uncompressed::<E, _>(input_map, position)?;
position += P::G1_UNCOMPRESSED_BYTE_SIZE;
let tau_g2 = read_uncompressed::<E, _>(input_map, position)?;
position += P::G2_UNCOMPRESSED_BYTE_SIZE;
let alpha_g2 = read_uncompressed::<E, _>(input_map, position)?;
position += P::G2_UNCOMPRESSED_BYTE_SIZE;
let beta_g2 = read_uncompressed::<E, _>(input_map, position)?;
Ok(PublicKey {
tau_g1: (tau_g1_s, tau_g1_s_tau),
alpha_g1: (alpha_g1_s, alpha_g1_s_alpha),
beta_g1: (beta_g1_s, beta_g1_s_beta),
tau_g2: tau_g2,
alpha_g2: alpha_g2,
beta_g2: beta_g2
})
}
}

10
powersoftau/src/lib.rs Normal file

@ -0,0 +1,10 @@
#![allow(unused_imports)]
// pub mod bls12_381;
pub mod bn256;
pub mod small_bn256;
pub mod accumulator;
pub mod batched_accumulator;
pub mod keypair;
pub mod parameters;
pub mod utils;

@ -0,0 +1,117 @@
extern crate rand;
extern crate crossbeam;
extern crate num_cpus;
extern crate blake2;
extern crate generic_array;
extern crate typenum;
extern crate byteorder;
extern crate bellman;
use bellman::pairing::ff::{Field, PrimeField};
use byteorder::{ReadBytesExt, BigEndian};
use rand::{SeedableRng, Rng, Rand};
use rand::chacha::ChaChaRng;
use bellman::pairing::bn256::{Bn256};
use bellman::pairing::*;
use std::io::{self, Read, Write};
use std::sync::{Arc, Mutex};
use generic_array::GenericArray;
use typenum::consts::U64;
use blake2::{Blake2b, Digest};
use std::fmt;
use super::keypair::*;
pub trait PowersOfTauParameters: Clone {
const REQUIRED_POWER: usize;
const G1_UNCOMPRESSED_BYTE_SIZE: usize;
const G2_UNCOMPRESSED_BYTE_SIZE: usize;
const G1_COMPRESSED_BYTE_SIZE: usize;
const G2_COMPRESSED_BYTE_SIZE: usize;
const TAU_POWERS_LENGTH: usize = (1 << Self::REQUIRED_POWER);
const TAU_POWERS_G1_LENGTH: usize = (Self::TAU_POWERS_LENGTH << 1) - 1;
const ACCUMULATOR_BYTE_SIZE: usize = (Self::TAU_POWERS_G1_LENGTH * Self::G1_UNCOMPRESSED_BYTE_SIZE) + // g1 tau powers
(Self::TAU_POWERS_LENGTH * Self::G2_UNCOMPRESSED_BYTE_SIZE) + // g2 tau powers
(Self::TAU_POWERS_LENGTH * Self::G1_UNCOMPRESSED_BYTE_SIZE) + // alpha tau powers
(Self::TAU_POWERS_LENGTH * Self::G1_UNCOMPRESSED_BYTE_SIZE) // beta tau powers
+ Self::G2_UNCOMPRESSED_BYTE_SIZE // beta in g2
+ Self::HASH_SIZE; // blake2b hash of previous contribution
const PUBLIC_KEY_SIZE: usize = 3 * Self::G2_UNCOMPRESSED_BYTE_SIZE + // tau, alpha, and beta in g2
6 * Self::G1_UNCOMPRESSED_BYTE_SIZE; // (s1, s1*tau), (s2, s2*alpha), (s3, s3*beta) in g1
const CONTRIBUTION_BYTE_SIZE: usize = (Self::TAU_POWERS_G1_LENGTH * Self::G1_COMPRESSED_BYTE_SIZE) + // g1 tau powers
(Self::TAU_POWERS_LENGTH * Self::G2_COMPRESSED_BYTE_SIZE) + // g2 tau powers
(Self::TAU_POWERS_LENGTH * Self::G1_COMPRESSED_BYTE_SIZE) + // alpha tau powers
(Self::TAU_POWERS_LENGTH * Self::G1_COMPRESSED_BYTE_SIZE) // beta tau powers
+ Self::G2_COMPRESSED_BYTE_SIZE // beta in g2
+ Self::HASH_SIZE // blake2b hash of input accumulator
+ Self::PUBLIC_KEY_SIZE; // public key
// Blake2b hash size
const HASH_SIZE: usize = 64;
const EMPIRICAL_BATCH_SIZE: usize = 1 << 21;
}
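// As a usage sketch (the helper name is hypothetical), the binaries derive expected file
// sizes directly from these associated constants, e.g. for a response file:
//
// fn expected_response_size<P: PowersOfTauParameters>(compressed: UseCompression) -> usize {
//     match compressed {
//         UseCompression::Yes => P::CONTRIBUTION_BYTE_SIZE,
//         UseCompression::No => P::ACCUMULATOR_BYTE_SIZE + P::PUBLIC_KEY_SIZE,
//     }
// }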
/// Determines if point compression should be used.
#[derive(Copy, Clone, PartialEq)]
pub enum UseCompression {
Yes,
No
}
/// Determines if points should be checked for correctness during deserialization.
/// This is not necessary for participants, because a transcript verifier can
/// check this themselves.
#[derive(Copy, Clone, PartialEq)]
pub enum CheckForCorrectness {
Yes,
No
}
/// Errors that might occur during deserialization.
#[derive(Debug)]
pub enum DeserializationError {
IoError(io::Error),
DecodingError(GroupDecodingError),
PointAtInfinity
}
impl fmt::Display for DeserializationError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
DeserializationError::IoError(ref e) => write!(f, "Disk IO error: {}", e),
DeserializationError::DecodingError(ref e) => write!(f, "Decoding error: {}", e),
DeserializationError::PointAtInfinity => write!(f, "Point at infinity found")
}
}
}
impl From<io::Error> for DeserializationError {
fn from(err: io::Error) -> DeserializationError {
DeserializationError::IoError(err)
}
}
impl From<GroupDecodingError> for DeserializationError {
fn from(err: GroupDecodingError) -> DeserializationError {
DeserializationError::DecodingError(err)
}
}
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum ElementType {
TauG1,
TauG2,
AlphaG1,
BetaG1,
BetaG2
}

@ -0,0 +1,40 @@
extern crate rand;
extern crate crossbeam;
extern crate num_cpus;
extern crate blake2;
extern crate generic_array;
extern crate typenum;
extern crate byteorder;
extern crate bellman;
use self::bellman::pairing::ff::{Field, PrimeField};
use self::byteorder::{ReadBytesExt, BigEndian};
use self::rand::{SeedableRng, Rng, Rand};
use self::rand::chacha::ChaChaRng;
use self::bellman::pairing::bn256::{Bn256};
use self::bellman::pairing::*;
use std::io::{self, Read, Write};
use std::sync::{Arc, Mutex};
use self::generic_array::GenericArray;
use self::typenum::consts::U64;
use self::blake2::{Blake2b, Digest};
use std::fmt;
use crate::parameters::*;
use crate::keypair::*;
use crate::utils::*;
#[derive(Clone)]
pub struct Bn256CeremonyParameters {
}
impl PowersOfTauParameters for Bn256CeremonyParameters {
    const REQUIRED_POWER: usize = 25; // generate to have roughly 32 million constraints
// This ceremony is based on the BN256 elliptic curve construction.
const G1_UNCOMPRESSED_BYTE_SIZE: usize = 64;
const G2_UNCOMPRESSED_BYTE_SIZE: usize = 128;
const G1_COMPRESSED_BYTE_SIZE: usize = 32;
const G2_COMPRESSED_BYTE_SIZE: usize = 64;
}

300
powersoftau/src/utils.rs Normal file

@ -0,0 +1,300 @@
extern crate rand;
extern crate crossbeam;
extern crate num_cpus;
extern crate blake2;
extern crate generic_array;
extern crate typenum;
extern crate byteorder;
extern crate bellman;
use bellman::pairing::ff::{Field, PrimeField, PrimeFieldRepr};
use byteorder::{ReadBytesExt, BigEndian};
use rand::{SeedableRng, Rng, Rand};
use rand::chacha::ChaChaRng;
use bellman::pairing::bn256::{Bn256};
use bellman::pairing::*;
use std::io::{self, Read, Write};
use std::sync::{Arc, Mutex};
use generic_array::GenericArray;
use typenum::consts::U64;
use blake2::{Blake2b, Digest};
use std::fmt;
use super::parameters::*;
/// Hashes to G2 using the first 32 bytes of `digest`. Panics if `digest` is less
/// than 32 bytes.
pub fn hash_to_g2<E:Engine>(mut digest: &[u8]) -> E::G2
{
assert!(digest.len() >= 32);
let mut seed = Vec::with_capacity(8);
for _ in 0..8 {
seed.push(digest.read_u32::<BigEndian>().expect("assertion above guarantees this to work"));
}
ChaChaRng::from_seed(&seed).gen()
}
#[test]
fn test_hash_to_g2() {
assert!(
hash_to_g2::<Bn256>(&[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33])
==
hash_to_g2::<Bn256>(&[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,34])
);
assert!(
hash_to_g2::<Bn256>(&[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32])
!=
hash_to_g2::<Bn256>(&[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,33])
);
}
/// Computes a random linear combination over v1/v2.
///
/// Checking that many pairs of elements are exponentiated by
/// the same `x` can be achieved (with high probability) with
/// the following technique:
///
/// Given v1 = [a, b, c] and v2 = [as, bs, cs], compute
/// (a*r1 + b*r2 + c*r3, (as)*r1 + (bs)*r2 + (cs)*r3) for some
/// random r1, r2, r3. Given (g, g^s)...
///
/// e(g, (as)*r1 + (bs)*r2 + (cs)*r3) = e(g^s, a*r1 + b*r2 + c*r3)
///
/// ... with high probability.
// fn merge_pairs<E: Engine, G: CurveAffine<Engine = E, Scalar = E::Fr>>(v1: &[G], v2: &[G]) -> (G, G)
// {
// use std::sync::{Arc, Mutex};
// use self::rand::{thread_rng};
// assert_eq!(v1.len(), v2.len());
// let chunk = (v1.len() / num_cpus::get()) + 1;
// let s = Arc::new(Mutex::new(G::Projective::zero()));
// let sx = Arc::new(Mutex::new(G::Projective::zero()));
// crossbeam::scope(|scope| {
// for (v1, v2) in v1.chunks(chunk).zip(v2.chunks(chunk)) {
// let s = s.clone();
// let sx = sx.clone();
// scope.spawn(move || {
// // We do not need to be overly cautious of the RNG
// // used for this check.
// let rng = &mut thread_rng();
// let mut wnaf = Wnaf::new();
// let mut local_s = G::Projective::zero();
// let mut local_sx = G::Projective::zero();
// for (v1, v2) in v1.iter().zip(v2.iter()) {
// let rho = G::Scalar::rand(rng);
// let mut wnaf = wnaf.scalar(rho.into_repr());
// let v1 = wnaf.base(v1.into_projective());
// let v2 = wnaf.base(v2.into_projective());
// local_s.add_assign(&v1);
// local_sx.add_assign(&v2);
// }
// s.lock().unwrap().add_assign(&local_s);
// sx.lock().unwrap().add_assign(&local_sx);
// });
// }
// });
// let s = s.lock().unwrap().into_affine();
// let sx = sx.lock().unwrap().into_affine();
// (s, sx)
// }
fn merge_pairs<E: Engine, G: CurveAffine<Engine = E, Scalar = E::Fr>>(v1: &[G], v2: &[G]) -> (G, G)
{
use self::rand::{thread_rng};
assert_eq!(v1.len(), v2.len());
let rng = &mut thread_rng();
let randomness: Vec<<G::Scalar as PrimeField>::Repr> = (0..v1.len()).map(|_| G::Scalar::rand(rng).into_repr()).collect();
let s = dense_multiexp(&v1, &randomness[..]).into_affine();
let sx = dense_multiexp(&v2, &randomness[..]).into_affine();
(s, sx)
}
/// Construct a single pair (s, s^x) for a vector of
/// the form [1, x, x^2, x^3, ...].
pub fn power_pairs<E: Engine, G: CurveAffine<Engine = E, Scalar = E::Fr>>(v: &[G]) -> (G, G)
{
merge_pairs::<E, _>(&v[0..(v.len()-1)], &v[1..])
}
/// Compute BLAKE2b("")
pub fn blank_hash() -> GenericArray<u8, U64> {
Blake2b::new().result()
}
/// Checks if pairs have the same ratio.
/// Under the hood uses pairing to check
/// x1/x2 = y1/y2 => x1*y2 = x2*y1
pub fn same_ratio<E: Engine, G1: CurveAffine<Engine = E, Scalar = E::Fr>>(
g1: (G1, G1),
g2: (G1::Pair, G1::Pair)
) -> bool
{
g1.0.pairing_with(&g2.1) == g1.1.pairing_with(&g2.0)
}
pub fn write_point<W, G>(
writer: &mut W,
p: &G,
compression: UseCompression
) -> io::Result<()>
where W: Write,
G: CurveAffine
{
match compression {
UseCompression::Yes => writer.write_all(p.into_compressed().as_ref()),
UseCompression::No => writer.write_all(p.into_uncompressed().as_ref()),
}
}
pub fn compute_g2_s<E: Engine> (
digest: &[u8],
g1_s: &E::G1Affine,
g1_s_x: &E::G1Affine,
personalization: u8
) -> E::G2Affine
{
let mut h = Blake2b::default();
h.input(&[personalization]);
h.input(digest);
h.input(g1_s.into_uncompressed().as_ref());
h.input(g1_s_x.into_uncompressed().as_ref());
hash_to_g2::<E>(h.result().as_ref()).into_affine()
}
/// Perform multi-exponentiation. The caller is responsible for ensuring that
/// the number of bases is the same as the number of exponents.
#[allow(dead_code)]
pub fn dense_multiexp<G: CurveAffine>(
bases: & [G],
exponents: & [<G::Scalar as PrimeField>::Repr]
) -> <G as CurveAffine>::Projective
{
if exponents.len() != bases.len() {
panic!("invalid length")
}
let c = if exponents.len() < 32 {
3u32
} else {
(f64::from(exponents.len() as u32)).ln().ceil() as u32
};
dense_multiexp_inner(bases, exponents, 0, c, true)
}
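// Note (added for clarity): the inner routine below processes the exponents one c-bit
// window ("region") at a time. For the current window it drops each base into the bucket
// indexed by that window's value, sums the buckets with the usual running-sum trick, then
// recurses on the next window and doubles the higher region c times before adding, since
//   sum_i e_i * B_i = sum_j 2^(c*j) * ( sum_i window_j(e_i) * B_i ).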
fn dense_multiexp_inner<G: CurveAffine>(
bases: & [G],
exponents: & [<G::Scalar as PrimeField>::Repr],
mut skip: u32,
c: u32,
handle_trivial: bool
) -> <G as CurveAffine>::Projective
{
use std::sync::{Mutex};
    // Perform this region of the multiexp: process one c-bit region across all bases in
    // parallel, then move on to the next region.
let chunk = (bases.len() / num_cpus::get()) + 1;
let this = {
// let mask = (1u64 << c) - 1u64;
let this_region = Mutex::new(<G as CurveAffine>::Projective::zero());
let arc = Arc::new(this_region);
crossbeam::scope(|scope| {
for (base, exp) in bases.chunks(chunk).zip(exponents.chunks(chunk)) {
let this_region_rwlock = arc.clone();
// let handle =
scope.spawn(move || {
let mut buckets = vec![<G as CurveAffine>::Projective::zero(); (1 << c) - 1];
// Accumulate the result
let mut acc = G::Projective::zero();
let zero = G::Scalar::zero().into_repr();
let one = G::Scalar::one().into_repr();
for (base, &exp) in base.iter().zip(exp.iter()) {
// let index = (exp.as_ref()[0] & mask) as usize;
// if index != 0 {
// buckets[index - 1].add_assign_mixed(base);
// }
// exp.shr(c as u32);
if exp != zero {
if exp == one {
if handle_trivial {
acc.add_assign_mixed(base);
}
} else {
let mut exp = exp;
exp.shr(skip);
let exp = exp.as_ref()[0] % (1 << c);
if exp != 0 {
buckets[(exp - 1) as usize].add_assign_mixed(base);
}
}
}
}
// buckets are filled with the corresponding accumulated value, now sum
let mut running_sum = G::Projective::zero();
for exp in buckets.into_iter().rev() {
running_sum.add_assign(&exp);
acc.add_assign(&running_sum);
}
let mut guard = match this_region_rwlock.lock() {
Ok(guard) => guard,
Err(_) => {
panic!("poisoned!");
// poisoned.into_inner()
}
};
(*guard).add_assign(&acc);
});
}
});
let this_region = Arc::try_unwrap(arc).unwrap();
let this_region = this_region.into_inner().unwrap();
this_region
};
skip += c;
if skip >= <G::Scalar as PrimeField>::NUM_BITS {
// There isn't another region, and this will be the highest region
return this;
} else {
// next region is actually higher than this one, so double it enough times
let mut next_region = dense_multiexp_inner(
bases, exponents, skip, c, false);
for _ in 0..c {
next_region.double();
}
next_region.add_assign(&this);
return next_region;
}
}

18
powersoftau/test.sh Executable file

@ -0,0 +1,18 @@
#!/bin/sh
rm challenge
rm response
rm new_challenge
rm challenge_old
rm response_old
cargo run --release --bin new_constrained
cargo run --release --bin compute_constrained
cargo run --release --bin verify_transform_constrained
mv challenge challenge_old
mv response response_old
mv new_challenge challenge
cargo run --release --bin beacon_constrained
cargo run --release --bin verify_transform_constrained