prepare for testing for Ethereum related ceremony
This commit is contained in:
parent
5429415959
commit
299c10a37d
7 .gitignore vendored
@@ -1,5 +1,6 @@
/target/
**/*.rs.bk
transcript
challenge
response
transcript*
challenge*
response*
new_challenge*
167 Cargo.lock generated
@@ -1,16 +1,19 @@
[[package]]
name = "bellman"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
version = "0.1.1"
source = "git+https://github.com/matterinc/bellman#e544678a6abe2f97a9afbc02e2e590f2259f1e30"
dependencies = [
 "bit-vec 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
 "byteorder 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
 "crossbeam 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
 "ff 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)",
 "futures-cpupool 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
 "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "pairing 0.14.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "pairing 0.15.0 (git+https://github.com/matterinc/pairing)",
 "pbr 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
 "time 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)",
]

[[package]]
@@ -71,6 +74,29 @@ dependencies = [
 "generic-array 0.8.3 (registry+https://github.com/rust-lang/crates.io-index)",
]

[[package]]
name = "ff"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
 "byteorder 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
 "ff_derive 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
]

[[package]]
name = "ff_derive"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
 "num-bigint 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "num-integer 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)",
 "num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "proc-macro2 0.4.24 (registry+https://github.com/rust-lang/crates.io-index)",
 "quote 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)",
 "syn 0.14.9 (registry+https://github.com/rust-lang/crates.io-index)",
]

[[package]]
name = "fuchsia-zircon"
version = "0.3.3"
@@ -130,6 +156,15 @@ dependencies = [
 "proc-macro-hack 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
]

[[package]]
name = "kernel32-sys"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
 "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
 "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
]

[[package]]
name = "libc"
version = "0.2.40"
@@ -140,6 +175,28 @@ name = "nodrop"
version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"

[[package]]
name = "num-bigint"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
 "num-integer 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)",
 "num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
]

[[package]]
name = "num-integer"
version = "0.1.39"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
 "num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
]

[[package]]
name = "num-traits"
version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"

[[package]]
name = "num_cpus"
version = "1.8.0"
@@ -150,25 +207,39 @@ dependencies = [

[[package]]
name = "pairing"
version = "0.14.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
version = "0.15.0"
source = "git+https://github.com/matterinc/pairing#1363d02170f1d98f1b9c8eec0e3fc6b1eea4ef9a"
dependencies = [
 "byteorder 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
 "ff 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
]

[[package]]
name = "powersoftau"
version = "0.1.1"
name = "pbr"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
 "bellman 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
 "libc 0.2.40 (registry+https://github.com/rust-lang/crates.io-index)",
 "termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "time 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)",
 "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
]

[[package]]
name = "powersoftau"
version = "0.1.2"
dependencies = [
 "bellman 0.1.1 (git+https://github.com/matterinc/bellman)",
 "blake2 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "byteorder 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
 "crossbeam 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
 "ff 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "generic-array 0.8.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "hex-literal 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "pairing 0.14.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "pairing 0.15.0 (git+https://github.com/matterinc/pairing)",
 "rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
 "rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
 "typenum 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -187,6 +258,22 @@ name = "proc-macro-hack-impl"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"

[[package]]
name = "proc-macro2"
version = "0.4.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
 "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
]

[[package]]
name = "quote"
version = "0.6.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
 "proc-macro2 0.4.24 (registry+https://github.com/rust-lang/crates.io-index)",
]

[[package]]
name = "rand"
version = "0.3.22"
@@ -212,6 +299,14 @@ name = "redox_syscall"
version = "0.1.37"
source = "registry+https://github.com/rust-lang/crates.io-index"

[[package]]
name = "redox_termios"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
 "redox_syscall 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)",
]

[[package]]
name = "rust-crypto"
version = "0.2.36"
@@ -229,6 +324,26 @@ name = "rustc-serialize"
version = "0.3.24"
source = "registry+https://github.com/rust-lang/crates.io-index"

[[package]]
name = "syn"
version = "0.14.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
 "proc-macro2 0.4.24 (registry+https://github.com/rust-lang/crates.io-index)",
 "quote 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)",
 "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
]

[[package]]
name = "termion"
version = "1.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
 "libc 0.2.40 (registry+https://github.com/rust-lang/crates.io-index)",
 "redox_syscall 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)",
 "redox_termios 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
]

[[package]]
name = "time"
version = "0.1.39"
@@ -244,6 +359,16 @@ name = "typenum"
version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"

[[package]]
name = "unicode-xid"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"

[[package]]
name = "winapi"
version = "0.2.8"
source = "registry+https://github.com/rust-lang/crates.io-index"

[[package]]
name = "winapi"
version = "0.3.4"
@@ -253,6 +378,11 @@ dependencies = [
 "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
]

[[package]]
name = "winapi-build"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"

[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
@ -264,7 +394,7 @@ version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[metadata]
|
||||
"checksum bellman 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "eae372472c7ea8f7c8fc6a62f7d5535db8302de7f1aafda2e13a97c4830d3bcf"
|
||||
"checksum bellman 0.1.1 (git+https://github.com/matterinc/bellman)" = "<none>"
|
||||
"checksum bit-vec 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "02b4ff8b16e6076c3e14220b39fbc1fabb6737522281a388998046859400895f"
|
||||
"checksum bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b3c30d3802dfb7281680d6285f2ccdaa8c2d8fee41f93805dba5c4cf50dc23cf"
|
||||
"checksum blake2 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "53bf612c0f2839b7e764ebac65d6cb985f7c6812de399d0728038f4b1da141bc"
|
||||
@ -274,6 +404,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
"checksum crossbeam 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "24ce9782d4d5c53674646a6a4c1863a21a8fc0cb649b3c94dfc16e45071dea19"
|
||||
"checksum crypto-mac 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "779015233ac67d65098614aec748ac1c756ab6677fa2e14cf8b37c08dfed1198"
|
||||
"checksum digest 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e5b29bf156f3f4b3c4f610a25ff69370616ae6e0657d416de22645483e72af0a"
|
||||
"checksum ff 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "eec81e2e423086589b224dbcfbab70e3732913de25479d05165b20d4aaed05f4"
|
||||
"checksum ff_derive 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "70335090ee115d5716416ca38980cce7752f40923f41d22cf5a69a6269f9e2a2"
|
||||
"checksum fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82"
|
||||
"checksum fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7"
|
||||
"checksum futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)" = "1a70b146671de62ec8c8ed572219ca5d594d9b06c0b364d5e67b722fc559b48c"
|
||||
@ -282,19 +414,32 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
"checksum generic-array 0.8.3 (registry+https://github.com/rust-lang/crates.io-index)" = "fceb69994e330afed50c93524be68c42fa898c2d9fd4ee8da03bd7363acd26f2"
|
||||
"checksum hex-literal 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4da5f0e01bd8a71a224a4eedecaacfcabda388dbb7a80faf04d3514287572d95"
|
||||
"checksum hex-literal-impl 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1d340b6514f232f6db1bd16db65302a5278a04fef9ce867cb932e7e5fa21130a"
|
||||
"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d"
|
||||
"checksum libc 0.2.40 (registry+https://github.com/rust-lang/crates.io-index)" = "6fd41f331ac7c5b8ac259b8bf82c75c0fb2e469bbf37d2becbba9a6a2221965b"
|
||||
"checksum nodrop 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "9a2228dca57108069a5262f2ed8bd2e82496d2e074a06d1ccc7ce1687b6ae0a2"
|
||||
"checksum num-bigint 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "10b8423ea72ec64751198856a853e07b37087cfc9b53a87ecb19bff67b6d1320"
|
||||
"checksum num-integer 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)" = "e83d528d2677f0518c570baf2b7abdcf0cd2d248860b68507bdcb3e91d4c0cea"
|
||||
"checksum num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0b3a5d7cc97d6d30d8b9bc8fa19bf45349ffe46241e8816f50f62f6d6aaabee1"
|
||||
"checksum num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c51a3322e4bca9d212ad9a158a02abc6934d005490c054a2778df73a70aa0a30"
|
||||
"checksum pairing 0.14.1 (registry+https://github.com/rust-lang/crates.io-index)" = "06f21a403a78257de696b59a5bfafad56a3b3ab8f27741c8122750bf0ebbb9fa"
|
||||
"checksum pairing 0.15.0 (git+https://github.com/matterinc/pairing)" = "<none>"
|
||||
"checksum pbr 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "deb73390ab68d81992bd994d145f697451bb0b54fd39738e72eef32458ad6907"
|
||||
"checksum proc-macro-hack 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3ba8d4f9257b85eb6cdf13f055cea3190520aab1409ca2ab43493ea4820c25f0"
|
||||
"checksum proc-macro-hack-impl 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d5cb6f960ad471404618e9817c0e5d10b1ae74cfdf01fab89ea0641fe7fb2892"
|
||||
"checksum proc-macro2 0.4.24 (registry+https://github.com/rust-lang/crates.io-index)" = "77619697826f31a02ae974457af0b29b723e5619e113e9397b8b82c6bd253f09"
|
||||
"checksum quote 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)" = "53fa22a1994bd0f9372d7a816207d8a2677ad0325b073f5c5332760f0fb62b5c"
|
||||
"checksum rand 0.3.22 (registry+https://github.com/rust-lang/crates.io-index)" = "15a732abf9d20f0ad8eeb6f909bf6868722d9a06e1e50802b6a70351f40b4eb1"
|
||||
"checksum rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "eba5f8cb59cc50ed56be8880a5c7b496bfd9bd26394e176bc67884094145c2c5"
|
||||
"checksum redox_syscall 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)" = "0d92eecebad22b767915e4d529f89f28ee96dbbf5a4810d2b844373f136417fd"
|
||||
"checksum redox_termios 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7e891cfe48e9100a70a3b6eb652fef28920c117d366339687bd5576160db0f76"
|
||||
"checksum rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)" = "f76d05d3993fd5f4af9434e8e436db163a12a9d40e1a58a726f27a01dfd12a2a"
|
||||
"checksum rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)" = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda"
|
||||
"checksum syn 0.14.9 (registry+https://github.com/rust-lang/crates.io-index)" = "261ae9ecaa397c42b960649561949d69311f08eeaea86a65696e6e46517cf741"
|
||||
"checksum termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "689a3bdfaab439fd92bc87df5c4c78417d3cbe537487274e9b0b2dce76e92096"
|
||||
"checksum time 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)" = "a15375f1df02096fb3317256ce2cee6a1f42fc84ea5ad5fc8c421cfe40c73098"
|
||||
"checksum typenum 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "612d636f949607bdf9b123b4a6f6d966dedf3ff669f7f045890d3a4a73948169"
|
||||
"checksum unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc"
|
||||
"checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a"
|
||||
"checksum winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "04e3bd221fcbe8a271359c04f21a76db7d0c6028862d1bb5512d85e1e2eb5bb3"
|
||||
"checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc"
|
||||
"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
|
||||
"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
|
||||
|
20 Cargo.toml
@@ -1,8 +1,9 @@
[package]
name = "powersoftau"
version = "0.1.1"
authors = ["Sean Bowe"]
version = "0.1.2"
authors = ["Sean Bowe", "Alex Vlasov"]
license = "MIT/Apache-2.0"
edition = "2018"

description = "Communal zk-SNARK MPC for Public Parameters"
documentation = "https://docs.rs/powersoftau/"
@@ -17,10 +18,19 @@ blake2 = "0.6.1"
generic-array = "0.8.3"
typenum = "1.9.0"
byteorder = "1.1.0"
pairing = "0.14"
hex-literal = "0.1"
rust-crypto = "0.2"
bellman = "0.1"

memmap = "0.7.0"
itertools = "0.8.0"

#ff = { path = "../ff", features = ["derive"] }
#pairing = { path = "../pairing", features = ["expose-arith"]}
#bellman = { path = "../bellman" }

ff = { git = 'https://github.com/matterinc/ff', features = ["derive"] }
pairing = { git = 'https://github.com/matterinc/pairing', features = ["expose-arith"] }
bellman = { git = 'https://github.com/matterinc/bellman'}

[features]
u128-support = ["pairing/u128-support"]
16 README.md
@@ -1,20 +1,20 @@
# Powers of Tau

## Original story

This is a [multi-party computation](https://en.wikipedia.org/wiki/Secure_multi-party_computation) (MPC) ceremony which constructs partial zk-SNARK parameters for _all_ circuits up to a depth of 2<sup>21</sup>. It works by taking a step that is performed by all zk-SNARK MPCs and performing it in just one single ceremony. This makes individual zk-SNARK MPCs much cheaper and allows them to scale to practically unbounded numbers of participants.

This protocol is described in a [forthcoming paper](https://eprint.iacr.org/2017/1050). It produces parameters for an adaptation of [Jens Groth's 2016 pairing-based proving system](https://eprint.iacr.org/2016/260) using the [BLS12-381](https://github.com/ebfull/pairing/tree/master/src/bls12_381) elliptic curve construction. The security proof relies on a randomness beacon being applied at the end of the ceremony.

## Contributions

Extended to support Ethereum's BN256 curve, with the ceremony size now easier to change. In addition, the proof generation process can now run in memory-constrained environments. The benchmark is around `1.3 GB` of memory and `3 hours` for a `2^26` powers-of-tau ceremony on the BN256 curve on my personal laptop.
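The ceremony size is driven by a `PowersOfTauParameters` trait that the new accumulator code in this commit is generic over. The trait itself lives in `src/parameters.rs`, which is not part of this diff, so the following is only a sketch reconstructed from the associated constants referenced by `src/accumulator.rs` and `src/batched_accumulator.rs` below; the shape and example values are illustrative assumptions, not the actual definition.

```rust
// Hypothetical sketch of the parameters the new code is generic over; the real
// trait is defined in src/parameters.rs (not shown in this commit).
pub trait PowersOfTauParameters {
    const TAU_POWERS_LENGTH: usize;      // e.g. 1 << 26 for a 2^26 ceremony
    const TAU_POWERS_G1_LENGTH: usize;   // 2 * TAU_POWERS_LENGTH - 1
    const EMPIRICAL_BATCH_SIZE: usize;   // how many points are read/written per chunk
    const HASH_SIZE: usize;              // 64 bytes, the BLAKE2b digest size
    const G1_UNCOMPRESSED_BYTE_SIZE: usize;
    const G2_UNCOMPRESSED_BYTE_SIZE: usize;
    const G1_COMPRESSED_BYTE_SIZE: usize;
    const G2_COMPRESSED_BYTE_SIZE: usize;
}
```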

## Instructions

If you've been asked to participate, you were sent a `challenge` file. Put that in the current directory and use your [Rust toolchain](https://www.rust-lang.org/en-US/) to execute the computation:
Instructions for a planned ceremony will be posted when everything is tested and finalized.

```
cargo run --release --bin compute
```

The process could take an hour or so. When it's finished, it will place a `response` file in the current directory. That's what you send back. It will also print a hash of the `response` file it produced. You need to write this hash down (or post it publicly) so that you and others can confirm that your contribution exists in the final transcript of the ceremony.

## Recommendations
## Recommendations from original ceremony

Participants of the ceremony sample some randomness, perform a computation, and then destroy the randomness. **Only one participant needs to do this successfully to ensure the final parameters are secure.** In order to see that this randomness is truly destroyed, participants may take various kinds of precautions:
466 src/accumulator.rs Normal file
@@ -0,0 +1,466 @@
//! This ceremony constructs the "powers of tau" for Jens Groth's 2016 zk-SNARK proving
//! system using the BLS12-381 pairing-friendly elliptic curve construction.
//!
//! # Overview
//!
//! Participants of the ceremony receive a "challenge" file containing:
//!
//! * the BLAKE2b hash of the last file entered into the transcript
//! * an `Accumulator` (with curve points encoded in uncompressed form for fast deserialization)
//!
//! The participant runs a tool which generates a random keypair (`PublicKey`, `PrivateKey`)
//! used for modifying the `Accumulator` from the "challenge" file. The keypair is then used to
//! transform the `Accumulator`, and a "response" file is generated containing:
//!
//! * the BLAKE2b hash of the "challenge" file (thus forming a hash chain over the entire transcript)
//! * an `Accumulator` (with curve points encoded in compressed form for fast uploading)
//! * the `PublicKey`
//!
//! This "challenge" file is entered into the protocol transcript. A given transcript is valid
//! if the transformations between consecutive `Accumulator`s verify with their respective
//! `PublicKey`s. Participants (and the public) can ensure that their contribution to the
//! `Accumulator` was accepted by ensuring the transcript contains their "response" file, ideally
//! by comparison of the BLAKE2b hash of the "response" file.
//!
//! After some time has elapsed for participants to contribute to the ceremony, a participant is
//! simulated with a randomness beacon. The resulting `Accumulator` contains partial zk-SNARK
//! public parameters for all circuits within a bounded size.

extern crate pairing;
extern crate rand;
extern crate crossbeam;
extern crate num_cpus;
extern crate blake2;
extern crate generic_array;
extern crate typenum;
extern crate byteorder;
extern crate ff;
extern crate memmap;

use memmap::{Mmap, MmapMut};
use self::ff::{Field, PrimeField};
use self::byteorder::{ReadBytesExt, BigEndian};
use self::rand::{SeedableRng, Rng, Rand};
use self::rand::chacha::ChaChaRng;
use self::pairing::bn256::{Bn256};
use self::pairing::*;
use std::io::{self, Read, Write};
use std::sync::{Arc, Mutex};
use self::generic_array::GenericArray;
use self::typenum::consts::U64;
use self::blake2::{Blake2b, Digest};
use std::fmt;

use super::keypair::*;
use super::utils::*;
use super::parameters::*;

/// The `Accumulator` is an object that participants of the ceremony contribute
/// randomness to. This object contains powers of trapdoor `tau` in G1 and in G2 over
/// fixed generators, and additionally in G1 over two other generators of exponents
/// `alpha` and `beta` over those fixed generators. In other words:
///
/// * (τ, τ<sup>2</sup>, ..., τ<sup>2<sup>22</sup> - 2</sup>, α, ατ, ατ<sup>2</sup>, ..., ατ<sup>2<sup>21</sup> - 1</sup>, β, βτ, βτ<sup>2</sup>, ..., βτ<sup>2<sup>21</sup> - 1</sup>)<sub>1</sub>
/// * (β, τ, τ<sup>2</sup>, ..., τ<sup>2<sup>21</sup> - 1</sup>)<sub>2</sub>
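// Note on sizes (derivable from the tuple above): with TAU_POWERS_LENGTH = 2^21 the G2 vector
// holds τ^0 .. τ^(2^21 - 1) while the G1 vector holds τ^0 .. τ^(2^22 - 2), i.e.
// TAU_POWERS_G1_LENGTH = 2 * TAU_POWERS_LENGTH - 1. Roughly speaking, the extra G1 powers are
// what downstream zk-SNARK setups need for products of polynomials of degree up to
// TAU_POWERS_LENGTH - 1.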
#[derive(Eq, Clone)]
pub struct Accumulator<E: Engine, P: PowersOfTauParameters> {
    /// tau^0, tau^1, tau^2, ..., tau^{TAU_POWERS_G1_LENGTH - 1}
    pub tau_powers_g1: Vec<E::G1Affine>,
    /// tau^0, tau^1, tau^2, ..., tau^{TAU_POWERS_LENGTH - 1}
    pub tau_powers_g2: Vec<E::G2Affine>,
    /// alpha * tau^0, alpha * tau^1, alpha * tau^2, ..., alpha * tau^{TAU_POWERS_LENGTH - 1}
    pub alpha_tau_powers_g1: Vec<E::G1Affine>,
    /// beta * tau^0, beta * tau^1, beta * tau^2, ..., beta * tau^{TAU_POWERS_LENGTH - 1}
    pub beta_tau_powers_g1: Vec<E::G1Affine>,
    /// beta
    pub beta_g2: E::G2Affine,
    /// Keep parameters here
    pub parameters: P
}

impl<E: Engine, P: PowersOfTauParameters> PartialEq for Accumulator<E, P> {
    fn eq(&self, other: &Accumulator<E, P>) -> bool {
        self.tau_powers_g1.eq(&other.tau_powers_g1) &&
        self.tau_powers_g2.eq(&other.tau_powers_g2) &&
        self.alpha_tau_powers_g1.eq(&other.alpha_tau_powers_g1) &&
        self.beta_tau_powers_g1.eq(&other.beta_tau_powers_g1) &&
        self.beta_g2 == other.beta_g2
    }
}

impl<E: Engine, P: PowersOfTauParameters> Accumulator<E, P> {
    /// Constructs an "initial" accumulator with τ = 1, α = 1, β = 1.
    pub fn new(parameters: P) -> Self {
        Accumulator {
            tau_powers_g1: vec![E::G1Affine::one(); P::TAU_POWERS_G1_LENGTH],
            tau_powers_g2: vec![E::G2Affine::one(); P::TAU_POWERS_LENGTH],
            alpha_tau_powers_g1: vec![E::G1Affine::one(); P::TAU_POWERS_LENGTH],
            beta_tau_powers_g1: vec![E::G1Affine::one(); P::TAU_POWERS_LENGTH],
            beta_g2: E::G2Affine::one(),
            parameters: parameters
        }
    }

    /// Write the accumulator with some compression behavior.
    pub fn serialize<W: Write>(
        &self,
        writer: &mut W,
        compression: UseCompression
    ) -> io::Result<()>
    {
        fn write_all<W: Write, C: CurveAffine>(
            writer: &mut W,
            c: &[C],
            compression: UseCompression
        ) -> io::Result<()>
        {
            for c in c {
                write_point(writer, c, compression)?;
            }

            Ok(())
        }

        write_all(writer, &self.tau_powers_g1, compression)?;
        write_all(writer, &self.tau_powers_g2, compression)?;
        write_all(writer, &self.alpha_tau_powers_g1, compression)?;
        write_all(writer, &self.beta_tau_powers_g1, compression)?;
        write_all(writer, &[self.beta_g2], compression)?;

        Ok(())
    }

    /// Read the accumulator from disk with some compression behavior. `checked`
    /// indicates whether we should check it's a valid element of the group and
    /// not the point at infinity.
    pub fn deserialize<R: Read>(
        reader: &mut R,
        compression: UseCompression,
        checked: CheckForCorrectness,
        parameters: P
    ) -> Result<Self, DeserializationError>
    {
        fn read_all<EE: Engine, R: Read, C: CurveAffine<Engine = EE, Scalar = EE::Fr>>(
            reader: &mut R,
            size: usize,
            compression: UseCompression,
            checked: CheckForCorrectness
        ) -> Result<Vec<C>, DeserializationError>
        {
            fn decompress_all<R: Read, ENC: EncodedPoint>(
                reader: &mut R,
                size: usize,
                checked: CheckForCorrectness
            ) -> Result<Vec<ENC::Affine>, DeserializationError>
            {
                // Read the encoded elements
                let mut res = vec![ENC::empty(); size];

                for encoded in &mut res {
                    reader.read_exact(encoded.as_mut())?;
                }

                // Allocate space for the deserialized elements
                let mut res_affine = vec![ENC::Affine::zero(); size];

                let mut chunk_size = res.len() / num_cpus::get();
                if chunk_size == 0 {
                    chunk_size = 1;
                }

                // If any of our threads encounter a deserialization/IO error, catch
                // it with this.
                let decoding_error = Arc::new(Mutex::new(None));

                crossbeam::scope(|scope| {
                    for (source, target) in res.chunks(chunk_size).zip(res_affine.chunks_mut(chunk_size)) {
                        let decoding_error = decoding_error.clone();

                        scope.spawn(move || {
                            for (source, target) in source.iter().zip(target.iter_mut()) {
                                match {
                                    // If we're a participant, we don't need to check all of the
                                    // elements in the accumulator, which saves a lot of time.
                                    // The hash chain prevents this from being a problem: the
                                    // transcript guarantees that the accumulator was properly
                                    // formed.
                                    match checked {
                                        CheckForCorrectness::Yes => {
                                            // Points at infinity are never expected in the accumulator
                                            source.into_affine().map_err(|e| e.into()).and_then(|source| {
                                                if source.is_zero() {
                                                    Err(DeserializationError::PointAtInfinity)
                                                } else {
                                                    Ok(source)
                                                }
                                            })
                                        },
                                        CheckForCorrectness::No => source.into_affine_unchecked().map_err(|e| e.into())
                                    }
                                }
                                {
                                    Ok(source) => {
                                        *target = source;
                                    },
                                    Err(e) => {
                                        *decoding_error.lock().unwrap() = Some(e);
                                    }
                                }
                            }
                        });
                    }
                });

                match Arc::try_unwrap(decoding_error).unwrap().into_inner().unwrap() {
                    Some(e) => {
                        Err(e)
                    },
                    None => {
                        Ok(res_affine)
                    }
                }
            }

            match compression {
                UseCompression::Yes => decompress_all::<_, C::Compressed>(reader, size, checked),
                UseCompression::No => decompress_all::<_, C::Uncompressed>(reader, size, checked)
            }
        }

        let tau_powers_g1 = read_all::<E, _, _>(reader, P::TAU_POWERS_G1_LENGTH, compression, checked)?;
        let tau_powers_g2 = read_all::<E, _, _>(reader, P::TAU_POWERS_LENGTH, compression, checked)?;
        let alpha_tau_powers_g1 = read_all::<E, _, _>(reader, P::TAU_POWERS_LENGTH, compression, checked)?;
        let beta_tau_powers_g1 = read_all::<E, _, _>(reader, P::TAU_POWERS_LENGTH, compression, checked)?;
        let beta_g2 = read_all::<E, _, _>(reader, 1, compression, checked)?[0];

        Ok(Accumulator {
            tau_powers_g1: tau_powers_g1,
            tau_powers_g2: tau_powers_g2,
            alpha_tau_powers_g1: alpha_tau_powers_g1,
            beta_tau_powers_g1: beta_tau_powers_g1,
            beta_g2: beta_g2,
            parameters: parameters
        })
    }

    /// Transforms the accumulator with a private key.
    pub fn transform(&mut self, key: &PrivateKey<E>)
    {
        // Construct the powers of tau
        let mut taupowers = vec![E::Fr::zero(); P::TAU_POWERS_G1_LENGTH];
        let chunk_size = P::TAU_POWERS_G1_LENGTH / num_cpus::get();

        // Construct exponents in parallel
        crossbeam::scope(|scope| {
            for (i, taupowers) in taupowers.chunks_mut(chunk_size).enumerate() {
                scope.spawn(move || {
                    let mut acc = key.tau.pow(&[(i * chunk_size) as u64]);

                    for t in taupowers {
                        *t = acc;
                        acc.mul_assign(&key.tau);
                    }
                });
            }
        });

        /// Exponentiate a large number of points, with an optional coefficient to be applied to the
        /// exponent.
        fn batch_exp<EE: Engine, C: CurveAffine<Engine = EE, Scalar = EE::Fr>>(bases: &mut [C], exp: &[C::Scalar], coeff: Option<&C::Scalar>) {
            assert_eq!(bases.len(), exp.len());
            let mut projective = vec![C::Projective::zero(); bases.len()];
            let chunk_size = bases.len() / num_cpus::get();

            // Perform wNAF over multiple cores, placing results into `projective`.
            crossbeam::scope(|scope| {
                for ((bases, exp), projective) in bases.chunks_mut(chunk_size)
                    .zip(exp.chunks(chunk_size))
                    .zip(projective.chunks_mut(chunk_size))
                {
                    scope.spawn(move || {
                        let mut wnaf = Wnaf::new();

                        for ((base, exp), projective) in bases.iter_mut()
                            .zip(exp.iter())
                            .zip(projective.iter_mut())
                        {
                            let mut exp = *exp;
                            if let Some(coeff) = coeff {
                                exp.mul_assign(coeff);
                            }

                            *projective = wnaf.base(base.into_projective(), 1).scalar(exp.into_repr());
                        }
                    });
                }
            });

            // Perform batch normalization
            crossbeam::scope(|scope| {
                for projective in projective.chunks_mut(chunk_size)
                {
                    scope.spawn(move || {
                        C::Projective::batch_normalization(projective);
                    });
                }
            });

            // Turn it all back into affine points
            for (projective, affine) in projective.iter().zip(bases.iter_mut()) {
                *affine = projective.into_affine();
            }
        }

        batch_exp::<E, _>(&mut self.tau_powers_g1, &taupowers[0..], None);
        batch_exp::<E, _>(&mut self.tau_powers_g2, &taupowers[0..P::TAU_POWERS_LENGTH], None);
        batch_exp::<E, _>(&mut self.alpha_tau_powers_g1, &taupowers[0..P::TAU_POWERS_LENGTH], Some(&key.alpha));
        batch_exp::<E, _>(&mut self.beta_tau_powers_g1, &taupowers[0..P::TAU_POWERS_LENGTH], Some(&key.beta));
        self.beta_g2 = self.beta_g2.mul(key.beta).into_affine();
    }
}

/// Verifies a transformation of the `Accumulator` with the `PublicKey`, given a 64-byte transcript `digest`.
pub fn verify_transform<E: Engine, P: PowersOfTauParameters>(before: &Accumulator<E, P>, after: &Accumulator<E, P>, key: &PublicKey<E>, digest: &[u8]) -> bool
{
    assert_eq!(digest.len(), 64);

    let compute_g2_s = |g1_s: E::G1Affine, g1_s_x: E::G1Affine, personalization: u8| {
        let mut h = Blake2b::default();
        h.input(&[personalization]);
        h.input(digest);
        h.input(g1_s.into_uncompressed().as_ref());
        h.input(g1_s_x.into_uncompressed().as_ref());
        hash_to_g2::<E>(h.result().as_ref()).into_affine()
    };

    let tau_g2_s = compute_g2_s(key.tau_g1.0, key.tau_g1.1, 0);
    let alpha_g2_s = compute_g2_s(key.alpha_g1.0, key.alpha_g1.1, 1);
    let beta_g2_s = compute_g2_s(key.beta_g1.0, key.beta_g1.1, 2);

    // Check the proofs-of-knowledge for tau/alpha/beta

    // g1^s / g1^(s*x) = g2^s / g2^(s*x)
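    // In more detail (following the notation of the comment above): each public-key element is a
    // pair key.*_g1 = (g1^s, g1^(s*x)) together with key.*_g2 = g2_s^x, where g2_s is derived by
    // hashing the personalization byte, the transcript digest and both G1 points to a G2 point.
    // same_ratio (defined in src/utils.rs, not shown in this diff) uses a pairing to check that
    // both pairs encode the same exponent x, proving knowledge of the contribution for
    // tau/alpha/beta.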
    if !same_ratio(key.tau_g1, (tau_g2_s, key.tau_g2)) {
        return false;
    }
    if !same_ratio(key.alpha_g1, (alpha_g2_s, key.alpha_g2)) {
        return false;
    }
    if !same_ratio(key.beta_g1, (beta_g2_s, key.beta_g2)) {
        return false;
    }

    // Check the correctness of the generators for tau powers
    if after.tau_powers_g1[0] != E::G1Affine::one() {
        return false;
    }
    if after.tau_powers_g2[0] != E::G2Affine::one() {
        return false;
    }

    // Did the participant multiply the previous tau by the new one?
    if !same_ratio((before.tau_powers_g1[1], after.tau_powers_g1[1]), (tau_g2_s, key.tau_g2)) {
        return false;
    }

    // Did the participant multiply the previous alpha by the new one?
    if !same_ratio((before.alpha_tau_powers_g1[0], after.alpha_tau_powers_g1[0]), (alpha_g2_s, key.alpha_g2)) {
        return false;
    }

    // Did the participant multiply the previous beta by the new one?
    if !same_ratio((before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (beta_g2_s, key.beta_g2)) {
        return false;
    }
    if !same_ratio((before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (before.beta_g2, after.beta_g2)) {
        return false;
    }

    // Are the powers of tau correct?
    if !same_ratio(power_pairs(&after.tau_powers_g1), (after.tau_powers_g2[0], after.tau_powers_g2[1])) {
        return false;
    }
    if !same_ratio(power_pairs(&after.tau_powers_g2), (after.tau_powers_g1[0], after.tau_powers_g1[1])) {
        return false;
    }
    if !same_ratio(power_pairs(&after.alpha_tau_powers_g1), (after.tau_powers_g2[0], after.tau_powers_g2[1])) {
        return false;
    }
    if !same_ratio(power_pairs(&after.beta_tau_powers_g1), (after.tau_powers_g2[0], after.tau_powers_g2[1])) {
        return false;
    }

    true
}

/// Abstraction over a reader which hashes the data being read.
pub struct HashReader<R: Read> {
    reader: R,
    hasher: Blake2b
}

impl<R: Read> HashReader<R> {
    /// Construct a new `HashReader` given an existing `reader` by value.
    pub fn new(reader: R) -> Self {
        HashReader {
            reader: reader,
            hasher: Blake2b::default()
        }
    }

    /// Destroy this reader and return the hash of what was read.
    pub fn into_hash(self) -> GenericArray<u8, U64> {
        self.hasher.result()
    }
}

impl<R: Read> Read for HashReader<R> {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        let bytes = self.reader.read(buf)?;

        if bytes > 0 {
            self.hasher.input(&buf[0..bytes]);
        }

        Ok(bytes)
    }
}

/// Abstraction over a writer which hashes the data being written.
pub struct HashWriter<W: Write> {
    writer: W,
    hasher: Blake2b
}

impl<W: Write> HashWriter<W> {
    /// Construct a new `HashWriter` given an existing `writer` by value.
    pub fn new(writer: W) -> Self {
        HashWriter {
            writer: writer,
            hasher: Blake2b::default()
        }
    }

    /// Destroy this writer and return the hash of what was written.
    pub fn into_hash(self) -> GenericArray<u8, U64> {
        self.hasher.result()
    }
}

impl<W: Write> Write for HashWriter<W> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let bytes = self.writer.write(buf)?;

        if bytes > 0 {
            self.hasher.input(&buf[0..bytes]);
        }

        Ok(bytes)
    }

    fn flush(&mut self) -> io::Result<()> {
        self.writer.flush()
    }
}
832 src/batched_accumulator.rs Normal file
@@ -0,0 +1,832 @@
/// Memory-constrained accumulator that checks the initial information in chunks that fit into memory,
/// and then contributes to entropy in chunks as well.

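// The pattern used throughout this file to stay within a memory budget is, in sketch form:
//
//     for chunk in &(0..P::TAU_POWERS_LENGTH).into_iter().chunks(P::EMPIRICAL_BATCH_SIZE) {
//         if let MinMax(start, end) = chunk.minmax() {
//             // read_chunk(start, end - start + 1, ...), process the points, write_chunk(...)
//         }
//     }
//
// so only EMPIRICAL_BATCH_SIZE points per vector are resident at any one time.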
extern crate pairing;
extern crate rand;
extern crate crossbeam;
extern crate num_cpus;
extern crate blake2;
extern crate generic_array;
extern crate typenum;
extern crate byteorder;
extern crate ff;
extern crate memmap;
extern crate itertools;

use itertools::Itertools;
use memmap::{Mmap, MmapMut};
use self::ff::{Field, PrimeField};
use self::byteorder::{ReadBytesExt, BigEndian};
use self::rand::{SeedableRng, Rng, Rand};
use self::rand::chacha::ChaChaRng;
use self::pairing::bn256::{Bn256};
use self::pairing::*;
use std::io::{self, Read, Write};
use std::sync::{Arc, Mutex};
use self::generic_array::GenericArray;
use self::typenum::consts::U64;
use self::blake2::{Blake2b, Digest};
use std::fmt;

use super::keypair::*;
use super::utils::*;
use super::parameters::*;

pub enum AccumulatorState {
    Empty,
    NonEmpty,
    Transformed,
}

/// The `Accumulator` is an object that participants of the ceremony contribute
/// randomness to. This object contains powers of trapdoor `tau` in G1 and in G2 over
/// fixed generators, and additionally in G1 over two other generators of exponents
/// `alpha` and `beta` over those fixed generators. In other words:
///
/// * (τ, τ<sup>2</sup>, ..., τ<sup>2<sup>22</sup> - 2</sup>, α, ατ, ατ<sup>2</sup>, ..., ατ<sup>2<sup>21</sup> - 1</sup>, β, βτ, βτ<sup>2</sup>, ..., βτ<sup>2<sup>21</sup> - 1</sup>)<sub>1</sub>
/// * (β, τ, τ<sup>2</sup>, ..., τ<sup>2<sup>21</sup> - 1</sup>)<sub>2</sub>
pub struct BachedAccumulator<E: Engine, P: PowersOfTauParameters> {
    /// tau^0, tau^1, tau^2, ..., tau^{TAU_POWERS_G1_LENGTH - 1}
    pub tau_powers_g1: Vec<E::G1Affine>,
    /// tau^0, tau^1, tau^2, ..., tau^{TAU_POWERS_LENGTH - 1}
    pub tau_powers_g2: Vec<E::G2Affine>,
    /// alpha * tau^0, alpha * tau^1, alpha * tau^2, ..., alpha * tau^{TAU_POWERS_LENGTH - 1}
    pub alpha_tau_powers_g1: Vec<E::G1Affine>,
    /// beta * tau^0, beta * tau^1, beta * tau^2, ..., beta * tau^{TAU_POWERS_LENGTH - 1}
    pub beta_tau_powers_g1: Vec<E::G1Affine>,
    /// beta
    pub beta_g2: E::G2Affine,
    /// Hash chain hash
    pub hash: GenericArray<u8, U64>,
    /// Keep parameters here as a marker
    marker: std::marker::PhantomData<P>,
}

impl<E: Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
    /// Calculate the contribution hash from the resulting file. The original powers of tau
    /// implementation used a specially formed writer to write to the file and calculate a hash on
    /// the fly, but the memory-constrained implementation now writes in no particular order, so a
    /// plain recalculation at the end of the procedure is more efficient.
    pub fn calculate_hash(
        input_map: &Mmap
    ) -> GenericArray<u8, U64> {
        let chunk_size = 1 << 30; // read the map in 1 GB chunks
        let mut hasher = Blake2b::default();
        for chunk in input_map.chunks(chunk_size) {
            hasher.input(&chunk);
        }

        hasher.result()
    }
}

impl<E: Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
    pub fn empty() -> Self {
        Self {
            tau_powers_g1: vec![],
            tau_powers_g2: vec![],
            alpha_tau_powers_g1: vec![],
            beta_tau_powers_g1: vec![],
            beta_g2: E::G2Affine::zero(),
            hash: blank_hash(),
            marker: std::marker::PhantomData::<P>{}
        }
    }
}

impl<E: Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
    fn g1_size(compression: UseCompression) -> usize {
        match compression {
            UseCompression::Yes => {
                return P::G1_COMPRESSED_BYTE_SIZE;
            },
            UseCompression::No => {
                return P::G1_UNCOMPRESSED_BYTE_SIZE;
            }
        }
    }

    fn g2_size(compression: UseCompression) -> usize {
        match compression {
            UseCompression::Yes => {
                return P::G2_COMPRESSED_BYTE_SIZE;
            },
            UseCompression::No => {
                return P::G2_UNCOMPRESSED_BYTE_SIZE;
            }
        }
    }

    fn get_size(element_type: ElementType, compression: UseCompression) -> usize {
        let size = match element_type {
            ElementType::AlphaG1 | ElementType::BetaG1 | ElementType::TauG1 => { Self::g1_size(compression) },
            ElementType::BetaG2 | ElementType::TauG2 => { Self::g2_size(compression) }
        };

        size
    }

    /// Expected file structure:
    /// HASH_SIZE bytes for the hash of the contribution
    /// TAU_POWERS_G1_LENGTH of G1 points
    /// TAU_POWERS_LENGTH of G2 points
    /// TAU_POWERS_LENGTH of G1 points for alpha
    /// TAU_POWERS_LENGTH of G1 points for beta
    /// One G2 point for beta
    /// The public key is appended to the end of the file, but it is irrelevant for the accumulator itself
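    //
    // For illustration, with uncompressed points the byte offset of TauG2 element `i` therefore
    // works out to
    //     HASH_SIZE
    //     + G1_UNCOMPRESSED_BYTE_SIZE * TAU_POWERS_G1_LENGTH   (all TauG1 points)
    //     + G2_UNCOMPRESSED_BYTE_SIZE * i
    // which is what `calculate_mmap_position` below returns for `ElementType::TauG2`.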

    fn calculate_mmap_position(index: usize, element_type: ElementType, compression: UseCompression) -> usize {
        let g1_size = Self::g1_size(compression);
        let g2_size = Self::g2_size(compression);
        let required_tau_g1_power = P::TAU_POWERS_G1_LENGTH;
        let required_power = P::TAU_POWERS_LENGTH;
        let position = match element_type {
            ElementType::TauG1 => {
                let mut position = 0;
                position += g1_size * index;
                assert!(index < P::TAU_POWERS_G1_LENGTH, format!("Index of TauG1 element written must not exceed {}, while it's {}", P::TAU_POWERS_G1_LENGTH, index));

                position
            },
            ElementType::TauG2 => {
                let mut position = 0;
                position += g1_size * required_tau_g1_power;
                assert!(index < P::TAU_POWERS_LENGTH, format!("Index of TauG2 element written must not exceed {}, while it's {}", P::TAU_POWERS_LENGTH, index));
                position += g2_size * index;

                position
            },
            ElementType::AlphaG1 => {
                let mut position = 0;
                position += g1_size * required_tau_g1_power;
                position += g2_size * required_power;
                assert!(index < P::TAU_POWERS_LENGTH, format!("Index of AlphaG1 element written must not exceed {}, while it's {}", P::TAU_POWERS_LENGTH, index));
                position += g1_size * index;

                position
            },
            ElementType::BetaG1 => {
                let mut position = 0;
                position += g1_size * required_tau_g1_power;
                position += g2_size * required_power;
                position += g1_size * required_power;
                assert!(index < P::TAU_POWERS_LENGTH, format!("Index of BetaG1 element written must not exceed {}, while it's {}", P::TAU_POWERS_LENGTH, index));
                position += g1_size * index;

                position
            },
            ElementType::BetaG2 => {
                let mut position = 0;
                position += g1_size * required_tau_g1_power;
                position += g2_size * required_power;
                position += g1_size * required_power;
                position += g1_size * required_power;

                position
            }
        };

        position + P::HASH_SIZE
    }
}

impl<E: Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
    /// Verifies a transformation of the `Accumulator` with the `PublicKey`, given a 64-byte transcript `digest`.
    pub fn verify_transformation(
        input_map: &Mmap,
        output_map: &Mmap,
        key: &PublicKey<E>,
        digest: &[u8],
        input_is_compressed: UseCompression,
        output_is_compressed: UseCompression,
        check_input_for_correctness: CheckForCorrectness,
        check_output_for_correctness: CheckForCorrectness,
    ) -> bool
    {
        use itertools::MinMaxResult::{MinMax};
        assert_eq!(digest.len(), 64);

        let tau_g2_s = compute_g2_s::<E>(&digest, &key.tau_g1.0, &key.tau_g1.1, 0);
        let alpha_g2_s = compute_g2_s::<E>(&digest, &key.alpha_g1.0, &key.alpha_g1.1, 1);
        let beta_g2_s = compute_g2_s::<E>(&digest, &key.beta_g1.0, &key.beta_g1.1, 2);

        // Check the proofs-of-knowledge for tau/alpha/beta

        // g1^s / g1^(s*x) = g2^s / g2^(s*x)
        if !same_ratio(key.tau_g1, (tau_g2_s, key.tau_g2)) {
            println!("Invalid ratio key.tau_g1, (tau_g2_s, key.tau_g2)");
            return false;
        }
        if !same_ratio(key.alpha_g1, (alpha_g2_s, key.alpha_g2)) {
            println!("Invalid ratio key.alpha_g1, (alpha_g2_s, key.alpha_g2)");
            return false;
        }
        if !same_ratio(key.beta_g1, (beta_g2_s, key.beta_g2)) {
            println!("Invalid ratio key.beta_g1, (beta_g2_s, key.beta_g2)");
            return false;
        }

        // Load accumulators AND perform computations

        let mut before = Self::empty();
        let mut after = Self::empty();

        // These checks only touch a part of the accumulator, so read just two elements

        {
            let chunk_size = 2;
            before.read_chunk(0, chunk_size, input_is_compressed, check_input_for_correctness, &input_map).expect("must read a first chunk");
            after.read_chunk(0, chunk_size, output_is_compressed, check_output_for_correctness, &output_map).expect("must read a first chunk");

            // Check the correctness of the generators for tau powers
            if after.tau_powers_g1[0] != E::G1Affine::one() {
                println!("tau_powers_g1[0] != 1");
                return false;
            }
            if after.tau_powers_g2[0] != E::G2Affine::one() {
                println!("tau_powers_g2[0] != 1");
                return false;
            }

            // Did the participant multiply the previous tau by the new one?
            if !same_ratio((before.tau_powers_g1[1], after.tau_powers_g1[1]), (tau_g2_s, key.tau_g2)) {
                println!("Invalid ratio (before.tau_powers_g1[1], after.tau_powers_g1[1]), (tau_g2_s, key.tau_g2)");
                return false;
            }

            // Did the participant multiply the previous alpha by the new one?
            if !same_ratio((before.alpha_tau_powers_g1[0], after.alpha_tau_powers_g1[0]), (alpha_g2_s, key.alpha_g2)) {
                println!("Invalid ratio (before.alpha_tau_powers_g1[0], after.alpha_tau_powers_g1[0]), (alpha_g2_s, key.alpha_g2)");
                return false;
            }

            // Did the participant multiply the previous beta by the new one?
            if !same_ratio((before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (beta_g2_s, key.beta_g2)) {
                println!("Invalid ratio (before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (beta_g2_s, key.beta_g2)");
                return false;
            }
            if !same_ratio((before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (before.beta_g2, after.beta_g2)) {
                println!("Invalid ratio (before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (before.beta_g2, after.beta_g2)");
                return false;
            }

        }

        let tau_powers_g2_0 = after.tau_powers_g2[0].clone();
        let tau_powers_g2_1 = after.tau_powers_g2[1].clone();
        let tau_powers_g1_0 = after.tau_powers_g1[0].clone();
        let tau_powers_g1_1 = after.tau_powers_g1[1].clone();

        // Read in chunks and just verify the same ratios. Because the two fixed points above satisfy
        // tau_powers_g2_1 = tau_powers_g2_0 ^ s, there is no need to worry about chunks overlapping.

        for chunk in &(0..P::TAU_POWERS_LENGTH).into_iter().chunks(P::EMPIRICAL_BATCH_SIZE) {
            if let MinMax(start, end) = chunk.minmax() {
                let size = end - start + 1;
                before.read_chunk(start, size, input_is_compressed, check_input_for_correctness, &input_map).expect("must read a first chunk");
                after.read_chunk(start, size, output_is_compressed, check_output_for_correctness, &output_map).expect("must read a first chunk");

                // Are the powers of tau correct?
                if !same_ratio(power_pairs(&after.tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)) {
                    println!("Invalid ratio power_pairs(&after.tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)");
                    return false;
                }
                if !same_ratio(power_pairs(&after.tau_powers_g2), (tau_powers_g1_0, tau_powers_g1_1)) {
                    println!("Invalid ratio power_pairs(&after.tau_powers_g2), (tau_powers_g1_0, tau_powers_g1_1)");
                    return false;
                }
                if !same_ratio(power_pairs(&after.alpha_tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)) {
                    println!("Invalid ratio power_pairs(&after.alpha_tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)");
                    return false;
                }
                if !same_ratio(power_pairs(&after.beta_tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)) {
                    println!("Invalid ratio power_pairs(&after.beta_tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)");
                    return false;
                }
            } else {
                panic!("Chunk does not have a min and max");
            }
        }

        for chunk in &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).into_iter().chunks(P::EMPIRICAL_BATCH_SIZE) {
            if let MinMax(start, end) = chunk.minmax() {
                let size = end - start + 1;
                before.read_chunk(start, size, input_is_compressed, check_input_for_correctness, &input_map).expect("must read a first chunk");
                after.read_chunk(start, size, output_is_compressed, check_output_for_correctness, &output_map).expect("must read a first chunk");

                assert_eq!(before.tau_powers_g2.len(), 0, "during rest of tau g1 generation tau g2 must be empty");
                assert_eq!(after.tau_powers_g2.len(), 0, "during rest of tau g1 generation tau g2 must be empty");

                // Are the powers of tau correct?
                if !same_ratio(power_pairs(&after.tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1)) {
                    println!("Invalid ratio power_pairs(&after.tau_powers_g1), (tau_powers_g2_0, tau_powers_g2_1) in extra TauG1 contribution");
                    return false;
                }
            } else {
                panic!("Chunk does not have a min and max");
            }
        }

        true
    }

    pub fn decompress(
        input_map: &Mmap,
        output_map: &mut MmapMut,
        check_input_for_correctness: CheckForCorrectness,
    ) -> io::Result<()>
    {
        use itertools::MinMaxResult::{MinMax};

        let mut accumulator = Self::empty();

        for chunk in &(0..P::TAU_POWERS_LENGTH).into_iter().chunks(P::EMPIRICAL_BATCH_SIZE) {
            if let MinMax(start, end) = chunk.minmax() {
                let size = end - start + 1;
                accumulator.read_chunk(start, size, UseCompression::Yes, check_input_for_correctness, &input_map).expect("Must read a chunk");
                accumulator.write_chunk(start, UseCompression::No, output_map)?;
            } else {
                panic!("Chunk does not have a min and max");
            }
        }

        for chunk in &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).into_iter().chunks(P::EMPIRICAL_BATCH_SIZE) {
            if let MinMax(start, end) = chunk.minmax() {
                let size = end - start + 1;
                accumulator.read_chunk(start, size, UseCompression::Yes, check_input_for_correctness, &input_map).expect("must read a chunk");
                assert_eq!(accumulator.tau_powers_g2.len(), 0, "during rest of tau g1 generation tau g2 must be empty");

                accumulator.write_chunk(start, UseCompression::No, output_map)?;

            } else {
                panic!("Chunk does not have a min and max");
            }
        }

        Ok(())
    }
}

impl<E: Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
    pub fn read_chunk(
        &mut self,
        from: usize,
        size: usize,
        compression: UseCompression,
        checked: CheckForCorrectness,
        input_map: &Mmap,
    ) -> Result<(), DeserializationError>
    {
        self.tau_powers_g1 = match compression {
            UseCompression::Yes => {
                self.read_points_chunk::<<E::G1Affine as CurveAffine>::Compressed>(from, size, ElementType::TauG1, compression, checked, &input_map)?
            },
            UseCompression::No => {
                self.read_points_chunk::<<E::G1Affine as CurveAffine>::Uncompressed>(from, size, ElementType::TauG1, compression, checked, &input_map)?
            },
        };

        self.tau_powers_g2 = match compression {
            UseCompression::Yes => {
                self.read_points_chunk::<<E::G2Affine as CurveAffine>::Compressed>(from, size, ElementType::TauG2, compression, checked, &input_map)?
            },
            UseCompression::No => {
                self.read_points_chunk::<<E::G2Affine as CurveAffine>::Uncompressed>(from, size, ElementType::TauG2, compression, checked, &input_map)?
            },
        };

        self.alpha_tau_powers_g1 = match compression {
            UseCompression::Yes => {
                self.read_points_chunk::<<E::G1Affine as CurveAffine>::Compressed>(from, size, ElementType::AlphaG1, compression, checked, &input_map)?
            },
            UseCompression::No => {
                self.read_points_chunk::<<E::G1Affine as CurveAffine>::Uncompressed>(from, size, ElementType::AlphaG1, compression, checked, &input_map)?
            },
        };

        self.beta_tau_powers_g1 = match compression {
            UseCompression::Yes => {
                self.read_points_chunk::<<E::G1Affine as CurveAffine>::Compressed>(from, size, ElementType::BetaG1, compression, checked, &input_map)?
            },
            UseCompression::No => {
                self.read_points_chunk::<<E::G1Affine as CurveAffine>::Uncompressed>(from, size, ElementType::BetaG1, compression, checked, &input_map)?
            },
        };

        self.beta_g2 = match compression {
            UseCompression::Yes => {
                let points = self.read_points_chunk::<<E::G2Affine as CurveAffine>::Compressed>(0, 1, ElementType::BetaG2, compression, checked, &input_map)?;

                points[0]
            },
            UseCompression::No => {
                let points = self.read_points_chunk::<<E::G2Affine as CurveAffine>::Uncompressed>(0, 1, ElementType::BetaG2, compression, checked, &input_map)?;

                points[0]
            },
        };

        Ok(())
    }

    // fn read_point<ENC: EncodedPoint>(

    // ) ->

    fn read_points_chunk<ENC: EncodedPoint>(
        &mut self,
        from: usize,
        size: usize,
        element_type: ElementType,
        compression: UseCompression,
        checked: CheckForCorrectness,
        input_map: &Mmap,
    ) -> Result<Vec<ENC::Affine>, DeserializationError>
    {
        // Read the encoded elements
        let mut res = vec![ENC::empty(); size];

        for (i, encoded) in res.iter_mut().enumerate() {
            let index = from + i;
            match element_type {
                ElementType::TauG1 => {
                    if index >= P::TAU_POWERS_G1_LENGTH {
                        return Ok(vec![]);
                    }
                },
                ElementType::AlphaG1 | ElementType::BetaG1 | ElementType::BetaG2 | ElementType::TauG2 => {
                    if index >= P::TAU_POWERS_LENGTH {
                        return Ok(vec![]);
                    }
                }
            };
            let position = Self::calculate_mmap_position(index, element_type, compression);
            let element_size = Self::get_size(element_type, compression);
            let memory_slice = input_map.get(position..position+element_size).expect("must read point data from file");
            memory_slice.clone().read_exact(encoded.as_mut())?;
        }

        // Allocate space for the deserialized elements
        let mut res_affine = vec![ENC::Affine::zero(); size];

        let mut chunk_size = res.len() / num_cpus::get();
        if chunk_size == 0 {
            chunk_size = 1;
        }

        // If any of our threads encounter a deserialization/IO error, catch
        // it with this.
        let decoding_error = Arc::new(Mutex::new(None));

        crossbeam::scope(|scope| {
            for (source, target) in res.chunks(chunk_size).zip(res_affine.chunks_mut(chunk_size)) {
                let decoding_error = decoding_error.clone();

                scope.spawn(move || {
                    for (source, target) in source.iter().zip(target.iter_mut()) {
                        match {
                            // If we're a participant, we don't need to check all of the
                            // elements in the accumulator, which saves a lot of time.
                            // The hash chain prevents this from being a problem: the
                            // transcript guarantees that the accumulator was properly
                            // formed.
                            match checked {
                                CheckForCorrectness::Yes => {
                                    // Points at infinity are never expected in the accumulator
                                    source.into_affine().map_err(|e| e.into()).and_then(|source| {
                                        if source.is_zero() {
                                            Err(DeserializationError::PointAtInfinity)
                                        } else {
                                            Ok(source)
                                        }
                                    })
                                },
                                CheckForCorrectness::No => source.into_affine_unchecked().map_err(|e| e.into())
                            }
                        }
                        {
                            Ok(source) => {
                                *target = source;
                            },
                            Err(e) => {
                                *decoding_error.lock().unwrap() = Some(e);
                            }
                        }
                    }
                });
            }
        });

        match Arc::try_unwrap(decoding_error).unwrap().into_inner().unwrap() {
            Some(e) => {
                Err(e)
            },
||||
None => {
|
||||
Ok(res_affine)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<E:Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
|
||||
fn write_all(
|
||||
&mut self,
|
||||
chunk_start: usize,
|
||||
compression: UseCompression,
|
||||
element_type: ElementType,
|
||||
output_map: &mut MmapMut,
|
||||
) -> io::Result<()>
|
||||
{
|
||||
match element_type {
|
||||
ElementType::TauG1 => {
|
||||
for (i, c) in self.tau_powers_g1.clone().iter().enumerate() {
|
||||
let index = chunk_start + i;
|
||||
self.write_point(index, c, compression, element_type.clone(), output_map)?;
|
||||
}
|
||||
},
|
||||
ElementType::TauG2 => {
|
||||
for (i, c) in self.tau_powers_g2.clone().iter().enumerate() {
|
||||
let index = chunk_start + i;
|
||||
self.write_point(index, c, compression, element_type.clone(), output_map)?;
|
||||
}
|
||||
},
|
||||
ElementType::AlphaG1 => {
|
||||
for (i, c) in self.alpha_tau_powers_g1.clone().iter().enumerate() {
|
||||
let index = chunk_start + i;
|
||||
self.write_point(index, c, compression, element_type.clone(), output_map)?;
|
||||
}
|
||||
},
|
||||
ElementType::BetaG1 => {
|
||||
for (i, c) in self.beta_tau_powers_g1.clone().iter().enumerate() {
|
||||
let index = chunk_start + i;
|
||||
self.write_point(index, c, compression, element_type.clone(), output_map)?;
|
||||
}
|
||||
},
|
||||
ElementType::BetaG2 => {
|
||||
let index = chunk_start;
|
||||
self.write_point(index, &self.beta_g2.clone(), compression, element_type.clone(), output_map)?
|
||||
}
|
||||
};
|
||||
|
||||
output_map.flush_async()?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn write_point<C>(
|
||||
&mut self,
|
||||
index: usize,
|
||||
p: &C,
|
||||
compression: UseCompression,
|
||||
element_type: ElementType,
|
||||
output_map: &mut MmapMut,
|
||||
) -> io::Result<()>
|
||||
where C: CurveAffine<Engine = E, Scalar = E::Fr>
|
||||
{
|
||||
match element_type {
|
||||
ElementType::TauG1 => {
|
||||
if index >= P::TAU_POWERS_G1_LENGTH {
|
||||
return Ok(());
|
||||
}
|
||||
},
|
||||
ElementType::AlphaG1 | ElementType::BetaG1 | ElementType::BetaG2 | ElementType::TauG2 => {
|
||||
if index >= P::TAU_POWERS_LENGTH {
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
match compression {
|
||||
UseCompression::Yes => {
|
||||
let position = Self::calculate_mmap_position(index, element_type, compression);
|
||||
// let size = self.get_size(element_type, compression);
|
||||
(&mut output_map[position..]).write(p.into_compressed().as_ref())?;
|
||||
},
|
||||
UseCompression::No => {
|
||||
let position = Self::calculate_mmap_position(index, element_type, compression);
|
||||
// let size = self.get_size(element_type, compression);
|
||||
(&mut output_map[position..]).write(p.into_uncompressed().as_ref())?;
|
||||
},
|
||||
};
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Write the accumulator with some compression behavior.
|
||||
pub fn write_chunk(
|
||||
&mut self,
|
||||
chunk_start: usize,
|
||||
compression: UseCompression,
|
||||
output_map: &mut MmapMut
|
||||
) -> io::Result<()>
|
||||
{
|
||||
self.write_all(chunk_start, compression, ElementType::TauG1, output_map)?;
|
||||
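// Only the tau powers in G1 extend beyond TAU_POWERS_LENGTH; all other element types
// exist only for smaller indices, so chunks past that point skip them.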
if chunk_start < P::TAU_POWERS_LENGTH {
|
||||
self.write_all(chunk_start, compression, ElementType::TauG2, output_map)?;
|
||||
self.write_all(chunk_start, compression, ElementType::AlphaG1, output_map)?;
|
||||
self.write_all(chunk_start, compression, ElementType::BetaG1, output_map)?;
|
||||
self.write_all(chunk_start, compression, ElementType::BetaG2, output_map)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
impl<E:Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
|
||||
/// Transforms the accumulator with a private key.
|
||||
/// Due to the large amount of data in the previous accumulator, even in compressed form,
|
||||
/// this function works on compressed input. The output can be written in either form.
|
||||
/// WARNING: The contributor does not have to check that values from the challenge file were serialized
|
||||
/// correctly, but we may want to enforce it if a ceremony coordinator does not recompress the previous
|
||||
/// contribution into the new challenge file
|
||||
pub fn transform(
|
||||
input_map: &Mmap,
|
||||
output_map: &mut MmapMut,
|
||||
input_is_compressed: UseCompression,
|
||||
compress_the_output: UseCompression,
|
||||
check_input_for_correctness: CheckForCorrectness,
|
||||
key: &PrivateKey<E>
|
||||
) -> io::Result<()>
|
||||
{
|
||||
|
||||
/// Exponentiate a large number of points, with an optional coefficient to be applied to the
|
||||
/// exponent.
|
||||
fn batch_exp<EE: Engine, C: CurveAffine<Engine = EE, Scalar = EE::Fr> >(bases: &mut [C], exp: &[C::Scalar], coeff: Option<&C::Scalar>) {
|
||||
assert_eq!(bases.len(), exp.len());
|
||||
let mut projective = vec![C::Projective::zero(); bases.len()];
|
||||
let chunk_size = bases.len() / num_cpus::get();
|
||||
|
||||
// Perform wNAF over multiple cores, placing results into `projective`.
|
||||
crossbeam::scope(|scope| {
|
||||
for ((bases, exp), projective) in bases.chunks_mut(chunk_size)
|
||||
.zip(exp.chunks(chunk_size))
|
||||
.zip(projective.chunks_mut(chunk_size))
|
||||
{
|
||||
scope.spawn(move || {
|
||||
let mut wnaf = Wnaf::new();
|
||||
|
||||
for ((base, exp), projective) in bases.iter_mut()
|
||||
.zip(exp.iter())
|
||||
.zip(projective.iter_mut())
|
||||
{
|
||||
let mut exp = *exp;
|
||||
if let Some(coeff) = coeff {
|
||||
exp.mul_assign(coeff);
|
||||
}
|
||||
|
||||
*projective = wnaf.base(base.into_projective(), 1).scalar(exp.into_repr());
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
// Perform batch normalization
|
||||
crossbeam::scope(|scope| {
|
||||
for projective in projective.chunks_mut(chunk_size)
|
||||
{
|
||||
scope.spawn(move || {
|
||||
C::Projective::batch_normalization(projective);
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
// Turn it all back into affine points
|
||||
for (projective, affine) in projective.iter().zip(bases.iter_mut()) {
|
||||
*affine = projective.into_affine();
|
||||
}
|
||||
}
|
||||
|
||||
let mut accumulator = Self::empty();
|
||||
|
||||
use itertools::MinMaxResult::{MinMax};
|
||||
|
||||
for chunk in &(0..P::TAU_POWERS_LENGTH).into_iter().chunks(P::EMPIRICAL_BATCH_SIZE) {
|
||||
if let MinMax(start, end) = chunk.minmax() {
|
||||
let size = end - start + 1;
|
||||
accumulator.read_chunk(start, size, input_is_compressed, check_input_for_correctness, &input_map).expect("must read a chunk");
|
||||
|
||||
// Construct the powers of tau
|
||||
let mut taupowers = vec![E::Fr::zero(); size];
|
||||
let chunk_size = size / num_cpus::get();
|
||||
|
||||
// Construct exponents in parallel
|
||||
crossbeam::scope(|scope| {
|
||||
for (i, taupowers) in taupowers.chunks_mut(chunk_size).enumerate() {
|
||||
scope.spawn(move || {
|
||||
let mut acc = key.tau.pow(&[(i * chunk_size) as u64]);
|
||||
|
||||
for t in taupowers {
|
||||
*t = acc;
|
||||
acc.mul_assign(&key.tau);
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
batch_exp::<E, _>(&mut accumulator.tau_powers_g1, &taupowers[0..], None);
|
||||
batch_exp::<E, _>(&mut accumulator.tau_powers_g2, &taupowers[0..], None);
|
||||
batch_exp::<E, _>(&mut accumulator.alpha_tau_powers_g1, &taupowers[0..], Some(&key.alpha));
|
||||
batch_exp::<E, _>(&mut accumulator.beta_tau_powers_g1, &taupowers[0..], Some(&key.beta));
|
||||
accumulator.beta_g2 = accumulator.beta_g2.mul(key.beta).into_affine();
|
||||
accumulator.write_chunk(start, compress_the_output, output_map)?;
|
||||
|
||||
println!("Done processing {} powers of tau", end);
|
||||
} else {
|
||||
panic!("Chunk does not have a min and max");
|
||||
}
|
||||
}
|
||||
|
||||
for chunk in &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).into_iter().chunks(P::EMPIRICAL_BATCH_SIZE) {
|
||||
if let MinMax(start, end) = chunk.minmax() {
|
||||
let size = end - start + 1;
|
||||
accumulator.read_chunk(start, size, input_is_compressed, check_input_for_correctness, &input_map).expect("must read a chunk");
|
||||
assert_eq!(accumulator.tau_powers_g2.len(), 0, "during rest of tau g1 generation tau g2 must be empty");
|
||||
|
||||
// Construct the powers of tau
|
||||
let mut taupowers = vec![E::Fr::zero(); size];
|
||||
let chunk_size = size / num_cpus::get();
|
||||
|
||||
// Construct exponents in parallel
|
||||
crossbeam::scope(|scope| {
|
||||
for (i, taupowers) in taupowers.chunks_mut(chunk_size).enumerate() {
|
||||
scope.spawn(move || {
|
||||
let mut acc = key.tau.pow(&[(i * chunk_size) as u64]);
|
||||
|
||||
for t in taupowers {
|
||||
*t = acc;
|
||||
acc.mul_assign(&key.tau);
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
batch_exp::<E, _>(&mut accumulator.tau_powers_g1, &taupowers[0..], None);
|
||||
accumulator.beta_g2 = accumulator.beta_g2.mul(key.beta).into_affine();
|
||||
accumulator.write_chunk(start, compress_the_output, output_map)?;
|
||||
|
||||
println!("Done processing {} powers of tau", end);
|
||||
} else {
|
||||
panic!("Chunk does not have a min and max");
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<E:Engine, P: PowersOfTauParameters> BachedAccumulator<E, P> {
|
||||
/// Generates the initial accumulator (generator points for every element) and writes it to the output map.
|
||||
pub fn generate_initial(
|
||||
output_map: &mut MmapMut,
|
||||
compress_the_output: UseCompression,
|
||||
) -> io::Result<()>
|
||||
{
|
||||
use itertools::MinMaxResult::{MinMax};
|
||||
|
||||
for chunk in &(0..P::TAU_POWERS_LENGTH).into_iter().chunks(P::EMPIRICAL_BATCH_SIZE) {
|
||||
if let MinMax(start, end) = chunk.minmax() {
|
||||
let size = end - start + 1;
|
||||
let mut accumulator = Self {
|
||||
tau_powers_g1: vec![E::G1Affine::one(); size],
|
||||
tau_powers_g2: vec![E::G2Affine::one(); size],
|
||||
alpha_tau_powers_g1: vec![E::G1Affine::one(); size],
|
||||
beta_tau_powers_g1: vec![E::G1Affine::one(); size],
|
||||
beta_g2: E::G2Affine::one(),
|
||||
hash: blank_hash(),
|
||||
marker: std::marker::PhantomData::<P>{}
|
||||
};
|
||||
|
||||
accumulator.write_chunk(start, compress_the_output, output_map)?;
|
||||
println!("Done processing {} powers of tau", end);
|
||||
} else {
|
||||
panic!("Chunk does not have a min and max");
|
||||
}
|
||||
}
|
||||
|
||||
for chunk in &(P::TAU_POWERS_LENGTH..P::TAU_POWERS_G1_LENGTH).into_iter().chunks(P::EMPIRICAL_BATCH_SIZE) {
|
||||
if let MinMax(start, end) = chunk.minmax() {
|
||||
let size = end - start + 1;
|
||||
let mut accumulator = Self {
|
||||
tau_powers_g1: vec![E::G1Affine::one(); size],
|
||||
tau_powers_g2: vec![],
|
||||
alpha_tau_powers_g1: vec![],
|
||||
beta_tau_powers_g1: vec![],
|
||||
beta_g2: E::G2Affine::one(),
|
||||
hash: blank_hash(),
|
||||
marker: std::marker::PhantomData::<P>{}
|
||||
};
|
||||
|
||||
accumulator.write_chunk(start, compress_the_output, output_map)?;
|
||||
println!("Done processing {} powers of tau", end);
|
||||
} else {
|
||||
panic!("Chunk does not have a min and max");
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
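// Illustrative sketch of the chunked iteration pattern used by `decompress`, `transform` and
// `generate_initial` above (a standalone example with an assumed batch size of 4, not taken
// from the sources): itertools' `chunks` splits the index range and `minmax()` recovers each
// chunk's bounds, from which the (start, size) pair passed to `read_chunk`/`write_chunk` follows.

extern crate itertools;

use itertools::Itertools;
use itertools::MinMaxResult::{MinMax, OneElement, NoElements};

fn main() {
    for chunk in &(0..10usize).into_iter().chunks(4) {
        match chunk.minmax() {
            // A chunk with two or more elements yields its first and last index
            MinMax(start, end) => println!("start = {}, size = {}", start, end - start + 1),
            // A chunk of length one yields OneElement rather than MinMax
            OneElement(start) => println!("start = {}, size = 1", start),
            NoElements => unreachable!("chunks are never empty"),
        }
    }
    // Prints (0, 4), (4, 4) and (8, 2)
}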
192
src/bin/beacon_constrained.rs
Normal file
@ -0,0 +1,192 @@
|
||||
extern crate powersoftau;
|
||||
extern crate pairing;
|
||||
extern crate memmap;
|
||||
extern crate rand;
|
||||
extern crate blake2;
|
||||
extern crate byteorder;
|
||||
extern crate crypto;
|
||||
|
||||
// use powersoftau::bn256::{Bn256CeremonyParameters};
|
||||
use powersoftau::small_bn256::{Bn256CeremonyParameters};
|
||||
use powersoftau::batched_accumulator::{BachedAccumulator};
|
||||
use powersoftau::keypair::{keypair};
|
||||
use powersoftau::parameters::{UseCompression, CheckForCorrectness};
|
||||
|
||||
use std::fs::OpenOptions;
|
||||
use pairing::bn256::Bn256;
|
||||
use memmap::*;
|
||||
|
||||
use std::io::Write;
|
||||
|
||||
use powersoftau::parameters::PowersOfTauParameters;
|
||||
|
||||
#[macro_use]
|
||||
extern crate hex_literal;
|
||||
|
||||
const input_is_compressed: UseCompression = UseCompression::No;
|
||||
const compress_the_output: UseCompression = UseCompression::Yes;
|
||||
const check_input_correctness: CheckForCorrectness = CheckForCorrectness::No;
|
||||
|
||||
|
||||
fn main() {
|
||||
println!("Will contribute a random beacon to accumulator for 2^{} powers of tau", Bn256CeremonyParameters::REQUIRED_POWER);
|
||||
println!("In total will generate up to {} powers", Bn256CeremonyParameters::TAU_POWERS_G1_LENGTH);
|
||||
|
||||
// Create an RNG based on the outcome of the random beacon
|
||||
let mut rng = {
|
||||
use byteorder::{ReadBytesExt, BigEndian};
|
||||
use rand::{SeedableRng};
|
||||
use rand::chacha::ChaChaRng;
|
||||
use crypto::sha2::Sha256;
|
||||
use crypto::digest::Digest;
|
||||
|
||||
// Place block hash here (block number #514200)
|
||||
let mut cur_hash: [u8; 32] = hex!("00000000000000000034b33e842ac1c50456abe5fa92b60f6b3dfc5d247f7b58");
|
||||
|
||||
// Performs 2^n hash iterations over it
|
||||
// const N: usize = 42;
|
||||
|
||||
const N: usize = 16;
|
||||
|
||||
for i in 0..(1u64<<N) {
|
||||
// Print 1024 of the interstitial states
|
||||
// so that verification can be
|
||||
// parallelized
|
||||
|
||||
// if i % (1u64<<(N-10)) == 0 {
|
||||
// print!("{}: ", i);
|
||||
// for b in cur_hash.iter() {
|
||||
// print!("{:02x}", b);
|
||||
// }
|
||||
// println!("");
|
||||
// }
|
||||
|
||||
let mut h = Sha256::new();
|
||||
h.input(&cur_hash);
|
||||
h.result(&mut cur_hash);
|
||||
}
|
||||
|
||||
print!("Final result of beacon: ");
|
||||
for b in cur_hash.iter() {
|
||||
print!("{:02x}", b);
|
||||
}
|
||||
println!("");
|
||||
|
||||
let mut digest = &cur_hash[..];
|
||||
|
||||
let mut seed = [0u32; 8];
|
||||
for i in 0..8 {
|
||||
seed[i] = digest.read_u32::<BigEndian>().expect("digest is large enough for this to work");
|
||||
}
|
||||
|
||||
ChaChaRng::from_seed(&seed)
|
||||
};
|
||||
|
||||
println!("Done creating a beacon RNG");
|
||||
|
||||
// Try to load `./challenge` from disk.
|
||||
let reader = OpenOptions::new()
|
||||
.read(true)
|
||||
.open("challenge").expect("unable open `./challenge` in this directory");
|
||||
|
||||
{
|
||||
let metadata = reader.metadata().expect("unable to get filesystem metadata for `./challenge`");
|
||||
let expected_challenge_length = match input_is_compressed {
|
||||
UseCompression::Yes => {
|
||||
Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE
|
||||
},
|
||||
UseCompression::No => {
|
||||
Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE
|
||||
}
|
||||
};
|
||||
|
||||
if metadata.len() != (expected_challenge_length as u64) {
|
||||
panic!("The size of `./challenge` should be {}, but it's {}, so something isn't right.", expected_challenge_length, metadata.len());
|
||||
}
|
||||
}
|
||||
|
||||
let readable_map = unsafe { MmapOptions::new().map(&reader).expect("unable to create a memory map for input") };
|
||||
|
||||
// Create `./response` in this directory
|
||||
let writer = OpenOptions::new()
|
||||
.read(true)
|
||||
.write(true)
|
||||
.create_new(true)
|
||||
.open("response").expect("unable to create `./response` in this directory");
|
||||
|
||||
let required_output_length = match compress_the_output {
|
||||
UseCompression::Yes => {
|
||||
Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE
|
||||
},
|
||||
UseCompression::No => {
|
||||
Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE + Bn256CeremonyParameters::PUBLIC_KEY_SIZE
|
||||
}
|
||||
};
|
||||
|
||||
writer.set_len(required_output_length as u64).expect("must make output file large enough");
|
||||
|
||||
let mut writable_map = unsafe { MmapOptions::new().map_mut(&writer).expect("unable to create a memory map for output") };
|
||||
|
||||
println!("Calculating previous contribution hash...");
|
||||
|
||||
let current_accumulator_hash = BachedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&readable_map);
|
||||
|
||||
{
|
||||
println!("Contributing on top of the hash:");
|
||||
for line in current_accumulator_hash.as_slice().chunks(16) {
|
||||
print!("\t");
|
||||
for section in line.chunks(4) {
|
||||
for b in section {
|
||||
print!("{:02x}", b);
|
||||
}
|
||||
print!(" ");
|
||||
}
|
||||
println!("");
|
||||
}
|
||||
|
||||
(&mut writable_map[0..]).write(current_accumulator_hash.as_slice()).expect("unable to write a challenge hash to mmap");
|
||||
|
||||
writable_map.flush().expect("unable to write hash to `./response`");
|
||||
}
|
||||
|
||||
// Construct our keypair using the RNG we created above
|
||||
let (pubkey, privkey) = keypair(&mut rng, current_accumulator_hash.as_ref());
|
||||
|
||||
// Perform the transformation
|
||||
println!("Computing and writing your contribution, this could take a while...");
|
||||
|
||||
// this computes a transformation and writes it
|
||||
BachedAccumulator::<Bn256, Bn256CeremonyParameters>::transform(
|
||||
&readable_map,
|
||||
&mut writable_map,
|
||||
input_is_compressed,
|
||||
compress_the_output,
|
||||
check_input_correctness,
|
||||
&privkey
|
||||
).expect("must transform with the key");
|
||||
println!("Finihsing writing your contribution to `./response`...");
|
||||
|
||||
// Write the public key
|
||||
pubkey.write::<Bn256CeremonyParameters>(&mut writable_map, compress_the_output).expect("unable to write public key");
|
||||
|
||||
// Get the hash of the contribution, so the user can compare later
|
||||
let output_readonly = writable_map.make_read_only().expect("must make a map readonly");
|
||||
let contribution_hash = BachedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&output_readonly);
|
||||
|
||||
print!("Done!\n\n\
|
||||
Your contribution has been written to `./response`\n\n\
|
||||
The BLAKE2b hash of `./response` is:\n");
|
||||
|
||||
for line in contribution_hash.as_slice().chunks(16) {
|
||||
print!("\t");
|
||||
for section in line.chunks(4) {
|
||||
for b in section {
|
||||
print!("{:02x}", b);
|
||||
}
|
||||
print!(" ");
|
||||
}
|
||||
println!("");
|
||||
}
|
||||
|
||||
println!("Thank you for your participation, much appreciated! :)");
|
||||
}
|
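// Illustrative sketch (standalone, not taken from the sources): the beacon used above can be
// reproduced independently by repeating the same 2^n SHA-256 iterations over the published
// block hash and comparing the final digest with the one printed by the tool.

extern crate crypto;

use crypto::sha2::Sha256;
use crypto::digest::Digest;

/// Iterates SHA-256 `2^n` times over `cur_hash`, exactly as the beacon derivation does.
fn iterate_beacon(mut cur_hash: [u8; 32], n: usize) -> [u8; 32] {
    for _ in 0..(1u64 << n) {
        let mut h = Sha256::new();
        h.input(&cur_hash);
        h.result(&mut cur_hash);
    }
    cur_hash
}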
177
src/bin/compute_constrained.rs
Normal file
@ -0,0 +1,177 @@
|
||||
extern crate powersoftau;
|
||||
extern crate pairing;
|
||||
extern crate memmap;
|
||||
extern crate rand;
|
||||
extern crate blake2;
|
||||
extern crate byteorder;
|
||||
|
||||
// use powersoftau::bn256::{Bn256CeremonyParameters};
|
||||
use powersoftau::small_bn256::{Bn256CeremonyParameters};
|
||||
use powersoftau::batched_accumulator::{BachedAccumulator};
|
||||
use powersoftau::keypair::{keypair};
|
||||
use powersoftau::parameters::{UseCompression, CheckForCorrectness};
|
||||
|
||||
use std::fs::OpenOptions;
|
||||
use pairing::bn256::Bn256;
|
||||
use memmap::*;
|
||||
|
||||
use std::io::Write;
|
||||
|
||||
use powersoftau::parameters::PowersOfTauParameters;
|
||||
|
||||
const input_is_compressed: UseCompression = UseCompression::No;
|
||||
const compress_the_output: UseCompression = UseCompression::Yes;
|
||||
const check_input_correctness: CheckForCorrectness = CheckForCorrectness::No;
|
||||
|
||||
fn main() {
|
||||
println!("Will contribute to accumulator for 2^{} powers of tau", Bn256CeremonyParameters::REQUIRED_POWER);
|
||||
println!("In total will generate up to {} powers", Bn256CeremonyParameters::TAU_POWERS_G1_LENGTH);
|
||||
|
||||
// Create an RNG based on a mixture of system randomness and user provided randomness
|
||||
let mut rng = {
|
||||
use byteorder::{ReadBytesExt, BigEndian};
|
||||
use blake2::{Blake2b, Digest};
|
||||
use rand::{SeedableRng, Rng, OsRng};
|
||||
use rand::chacha::ChaChaRng;
|
||||
|
||||
let h = {
|
||||
let mut system_rng = OsRng::new().unwrap();
|
||||
let mut h = Blake2b::default();
|
||||
|
||||
// Gather 1024 bytes of entropy from the system
|
||||
for _ in 0..1024 {
|
||||
let r: u8 = system_rng.gen();
|
||||
h.input(&[r]);
|
||||
}
|
||||
|
||||
// Ask the user to provide some information for additional entropy
|
||||
let mut user_input = String::new();
|
||||
println!("Type some random text and press [ENTER] to provide additional entropy...");
|
||||
std::io::stdin().read_line(&mut user_input).expect("expected to read some random text from the user");
|
||||
|
||||
// Hash it all up to make a seed
|
||||
h.input(&user_input.as_bytes());
|
||||
h.result()
|
||||
};
|
||||
|
||||
let mut digest = &h[..];
|
||||
|
||||
// Interpret the first 32 bytes of the digest as 8 32-bit words
|
||||
let mut seed = [0u32; 8];
|
||||
for i in 0..8 {
|
||||
seed[i] = digest.read_u32::<BigEndian>().expect("digest is large enough for this to work");
|
||||
}
|
||||
|
||||
ChaChaRng::from_seed(&seed)
|
||||
};
|
||||
|
||||
// Try to load `./challenge` from disk.
|
||||
let reader = OpenOptions::new()
|
||||
.read(true)
|
||||
.open("challenge").expect("unable open `./challenge` in this directory");
|
||||
|
||||
{
|
||||
let metadata = reader.metadata().expect("unable to get filesystem metadata for `./challenge`");
|
||||
let expected_challenge_length = match input_is_compressed {
|
||||
UseCompression::Yes => {
|
||||
Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE
|
||||
},
|
||||
UseCompression::No => {
|
||||
Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE
|
||||
}
|
||||
};
|
||||
|
||||
if metadata.len() != (expected_challenge_length as u64) {
|
||||
panic!("The size of `./challenge` should be {}, but it's {}, so something isn't right.", expected_challenge_length, metadata.len());
|
||||
}
|
||||
}
|
||||
|
||||
let readable_map = unsafe { MmapOptions::new().map(&reader).expect("unable to create a memory map for input") };
|
||||
|
||||
// Create `./response` in this directory
|
||||
let writer = OpenOptions::new()
|
||||
.read(true)
|
||||
.write(true)
|
||||
.create_new(true)
|
||||
.open("response").expect("unable to create `./response` in this directory");
|
||||
|
||||
let required_output_length = match compress_the_output {
|
||||
UseCompression::Yes => {
|
||||
Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE
|
||||
},
|
||||
UseCompression::No => {
|
||||
Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE + Bn256CeremonyParameters::PUBLIC_KEY_SIZE
|
||||
}
|
||||
};
|
||||
|
||||
writer.set_len(required_output_length as u64).expect("must make output file large enough");
|
||||
|
||||
let mut writable_map = unsafe { MmapOptions::new().map_mut(&writer).expect("unable to create a memory map for output") };
|
||||
|
||||
println!("Calculating previous contribution hash...");
|
||||
|
||||
assert!(UseCompression::No == input_is_compressed, "Hashing the compressed file is not yet defined");
|
||||
let current_accumulator_hash = BachedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&readable_map);
|
||||
|
||||
{
|
||||
println!("Contributing on top of the hash:");
|
||||
for line in current_accumulator_hash.as_slice().chunks(16) {
|
||||
print!("\t");
|
||||
for section in line.chunks(4) {
|
||||
for b in section {
|
||||
print!("{:02x}", b);
|
||||
}
|
||||
print!(" ");
|
||||
}
|
||||
println!("");
|
||||
}
|
||||
|
||||
(&mut writable_map[0..]).write(current_accumulator_hash.as_slice()).expect("unable to write a challenge hash to mmap");
|
||||
|
||||
writable_map.flush().expect("unable to write hash to `./response`");
|
||||
}
|
||||
|
||||
// Construct our keypair using the RNG we created above
|
||||
let (pubkey, privkey) = keypair(&mut rng, current_accumulator_hash.as_ref());
|
||||
|
||||
// Perform the transformation
|
||||
println!("Computing and writing your contribution, this could take a while...");
|
||||
|
||||
// this computes a transformation and writes it
|
||||
BachedAccumulator::<Bn256, Bn256CeremonyParameters>::transform(
|
||||
&readable_map,
|
||||
&mut writable_map,
|
||||
input_is_compressed,
|
||||
compress_the_output,
|
||||
check_input_correctness,
|
||||
&privkey
|
||||
).expect("must transform with the key");
|
||||
|
||||
println!("Finihsing writing your contribution to `./response`...");
|
||||
|
||||
// Write the public key
|
||||
pubkey.write::<Bn256CeremonyParameters>(&mut writable_map, compress_the_output).expect("unable to write public key");
|
||||
|
||||
writable_map.flush().expect("must flush a memory map");
|
||||
|
||||
// Get the hash of the contribution, so the user can compare later
|
||||
let output_readonly = writable_map.make_read_only().expect("must make a map readonly");
|
||||
let contribution_hash = BachedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&output_readonly);
|
||||
|
||||
print!("Done!\n\n\
|
||||
Your contribution has been written to `./response`\n\n\
|
||||
The BLAKE2b hash of `./response` is:\n");
|
||||
|
||||
for line in contribution_hash.as_slice().chunks(16) {
|
||||
print!("\t");
|
||||
for section in line.chunks(4) {
|
||||
for b in section {
|
||||
print!("{:02x}", b);
|
||||
}
|
||||
print!(" ");
|
||||
}
|
||||
println!("");
|
||||
}
|
||||
|
||||
println!("Thank you for your participation, much appreciated! :)");
|
||||
}
|
@ -1,8 +1,15 @@
|
||||
extern crate powersoftau;
|
||||
use powersoftau::*;
|
||||
extern crate pairing;
|
||||
|
||||
// use powersoftau::bn256::{Bn256CeremonyParameters};
|
||||
use powersoftau::small_bn256::{Bn256CeremonyParameters};
|
||||
use powersoftau::accumulator::{Accumulator};
|
||||
use powersoftau::utils::{blank_hash};
|
||||
use powersoftau::parameters::{UseCompression};
|
||||
|
||||
use std::fs::OpenOptions;
|
||||
use std::io::{Write, BufWriter};
|
||||
use pairing::bn256::Bn256;
|
||||
|
||||
fn main() {
|
||||
let writer = OpenOptions::new()
|
||||
@ -16,7 +23,10 @@ fn main() {
|
||||
// Write a blank BLAKE2b hash:
|
||||
writer.write_all(&blank_hash().as_slice()).expect("unable to write blank hash to `./challenge`");
|
||||
|
||||
let acc = Accumulator::new();
|
||||
let parameters = Bn256CeremonyParameters{};
|
||||
|
||||
let acc: Accumulator<Bn256, _> = Accumulator::new(parameters);
|
||||
println!("Writing an empty accumulator to disk");
|
||||
acc.serialize(&mut writer, UseCompression::No).expect("unable to write fresh accumulator to `./challenge`");
|
||||
writer.flush().expect("unable to flush accumulator to disk");
|
||||
|
||||
|
24
src/bin/new.rs.nocompile
Normal file
@ -0,0 +1,24 @@
|
||||
extern crate powersoftau;
|
||||
use powersoftau::*;
|
||||
|
||||
use std::fs::OpenOptions;
|
||||
use std::io::{Write, BufWriter};
|
||||
|
||||
fn main() {
|
||||
let writer = OpenOptions::new()
|
||||
.read(false)
|
||||
.write(true)
|
||||
.create_new(true)
|
||||
.open("challenge").expect("unable to create `./challenge`");
|
||||
|
||||
let mut writer = BufWriter::new(writer);
|
||||
|
||||
// Write a blank BLAKE2b hash:
|
||||
writer.write_all(&blank_hash().as_slice()).expect("unable to write blank hash to `./challenge`");
|
||||
|
||||
let acc = Accumulator::new();
|
||||
acc.serialize(&mut writer, UseCompression::No).expect("unable to write fresh accumulator to `./challenge`");
|
||||
writer.flush().expect("unable to flush accumulator to disk");
|
||||
|
||||
println!("Wrote a fresh accumulator to `./challenge`");
|
||||
}
|
81
src/bin/new_constrained.rs
Normal file
@ -0,0 +1,81 @@
|
||||
extern crate powersoftau;
|
||||
extern crate pairing;
|
||||
extern crate memmap;
|
||||
|
||||
// use powersoftau::bn256::{Bn256CeremonyParameters};
|
||||
use powersoftau::small_bn256::{Bn256CeremonyParameters};
|
||||
use powersoftau::batched_accumulator::{BachedAccumulator};
|
||||
use powersoftau::parameters::{UseCompression};
|
||||
use powersoftau::utils::{blank_hash};
|
||||
|
||||
use std::fs::OpenOptions;
|
||||
use std::io::{Write};
|
||||
use pairing::bn256::Bn256;
|
||||
use memmap::*;
|
||||
|
||||
use powersoftau::parameters::PowersOfTauParameters;
|
||||
|
||||
const compress_new_challenge: UseCompression = UseCompression::No;
|
||||
|
||||
fn main() {
|
||||
println!("Will generate an empty accumulator for 2^{} powers of tau", Bn256CeremonyParameters::REQUIRED_POWER);
|
||||
println!("In total will generate up to {} powers", Bn256CeremonyParameters::TAU_POWERS_G1_LENGTH);
|
||||
|
||||
let file = OpenOptions::new()
|
||||
.read(true)
|
||||
.write(true)
|
||||
.create_new(true)
|
||||
.open("challenge").expect("unable to create `./challenge`");
|
||||
|
||||
let expected_challenge_length = match compress_new_challenge {
|
||||
UseCompression::Yes => {
|
||||
Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE - Bn256CeremonyParameters::PUBLIC_KEY_SIZE
|
||||
},
|
||||
UseCompression::No => {
|
||||
Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE
|
||||
}
|
||||
};
|
||||
|
||||
file.set_len(expected_challenge_length as u64).expect("unable to allocate large enough file");
|
||||
|
||||
let mut writable_map = unsafe { MmapOptions::new().map_mut(&file).expect("unable to create a memory map") };
|
||||
|
||||
// Write a blank BLAKE2b hash:
|
||||
let hash = blank_hash();
|
||||
(&mut writable_map[0..]).write(hash.as_slice()).expect("unable to write a default hash to mmap");
|
||||
writable_map.flush().expect("unable to write blank hash to `./challenge`");
|
||||
|
||||
println!("Blank hash for an empty challenge:");
|
||||
for line in hash.as_slice().chunks(16) {
|
||||
print!("\t");
|
||||
for section in line.chunks(4) {
|
||||
for b in section {
|
||||
print!("{:02x}", b);
|
||||
}
|
||||
print!(" ");
|
||||
}
|
||||
println!("");
|
||||
}
|
||||
|
||||
BachedAccumulator::<Bn256, Bn256CeremonyParameters>::generate_initial(&mut writable_map, compress_new_challenge).expect("generation of initial accumulator is successful");
|
||||
writable_map.flush().expect("unable to flush memmap to disk");
|
||||
|
||||
// Get the hash of the contribution, so the user can compare later
|
||||
let output_readonly = writable_map.make_read_only().expect("must make a map readonly");
|
||||
let contribution_hash = BachedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&output_readonly);
|
||||
|
||||
println!("Empty contribution is formed with a hash:");
|
||||
|
||||
for line in contribution_hash.as_slice().chunks(16) {
|
||||
print!("\t");
|
||||
for section in line.chunks(4) {
|
||||
for b in section {
|
||||
print!("{:02x}", b);
|
||||
}
|
||||
print!(" ");
|
||||
}
|
||||
println!("");
|
||||
}
|
||||
|
||||
println!("Wrote a fresh accumulator to `./challenge`");
|
||||
}
|
190
src/bin/verify_transform_constrained.rs
Normal file
@ -0,0 +1,190 @@
|
||||
extern crate powersoftau;
|
||||
extern crate pairing;
|
||||
extern crate memmap;
|
||||
extern crate rand;
|
||||
extern crate blake2;
|
||||
extern crate byteorder;
|
||||
|
||||
// use powersoftau::bn256::{Bn256CeremonyParameters};
|
||||
use powersoftau::small_bn256::{Bn256CeremonyParameters};
|
||||
use powersoftau::batched_accumulator::{BachedAccumulator};
|
||||
use powersoftau::keypair::{PublicKey};
|
||||
use powersoftau::parameters::{UseCompression, CheckForCorrectness};
|
||||
|
||||
use std::fs::OpenOptions;
|
||||
use pairing::bn256::Bn256;
|
||||
use memmap::*;
|
||||
|
||||
use std::io::{Read, Write};
|
||||
|
||||
use powersoftau::parameters::PowersOfTauParameters;
|
||||
|
||||
const previous_challenge_is_compressed: UseCompression = UseCompression::No;
|
||||
const contribution_is_compressed: UseCompression = UseCompression::Yes;
|
||||
const compress_new_challenge: UseCompression = UseCompression::No;
|
||||
|
||||
fn main() {
|
||||
println!("Will verify and decompress a contribution to accumulator for 2^{} powers of tau", Bn256CeremonyParameters::REQUIRED_POWER);
|
||||
|
||||
// Try to load `./challenge` from disk.
|
||||
let challenge_reader = OpenOptions::new()
|
||||
.read(true)
|
||||
.open("challenge").expect("unable open `./challenge` in this directory");
|
||||
|
||||
{
|
||||
let metadata = challenge_reader.metadata().expect("unable to get filesystem metadata for `./challenge`");
|
||||
let expected_challenge_length = match previous_challenge_is_compressed {
|
||||
UseCompression::Yes => {
|
||||
Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE
|
||||
},
|
||||
UseCompression::No => {
|
||||
Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE
|
||||
}
|
||||
};
|
||||
if metadata.len() != (expected_challenge_length as u64) {
|
||||
panic!("The size of `./challenge` should be {}, but it's {}, so something isn't right.", expected_challenge_length, metadata.len());
|
||||
}
|
||||
}
|
||||
|
||||
let challenge_readable_map = unsafe { MmapOptions::new().map(&challenge_reader).expect("unable to create a memory map for input") };
|
||||
|
||||
// Try to load `./response` from disk.
|
||||
let response_reader = OpenOptions::new()
|
||||
.read(true)
|
||||
.open("response").expect("unable open `./response` in this directory");
|
||||
|
||||
{
|
||||
let metadata = response_reader.metadata().expect("unable to get filesystem metadata for `./response`");
|
||||
let expected_response_length = match contribution_is_compressed {
|
||||
UseCompression::Yes => {
|
||||
Bn256CeremonyParameters::CONTRIBUTION_BYTE_SIZE
|
||||
},
|
||||
UseCompression::No => {
|
||||
Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE + Bn256CeremonyParameters::PUBLIC_KEY_SIZE
|
||||
}
|
||||
};
|
||||
if metadata.len() != (expected_response_length as u64) {
|
||||
panic!("The size of `./response` should be {}, but it's {}, so something isn't right.", expected_response_length, metadata.len());
|
||||
}
|
||||
}
|
||||
|
||||
let response_readable_map = unsafe { MmapOptions::new().map(&response_reader).expect("unable to create a memory map for input") };
|
||||
|
||||
println!("Calculating previous challenge hash...");
|
||||
|
||||
// Check that contribution is correct
|
||||
|
||||
let current_accumulator_hash = BachedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&challenge_readable_map);
|
||||
|
||||
println!("Previous challenge hash");
|
||||
for line in current_accumulator_hash.as_slice().chunks(16) {
|
||||
print!("\t");
|
||||
for section in line.chunks(4) {
|
||||
for b in section {
|
||||
print!("{:02x}", b);
|
||||
}
|
||||
print!(" ");
|
||||
}
|
||||
println!("");
|
||||
}
|
||||
|
||||
// Check the hash chain - a new response must be based on the previous challenge!
|
||||
{
|
||||
let mut response_challenge_hash = [0; 64];
|
||||
let memory_slice = response_readable_map.get(0..64).expect("must read point data from file");
|
||||
memory_slice.clone().read_exact(&mut response_challenge_hash).expect("couldn't read hash of challenge file from response file");
|
||||
|
||||
println!("Response was based on the hash");
|
||||
for line in response_challenge_hash.chunks(16) {
|
||||
print!("\t");
|
||||
for section in line.chunks(4) {
|
||||
for b in section {
|
||||
print!("{:02x}", b);
|
||||
}
|
||||
print!(" ");
|
||||
}
|
||||
println!("");
|
||||
}
|
||||
|
||||
if &response_challenge_hash[..] != current_accumulator_hash.as_slice() {
|
||||
panic!("Hash chain failure. This is not the right response.");
|
||||
}
|
||||
}
|
||||
|
||||
// get the contributor's public key
|
||||
let public_key = PublicKey::<Bn256>::read::<Bn256CeremonyParameters>(&response_readable_map, contribution_is_compressed)
|
||||
.expect("wasn't able to deserialize the response file's public key");
|
||||
|
||||
|
||||
// check that it follows the protocol
|
||||
|
||||
let valid = BachedAccumulator::<Bn256, Bn256CeremonyParameters>::verify_transformation(
|
||||
&challenge_readable_map,
|
||||
&response_readable_map,
|
||||
&public_key,
|
||||
current_accumulator_hash.as_slice(),
|
||||
previous_challenge_is_compressed,
|
||||
contribution_is_compressed,
|
||||
CheckForCorrectness::No,
|
||||
CheckForCorrectness::Yes,
|
||||
);
|
||||
|
||||
if !valid {
|
||||
println!("Verification failed, contribution was invalid somehow.");
|
||||
panic!("INVALID CONTRIBUTION!!!");
|
||||
} else {
|
||||
println!("Verification succeeded!");
|
||||
}
|
||||
|
||||
|
||||
let response_hash = BachedAccumulator::<Bn256, Bn256CeremonyParameters>::calculate_hash(&response_readable_map);
|
||||
|
||||
println!("Here's the BLAKE2b hash of the participant's response file:");
|
||||
|
||||
for line in response_hash.as_slice().chunks(16) {
|
||||
print!("\t");
|
||||
for section in line.chunks(4) {
|
||||
for b in section {
|
||||
print!("{:02x}", b);
|
||||
}
|
||||
print!(" ");
|
||||
}
|
||||
println!("");
|
||||
}
|
||||
|
||||
if compress_new_challenge == UseCompression::Yes {
|
||||
println!("Don't need to recompress the contribution, please copy `./response` as `./new_challenge`");
|
||||
} else {
|
||||
println!("Verification succeeded! Writing to `./new_challenge`...");
|
||||
|
||||
// Create `./new_challenge` in this directory
|
||||
let writer = OpenOptions::new()
|
||||
.read(true)
|
||||
.write(true)
|
||||
.create_new(true)
|
||||
.open("new_challenge").expect("unable to create `./new_challenge` in this directory");
|
||||
|
||||
|
||||
|
||||
// Recomputation strips the public key and uses hashing to link with the previous contribution after decompression
|
||||
writer.set_len(Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE as u64).expect("must make output file large enough");
|
||||
|
||||
let mut writable_map = unsafe { MmapOptions::new().map_mut(&writer).expect("unable to create a memory map for output") };
|
||||
|
||||
{
|
||||
(&mut writable_map[0..]).write(response_hash.as_slice()).expect("unable to write a default hash to mmap");
|
||||
|
||||
writable_map.flush().expect("unable to write hash to `./new_challenge`");
|
||||
}
|
||||
|
||||
BachedAccumulator::<Bn256, Bn256CeremonyParameters>::decompress(
|
||||
&response_readable_map,
|
||||
&mut writable_map,
|
||||
CheckForCorrectness::No).expect("must decompress a response for a new challenge");
|
||||
|
||||
writable_map.flush().expect("must flush the memory map");
|
||||
|
||||
println!("Done! `./new_challenge` contains the new challenge file. The other files");
|
||||
println!("were left alone.");
|
||||
}
|
||||
}
|
859
src/bls12_381/mod.rs
Normal file
@ -0,0 +1,859 @@
|
||||
//! This ceremony constructs the "powers of tau" for Jens Groth's 2016 zk-SNARK proving
|
||||
//! system using the BLS12-381 pairing-friendly elliptic curve construction.
|
||||
//!
|
||||
//! # Overview
|
||||
//!
|
||||
//! Participants of the ceremony receive a "challenge" file containing:
|
||||
//!
|
||||
//! * the BLAKE2b hash of the last file entered into the transcript
|
||||
//! * an `Accumulator` (with curve points encoded in uncompressed form for fast deserialization)
|
||||
//!
|
||||
//! The participant runs a tool which generates a random keypair (`PublicKey`, `PrivateKey`)
|
||||
//! used for modifying the `Accumulator` from the "challenge" file. The keypair is then used to
|
||||
//! transform the `Accumulator`, and a "response" file is generated containing:
|
||||
//!
|
||||
//! * the BLAKE2b hash of the "challenge" file (thus forming a hash chain over the entire transcript)
|
||||
//! * an `Accumulator` (with curve points encoded in compressed form for fast uploading)
|
||||
//! * the `PublicKey`
|
||||
//!
|
||||
//! This "challenge" file is entered into the protocol transcript. A given transcript is valid
|
||||
//! if the transformations between consecutive `Accumulator`s verify with their respective
|
||||
//! `PublicKey`s. Participants (and the public) can ensure that their contribution to the
|
||||
//! `Accumulator` was accepted by ensuring the transcript contains their "response" file, ideally
|
||||
//! by comparison of the BLAKE2b hash of the "response" file.
|
||||
//!
|
||||
//! After some time has elapsed for participants to contribute to the ceremony, a participant is
|
||||
//! simulated with a randomness beacon. The resulting `Accumulator` contains partial zk-SNARK
|
||||
//! public parameters for all circuits within a bounded size.
|
||||
|
||||
extern crate pairing;
|
||||
extern crate rand;
|
||||
extern crate crossbeam;
|
||||
extern crate num_cpus;
|
||||
extern crate blake2;
|
||||
extern crate generic_array;
|
||||
extern crate typenum;
|
||||
extern crate byteorder;
|
||||
|
||||
use byteorder::{ReadBytesExt, BigEndian};
|
||||
use rand::{SeedableRng, Rng, Rand};
|
||||
use rand::chacha::ChaChaRng;
|
||||
use pairing::bls12_381::*;
|
||||
use pairing::*;
|
||||
use std::io::{self, Read, Write};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use generic_array::GenericArray;
|
||||
use typenum::consts::U64;
|
||||
use blake2::{Blake2b, Digest};
|
||||
use std::fmt;
|
||||
|
||||
// This ceremony is based on the BLS12-381 elliptic curve construction.
|
||||
const G1_UNCOMPRESSED_BYTE_SIZE: usize = 96;
|
||||
const G2_UNCOMPRESSED_BYTE_SIZE: usize = 192;
|
||||
const G1_COMPRESSED_BYTE_SIZE: usize = 48;
|
||||
const G2_COMPRESSED_BYTE_SIZE: usize = 96;
|
||||
|
||||
/// The accumulator supports circuits with 2^21 multiplication gates.
|
||||
const TAU_POWERS_LENGTH: usize = (1 << 21);
|
||||
|
||||
/// More tau powers are needed in G1 because the Groth16 H query
|
||||
/// includes terms of the form tau^i * (tau^m - 1) = tau^(i+m) - tau^i
|
||||
/// where the largest i = m - 2, requiring the computation of tau^(2m - 2)
|
||||
/// and thus giving us a vector length of 2^22 - 1.
|
||||
const TAU_POWERS_G1_LENGTH: usize = (TAU_POWERS_LENGTH << 1) - 1;
|
||||
|
||||
/// The size of the accumulator on disk.
|
||||
pub const ACCUMULATOR_BYTE_SIZE: usize = (TAU_POWERS_G1_LENGTH * G1_UNCOMPRESSED_BYTE_SIZE) + // g1 tau powers
|
||||
(TAU_POWERS_LENGTH * G2_UNCOMPRESSED_BYTE_SIZE) + // g2 tau powers
|
||||
(TAU_POWERS_LENGTH * G1_UNCOMPRESSED_BYTE_SIZE) + // alpha tau powers
|
||||
(TAU_POWERS_LENGTH * G1_UNCOMPRESSED_BYTE_SIZE) // beta tau powers
|
||||
+ G2_UNCOMPRESSED_BYTE_SIZE // beta in g2
|
||||
+ 64; // blake2b hash of previous contribution
|
||||
|
||||
/// The "public key" is used to verify a contribution was correctly
|
||||
/// computed.
|
||||
pub const PUBLIC_KEY_SIZE: usize = 3 * G2_UNCOMPRESSED_BYTE_SIZE + // tau, alpha, and beta in g2
|
||||
6 * G1_UNCOMPRESSED_BYTE_SIZE; // (s1, s1*tau), (s2, s2*alpha), (s3, s3*beta) in g1
|
||||
|
||||
/// The size of the contribution on disk.
|
||||
pub const CONTRIBUTION_BYTE_SIZE: usize = (TAU_POWERS_G1_LENGTH * G1_COMPRESSED_BYTE_SIZE) + // g1 tau powers
|
||||
(TAU_POWERS_LENGTH * G2_COMPRESSED_BYTE_SIZE) + // g2 tau powers
|
||||
(TAU_POWERS_LENGTH * G1_COMPRESSED_BYTE_SIZE) + // alpha tau powers
|
||||
(TAU_POWERS_LENGTH * G1_COMPRESSED_BYTE_SIZE) // beta tau powers
|
||||
+ G2_COMPRESSED_BYTE_SIZE // beta in g2
|
||||
+ 64 // blake2b hash of input accumulator
|
||||
+ PUBLIC_KEY_SIZE; // public key
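// For the parameters above (TAU_POWERS_LENGTH = 2^21, TAU_POWERS_G1_LENGTH = 2^22 - 1) these
// constants evaluate to:
//
//   ACCUMULATOR_BYTE_SIZE  = 4_194_303 * 96 + 2_097_152 * (192 + 96 + 96) + 192 + 64
//                          = 1_207_959_712 bytes (a bit over 1.2 GB uncompressed)
//   CONTRIBUTION_BYTE_SIZE = 4_194_303 * 48 + 2_097_152 * (96 + 48 + 48) + 96 + 64 + 1_152
//                          = 603_981_040 bytes (roughly 604 MB compressed)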
|
||||
|
||||
/// Hashes to G2 using the first 32 bytes of `digest`. Panics if `digest` is less
|
||||
/// than 32 bytes.
|
||||
fn hash_to_g2(mut digest: &[u8]) -> G2
|
||||
{
|
||||
assert!(digest.len() >= 32);
|
||||
|
||||
let mut seed = Vec::with_capacity(8);
|
||||
|
||||
for _ in 0..8 {
|
||||
seed.push(digest.read_u32::<BigEndian>().expect("assertion above guarantees this to work"));
|
||||
}
|
||||
|
||||
ChaChaRng::from_seed(&seed).gen()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_hash_to_g2() {
|
||||
assert!(
|
||||
hash_to_g2(&[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33])
|
||||
==
|
||||
hash_to_g2(&[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,34])
|
||||
);
|
||||
|
||||
assert!(
|
||||
hash_to_g2(&[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32])
|
||||
!=
|
||||
hash_to_g2(&[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,33])
|
||||
);
|
||||
}
|
||||
|
||||
/// Contains terms of the form (s<sub>1</sub>, s<sub>1</sub><sup>x</sup>, H(s<sub>1</sub><sup>x</sup>)<sub>2</sub>, H(s<sub>1</sub><sup>x</sup>)<sub>2</sub><sup>x</sup>)
|
||||
/// for all x in τ, α and β, and some s chosen randomly by its creator. The function H "hashes into" the group G2. No points in the public key may be the identity.
|
||||
///
|
||||
/// The elements in G2 are used to verify transformations of the accumulator. By its nature, the public key proves
|
||||
/// knowledge of τ, α and β.
|
||||
///
|
||||
/// It is necessary to verify `same_ratio`((s<sub>1</sub>, s<sub>1</sub><sup>x</sup>), (H(s<sub>1</sub><sup>x</sup>)<sub>2</sub>, H(s<sub>1</sub><sup>x</sup>)<sub>2</sub><sup>x</sup>)).
|
||||
#[derive(PartialEq, Eq)]
|
||||
pub struct PublicKey {
|
||||
tau_g1: (G1Affine, G1Affine),
|
||||
alpha_g1: (G1Affine, G1Affine),
|
||||
beta_g1: (G1Affine, G1Affine),
|
||||
tau_g2: G2Affine,
|
||||
alpha_g2: G2Affine,
|
||||
beta_g2: G2Affine
|
||||
}
|
||||
|
||||
/// Contains the secrets τ, α and β that the participant of the ceremony must destroy.
|
||||
pub struct PrivateKey {
|
||||
tau: Fr,
|
||||
alpha: Fr,
|
||||
beta: Fr
|
||||
}
|
||||
|
||||
/// Constructs a keypair given an RNG and a 64-byte transcript `digest`.
|
||||
pub fn keypair<R: Rng>(rng: &mut R, digest: &[u8]) -> (PublicKey, PrivateKey)
|
||||
{
|
||||
assert_eq!(digest.len(), 64);
|
||||
|
||||
let tau = Fr::rand(rng);
|
||||
let alpha = Fr::rand(rng);
|
||||
let beta = Fr::rand(rng);
|
||||
|
||||
let mut op = |x, personalization: u8| {
|
||||
// Sample random g^s
|
||||
let g1_s = G1::rand(rng).into_affine();
|
||||
// Compute g^{s*x}
|
||||
let g1_s_x = g1_s.mul(x).into_affine();
|
||||
// Compute BLAKE2b(personalization | transcript | g^s | g^{s*x})
|
||||
let h = {
|
||||
let mut h = Blake2b::default();
|
||||
h.input(&[personalization]);
|
||||
h.input(digest);
|
||||
h.input(g1_s.into_uncompressed().as_ref());
|
||||
h.input(g1_s_x.into_uncompressed().as_ref());
|
||||
h.result()
|
||||
};
|
||||
// Hash into G2 as g^{s'}
|
||||
let g2_s = hash_to_g2(h.as_ref()).into_affine();
|
||||
// Compute g^{s'*x}
|
||||
let g2_s_x = g2_s.mul(x).into_affine();
|
||||
|
||||
((g1_s, g1_s_x), g2_s_x)
|
||||
};
|
||||
|
||||
let pk_tau = op(tau, 0);
|
||||
let pk_alpha = op(alpha, 1);
|
||||
let pk_beta = op(beta, 2);
|
||||
|
||||
(
|
||||
PublicKey {
|
||||
tau_g1: pk_tau.0,
|
||||
alpha_g1: pk_alpha.0,
|
||||
beta_g1: pk_beta.0,
|
||||
tau_g2: pk_tau.1,
|
||||
alpha_g2: pk_alpha.1,
|
||||
beta_g2: pk_beta.1,
|
||||
},
|
||||
PrivateKey {
|
||||
tau: tau,
|
||||
alpha: alpha,
|
||||
beta: beta
|
||||
}
|
||||
)
|
||||
}
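// Illustrative sketch (not part of the original source) of the `same_ratio` check referred to
// in the `PublicKey` documentation above: (a, a^x) and (b, b^x) have the same ratio exactly
// when e(a, b^x) == e(a^x, b), which takes two pairings to verify.
fn same_ratio<G: CurveAffine>(
    g1: (G, G),
    g2: (G::Pair, G::Pair)
) -> bool
{
    g1.0.pairing_with(&g2.1) == g1.1.pairing_with(&g2.0)
}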
|
||||
|
||||
/// Determines if point compression should be used.
|
||||
#[derive(Copy, Clone)]
|
||||
pub enum UseCompression {
|
||||
Yes,
|
||||
No
|
||||
}
|
||||
|
||||
/// Determines if points should be checked for correctness during deserialization.
|
||||
/// This is not necessary for participants, because a transcript verifier can
|
||||
/// check this themselves.
|
||||
#[derive(Copy, Clone)]
|
||||
pub enum CheckForCorrectness {
|
||||
Yes,
|
||||
No
|
||||
}
|
||||
|
||||
fn write_point<W, G>(
|
||||
writer: &mut W,
|
||||
p: &G,
|
||||
compression: UseCompression
|
||||
) -> io::Result<()>
|
||||
where W: Write,
|
||||
G: CurveAffine
|
||||
{
|
||||
match compression {
|
||||
UseCompression::Yes => writer.write_all(p.into_compressed().as_ref()),
|
||||
UseCompression::No => writer.write_all(p.into_uncompressed().as_ref()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Errors that might occur during deserialization.
|
||||
#[derive(Debug)]
|
||||
pub enum DeserializationError {
|
||||
IoError(io::Error),
|
||||
DecodingError(GroupDecodingError),
|
||||
PointAtInfinity
|
||||
}
|
||||
|
||||
impl fmt::Display for DeserializationError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
match *self {
|
||||
DeserializationError::IoError(ref e) => write!(f, "Disk IO error: {}", e),
|
||||
DeserializationError::DecodingError(ref e) => write!(f, "Decoding error: {}", e),
|
||||
DeserializationError::PointAtInfinity => write!(f, "Point at infinity found")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<io::Error> for DeserializationError {
|
||||
fn from(err: io::Error) -> DeserializationError {
|
||||
DeserializationError::IoError(err)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<GroupDecodingError> for DeserializationError {
|
||||
fn from(err: GroupDecodingError) -> DeserializationError {
|
||||
DeserializationError::DecodingError(err)
|
||||
}
|
||||
}
|
||||
|
||||
impl PublicKey {
|
||||
/// Serialize the public key. Points are always in uncompressed form.
|
||||
pub fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()>
|
||||
{
|
||||
write_point(writer, &self.tau_g1.0, UseCompression::No)?;
|
||||
write_point(writer, &self.tau_g1.1, UseCompression::No)?;
|
||||
|
||||
write_point(writer, &self.alpha_g1.0, UseCompression::No)?;
|
||||
write_point(writer, &self.alpha_g1.1, UseCompression::No)?;
|
||||
|
||||
write_point(writer, &self.beta_g1.0, UseCompression::No)?;
|
||||
write_point(writer, &self.beta_g1.1, UseCompression::No)?;
|
||||
|
||||
write_point(writer, &self.tau_g2, UseCompression::No)?;
|
||||
write_point(writer, &self.alpha_g2, UseCompression::No)?;
|
||||
write_point(writer, &self.beta_g2, UseCompression::No)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Deserialize the public key. Points are always in uncompressed form, and
|
||||
/// always checked, since there aren't very many of them. Does not allow any
|
||||
/// points at infinity.
|
||||
pub fn deserialize<R: Read>(reader: &mut R) -> Result<PublicKey, DeserializationError>
|
||||
{
|
||||
fn read_uncompressed<C: CurveAffine, R: Read>(reader: &mut R) -> Result<C, DeserializationError> {
|
||||
let mut repr = C::Uncompressed::empty();
|
||||
reader.read_exact(repr.as_mut())?;
|
||||
let v = repr.into_affine()?;
|
||||
|
||||
if v.is_zero() {
|
||||
Err(DeserializationError::PointAtInfinity)
|
||||
} else {
|
||||
Ok(v)
|
||||
}
|
||||
}
|
||||
|
||||
let tau_g1_s = read_uncompressed(reader)?;
|
||||
let tau_g1_s_tau = read_uncompressed(reader)?;
|
||||
|
||||
let alpha_g1_s = read_uncompressed(reader)?;
|
||||
let alpha_g1_s_alpha = read_uncompressed(reader)?;
|
||||
|
||||
let beta_g1_s = read_uncompressed(reader)?;
|
||||
let beta_g1_s_beta = read_uncompressed(reader)?;
|
||||
|
||||
let tau_g2 = read_uncompressed(reader)?;
|
||||
let alpha_g2 = read_uncompressed(reader)?;
|
||||
let beta_g2 = read_uncompressed(reader)?;
|
||||
|
||||
Ok(PublicKey {
|
||||
tau_g1: (tau_g1_s, tau_g1_s_tau),
|
||||
alpha_g1: (alpha_g1_s, alpha_g1_s_alpha),
|
||||
beta_g1: (beta_g1_s, beta_g1_s_beta),
|
||||
tau_g2: tau_g2,
|
||||
alpha_g2: alpha_g2,
|
||||
beta_g2: beta_g2
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_pubkey_serialization() {
|
||||
use rand::thread_rng;
|
||||
|
||||
let rng = &mut thread_rng();
|
||||
let digest = (0..64).map(|_| rng.gen()).collect::<Vec<_>>();
|
||||
let (pk, _) = keypair(rng, &digest);
|
||||
let mut v = vec![];
|
||||
pk.serialize(&mut v).unwrap();
|
||||
assert_eq!(v.len(), PUBLIC_KEY_SIZE);
|
||||
let deserialized = PublicKey::deserialize(&mut &v[..]).unwrap();
|
||||
assert!(pk == deserialized);
|
||||
}
|
||||
|
||||
/// The `Accumulator` is an object that participants of the ceremony contribute
|
||||
/// randomness to. This object contains powers of trapdoor `tau` in G1 and in G2 over
|
||||
/// fixed generators, and additionally in G1 over two other generators of exponents
|
||||
/// `alpha` and `beta` over those fixed generators. In other words:
|
||||
///
|
||||
/// * (τ, τ<sup>2</sup>, ..., τ<sup>2<sup>22</sup> - 2</sup>, α, ατ, ατ<sup>2</sup>, ..., ατ<sup>2<sup>21</sup> - 1</sup>, β, βτ, βτ<sup>2</sup>, ..., βτ<sup>2<sup>21</sup> - 1</sup>)<sub>1</sub>
|
||||
/// * (β, τ, τ<sup>2</sup>, ..., τ<sup>2<sup>21</sup> - 1</sup>)<sub>2</sub>
|
||||
#[derive(PartialEq, Eq, Clone)]
|
||||
pub struct Accumulator {
|
||||
/// tau^0, tau^1, tau^2, ..., tau^{TAU_POWERS_G1_LENGTH - 1}
|
||||
pub tau_powers_g1: Vec<G1Affine>,
|
||||
/// tau^0, tau^1, tau^2, ..., tau^{TAU_POWERS_LENGTH - 1}
|
||||
pub tau_powers_g2: Vec<G2Affine>,
|
||||
/// alpha * tau^0, alpha * tau^1, alpha * tau^2, ..., alpha * tau^{TAU_POWERS_LENGTH - 1}
|
||||
pub alpha_tau_powers_g1: Vec<G1Affine>,
|
||||
/// beta * tau^0, beta * tau^1, beta * tau^2, ..., beta * tau^{TAU_POWERS_LENGTH - 1}
|
||||
pub beta_tau_powers_g1: Vec<G1Affine>,
|
||||
/// beta
|
||||
pub beta_g2: G2Affine
|
||||
}
|
||||
|
||||
impl Accumulator {
|
||||
/// Constructs an "initial" accumulator with τ = 1, α = 1, β = 1.
|
||||
pub fn new() -> Self {
|
||||
Accumulator {
|
||||
tau_powers_g1: vec![G1Affine::one(); TAU_POWERS_G1_LENGTH],
|
||||
tau_powers_g2: vec![G2Affine::one(); TAU_POWERS_LENGTH],
|
||||
alpha_tau_powers_g1: vec![G1Affine::one(); TAU_POWERS_LENGTH],
|
||||
beta_tau_powers_g1: vec![G1Affine::one(); TAU_POWERS_LENGTH],
|
||||
beta_g2: G2Affine::one()
|
||||
}
|
||||
}
|
||||
|
||||
/// Write the accumulator with some compression behavior.
|
||||
pub fn serialize<W: Write>(
|
||||
&self,
|
||||
writer: &mut W,
|
||||
compression: UseCompression
|
||||
) -> io::Result<()>
|
||||
{
|
||||
fn write_all<W: Write, C: CurveAffine>(
|
||||
writer: &mut W,
|
||||
c: &[C],
|
||||
compression: UseCompression
|
||||
) -> io::Result<()>
|
||||
{
|
||||
for c in c {
|
||||
write_point(writer, c, compression)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
write_all(writer, &self.tau_powers_g1, compression)?;
|
||||
write_all(writer, &self.tau_powers_g2, compression)?;
|
||||
write_all(writer, &self.alpha_tau_powers_g1, compression)?;
|
||||
write_all(writer, &self.beta_tau_powers_g1, compression)?;
|
||||
write_all(writer, &[self.beta_g2], compression)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Read the accumulator from disk with some compression behavior. `checked`
|
||||
/// indicates whether we should check it's a valid element of the group and
|
||||
/// not the point at infinity.
|
||||
pub fn deserialize<R: Read>(
|
||||
reader: &mut R,
|
||||
compression: UseCompression,
|
||||
checked: CheckForCorrectness
|
||||
) -> Result<Self, DeserializationError>
|
||||
{
|
||||
fn read_all<R: Read, C: CurveAffine>(
|
||||
reader: &mut R,
|
||||
size: usize,
|
||||
compression: UseCompression,
|
||||
checked: CheckForCorrectness
|
||||
) -> Result<Vec<C>, DeserializationError>
|
||||
{
|
||||
fn decompress_all<R: Read, E: EncodedPoint>(
|
||||
reader: &mut R,
|
||||
size: usize,
|
||||
checked: CheckForCorrectness
|
||||
) -> Result<Vec<E::Affine>, DeserializationError>
|
||||
{
|
||||
// Read the encoded elements
|
||||
let mut res = vec![E::empty(); size];
|
||||
|
||||
for encoded in &mut res {
|
||||
reader.read_exact(encoded.as_mut())?;
|
||||
}
|
||||
|
||||
// Allocate space for the deserialized elements
|
||||
let mut res_affine = vec![E::Affine::zero(); size];
|
||||
|
||||
let mut chunk_size = res.len() / num_cpus::get();
|
||||
if chunk_size == 0 {
|
||||
chunk_size = 1;
|
||||
}
|
||||
|
||||
// If any of our threads encounter a deserialization/IO error, catch
|
||||
// it with this.
|
||||
let decoding_error = Arc::new(Mutex::new(None));
|
||||
|
||||
crossbeam::scope(|scope| {
|
||||
for (source, target) in res.chunks(chunk_size).zip(res_affine.chunks_mut(chunk_size)) {
|
||||
let decoding_error = decoding_error.clone();
|
||||
|
||||
scope.spawn(move || {
|
||||
for (source, target) in source.iter().zip(target.iter_mut()) {
|
||||
match {
|
||||
// If we're a participant, we don't need to check all of the
|
||||
// elements in the accumulator, which saves a lot of time.
|
||||
// The hash chain prevents this from being a problem: the
|
||||
// transcript guarantees that the accumulator was properly
|
||||
// formed.
|
||||
match checked {
|
||||
CheckForCorrectness::Yes => {
|
||||
// Points at infinity are never expected in the accumulator
|
||||
source.into_affine().map_err(|e| e.into()).and_then(|source| {
|
||||
if source.is_zero() {
|
||||
Err(DeserializationError::PointAtInfinity)
|
||||
} else {
|
||||
Ok(source)
|
||||
}
|
||||
})
|
||||
},
|
||||
CheckForCorrectness::No => source.into_affine_unchecked().map_err(|e| e.into())
|
||||
}
|
||||
}
|
||||
{
|
||||
Ok(source) => {
|
||||
*target = source;
|
||||
},
|
||||
Err(e) => {
|
||||
*decoding_error.lock().unwrap() = Some(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
match Arc::try_unwrap(decoding_error).unwrap().into_inner().unwrap() {
|
||||
Some(e) => {
|
||||
Err(e)
|
||||
},
|
||||
None => {
|
||||
Ok(res_affine)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
match compression {
|
||||
UseCompression::Yes => decompress_all::<_, C::Compressed>(reader, size, checked),
|
||||
UseCompression::No => decompress_all::<_, C::Uncompressed>(reader, size, checked)
|
||||
}
|
||||
}
|
||||
|
||||
let tau_powers_g1 = read_all(reader, TAU_POWERS_G1_LENGTH, compression, checked)?;
|
||||
let tau_powers_g2 = read_all(reader, TAU_POWERS_LENGTH, compression, checked)?;
|
||||
let alpha_tau_powers_g1 = read_all(reader, TAU_POWERS_LENGTH, compression, checked)?;
|
||||
let beta_tau_powers_g1 = read_all(reader, TAU_POWERS_LENGTH, compression, checked)?;
|
||||
let beta_g2 = read_all(reader, 1, compression, checked)?[0];
|
||||
|
||||
Ok(Accumulator {
|
||||
tau_powers_g1: tau_powers_g1,
|
||||
tau_powers_g2: tau_powers_g2,
|
||||
alpha_tau_powers_g1: alpha_tau_powers_g1,
|
||||
beta_tau_powers_g1: beta_tau_powers_g1,
|
||||
beta_g2: beta_g2
|
||||
})
|
||||
}
|
||||
|
||||
/// Transforms the accumulator with a private key.
|
||||
pub fn transform(&mut self, key: &PrivateKey)
|
||||
{
|
||||
// Construct the powers of tau
|
||||
let mut taupowers = vec![Fr::zero(); TAU_POWERS_G1_LENGTH];
|
||||
let chunk_size = TAU_POWERS_G1_LENGTH / num_cpus::get();
|
||||
|
||||
// Construct exponents in parallel
|
||||
crossbeam::scope(|scope| {
|
||||
for (i, taupowers) in taupowers.chunks_mut(chunk_size).enumerate() {
|
||||
scope.spawn(move || {
|
||||
let mut acc = key.tau.pow(&[(i * chunk_size) as u64]);
|
||||
|
||||
for t in taupowers {
|
||||
*t = acc;
|
||||
acc.mul_assign(&key.tau);
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
/// Exponentiate a large number of points, with an optional coefficient to be applied to the
|
||||
/// exponent.
|
||||
fn batch_exp<C: CurveAffine>(bases: &mut [C], exp: &[C::Scalar], coeff: Option<&C::Scalar>) {
|
||||
assert_eq!(bases.len(), exp.len());
|
||||
let mut projective = vec![C::Projective::zero(); bases.len()];
|
||||
let chunk_size = bases.len() / num_cpus::get();
|
||||
|
||||
// Perform wNAF over multiple cores, placing results into `projective`.
|
||||
crossbeam::scope(|scope| {
|
||||
for ((bases, exp), projective) in bases.chunks_mut(chunk_size)
|
||||
.zip(exp.chunks(chunk_size))
|
||||
.zip(projective.chunks_mut(chunk_size))
|
||||
{
|
||||
scope.spawn(move || {
|
||||
let mut wnaf = Wnaf::new();
|
||||
|
||||
for ((base, exp), projective) in bases.iter_mut()
|
||||
.zip(exp.iter())
|
||||
.zip(projective.iter_mut())
|
||||
{
|
||||
let mut exp = *exp;
|
||||
if let Some(coeff) = coeff {
|
||||
exp.mul_assign(coeff);
|
||||
}
|
||||
|
||||
*projective = wnaf.base(base.into_projective(), 1).scalar(exp.into_repr());
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
// Perform batch normalization
|
||||
crossbeam::scope(|scope| {
|
||||
for projective in projective.chunks_mut(chunk_size)
|
||||
{
|
||||
scope.spawn(move || {
|
||||
C::Projective::batch_normalization(projective);
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
// Turn it all back into affine points
|
||||
for (projective, affine) in projective.iter().zip(bases.iter_mut()) {
|
||||
*affine = projective.into_affine();
|
||||
}
|
||||
}
|
||||
|
||||
batch_exp(&mut self.tau_powers_g1, &taupowers[0..], None);
|
||||
batch_exp(&mut self.tau_powers_g2, &taupowers[0..TAU_POWERS_LENGTH], None);
|
||||
batch_exp(&mut self.alpha_tau_powers_g1, &taupowers[0..TAU_POWERS_LENGTH], Some(&key.alpha));
|
||||
batch_exp(&mut self.beta_tau_powers_g1, &taupowers[0..TAU_POWERS_LENGTH], Some(&key.beta));
|
||||
self.beta_g2 = self.beta_g2.mul(key.beta).into_affine();
|
||||
}
|
||||
}
|
||||
|
||||
/// Verifies a transformation of the `Accumulator` with the `PublicKey`, given a 64-byte transcript `digest`.
|
||||
pub fn verify_transform(before: &Accumulator, after: &Accumulator, key: &PublicKey, digest: &[u8]) -> bool
|
||||
{
|
||||
assert_eq!(digest.len(), 64);
|
||||
|
||||
let compute_g2_s = |g1_s: G1Affine, g1_s_x: G1Affine, personalization: u8| {
|
||||
let mut h = Blake2b::default();
|
||||
h.input(&[personalization]);
|
||||
h.input(digest);
|
||||
h.input(g1_s.into_uncompressed().as_ref());
|
||||
h.input(g1_s_x.into_uncompressed().as_ref());
|
||||
hash_to_g2(h.result().as_ref()).into_affine()
|
||||
};
|
||||
|
||||
let tau_g2_s = compute_g2_s(key.tau_g1.0, key.tau_g1.1, 0);
|
||||
let alpha_g2_s = compute_g2_s(key.alpha_g1.0, key.alpha_g1.1, 1);
|
||||
let beta_g2_s = compute_g2_s(key.beta_g1.0, key.beta_g1.1, 2);
|
||||
|
||||
// Check the proofs-of-knowledge for tau/alpha/beta
|
||||
if !same_ratio(key.tau_g1, (tau_g2_s, key.tau_g2)) {
|
||||
return false;
|
||||
}
|
||||
if !same_ratio(key.alpha_g1, (alpha_g2_s, key.alpha_g2)) {
|
||||
return false;
|
||||
}
|
||||
if !same_ratio(key.beta_g1, (beta_g2_s, key.beta_g2)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Check the correctness of the generators for tau powers
|
||||
if after.tau_powers_g1[0] != G1Affine::one() {
|
||||
return false;
|
||||
}
|
||||
if after.tau_powers_g2[0] != G2Affine::one() {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Did the participant multiply the previous tau by the new one?
|
||||
if !same_ratio((before.tau_powers_g1[1], after.tau_powers_g1[1]), (tau_g2_s, key.tau_g2)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Did the participant multiply the previous alpha by the new one?
|
||||
if !same_ratio((before.alpha_tau_powers_g1[0], after.alpha_tau_powers_g1[0]), (alpha_g2_s, key.alpha_g2)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Did the participant multiply the previous beta by the new one?
|
||||
if !same_ratio((before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (beta_g2_s, key.beta_g2)) {
|
||||
return false;
|
||||
}
|
||||
if !same_ratio((before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (before.beta_g2, after.beta_g2)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Are the powers of tau correct?
|
||||
if !same_ratio(power_pairs(&after.tau_powers_g1), (after.tau_powers_g2[0], after.tau_powers_g2[1])) {
|
||||
return false;
|
||||
}
|
||||
if !same_ratio(power_pairs(&after.tau_powers_g2), (after.tau_powers_g1[0], after.tau_powers_g1[1])) {
|
||||
return false;
|
||||
}
|
||||
if !same_ratio(power_pairs(&after.alpha_tau_powers_g1), (after.tau_powers_g2[0], after.tau_powers_g2[1])) {
|
||||
return false;
|
||||
}
|
||||
if !same_ratio(power_pairs(&after.beta_tau_powers_g1), (after.tau_powers_g2[0], after.tau_powers_g2[1])) {
|
||||
return false;
|
||||
}
|
||||
|
||||
true
|
||||
}
|
||||
|
||||
/// Computes a random linear combination over v1/v2.
|
||||
///
|
||||
/// Checking that many pairs of elements are exponentiated by
|
||||
/// the same `x` can be achieved (with high probability) with
|
||||
/// the following technique:
|
||||
///
|
||||
/// Given v1 = [a, b, c] and v2 = [as, bs, cs], compute
|
||||
/// (a*r1 + b*r2 + c*r3, (as)*r1 + (bs)*r2 + (cs)*r3) for some
|
||||
/// random r1, r2, r3. Given (g, g^s)...
|
||||
///
|
||||
/// e(g, (as)*r1 + (bs)*r2 + (cs)*r3) = e(g^s, a*r1 + b*r2 + c*r3)
|
||||
///
|
||||
/// ... with high probability.
|
||||
fn merge_pairs<G: CurveAffine>(v1: &[G], v2: &[G]) -> (G, G)
|
||||
{
|
||||
use std::sync::{Arc, Mutex};
|
||||
use rand::{thread_rng};
|
||||
|
||||
assert_eq!(v1.len(), v2.len());
|
||||
|
||||
let chunk = (v1.len() / num_cpus::get()) + 1;
|
||||
|
||||
let s = Arc::new(Mutex::new(G::Projective::zero()));
|
||||
let sx = Arc::new(Mutex::new(G::Projective::zero()));
|
||||
|
||||
crossbeam::scope(|scope| {
|
||||
for (v1, v2) in v1.chunks(chunk).zip(v2.chunks(chunk)) {
|
||||
let s = s.clone();
|
||||
let sx = sx.clone();
|
||||
|
||||
scope.spawn(move || {
|
||||
// We do not need to be overly cautious of the RNG
|
||||
// used for this check.
|
||||
let rng = &mut thread_rng();
|
||||
|
||||
let mut wnaf = Wnaf::new();
|
||||
let mut local_s = G::Projective::zero();
|
||||
let mut local_sx = G::Projective::zero();
|
||||
|
||||
for (v1, v2) in v1.iter().zip(v2.iter()) {
|
||||
let rho = G::Scalar::rand(rng);
|
||||
let mut wnaf = wnaf.scalar(rho.into_repr());
|
||||
let v1 = wnaf.base(v1.into_projective());
|
||||
let v2 = wnaf.base(v2.into_projective());
|
||||
|
||||
local_s.add_assign(&v1);
|
||||
local_sx.add_assign(&v2);
|
||||
}
|
||||
|
||||
s.lock().unwrap().add_assign(&local_s);
|
||||
sx.lock().unwrap().add_assign(&local_sx);
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
let s = s.lock().unwrap().into_affine();
|
||||
let sx = sx.lock().unwrap().into_affine();
|
||||
|
||||
(s, sx)
|
||||
}
|
||||
|
||||
/// Construct a single pair (s, s^x) for a vector of
|
||||
/// the form [1, x, x^2, x^3, ...].
|
||||
fn power_pairs<G: CurveAffine>(v: &[G]) -> (G, G)
|
||||
{
|
||||
merge_pairs(&v[0..(v.len()-1)], &v[1..])
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_power_pairs() {
|
||||
use rand::thread_rng;
|
||||
|
||||
let rng = &mut thread_rng();
|
||||
|
||||
let mut v = vec![];
|
||||
let x = Fr::rand(rng);
|
||||
let mut acc = Fr::one();
|
||||
for _ in 0..100 {
|
||||
v.push(G1Affine::one().mul(acc).into_affine());
|
||||
acc.mul_assign(&x);
|
||||
}
|
||||
|
||||
let gx = G2Affine::one().mul(x).into_affine();
|
||||
|
||||
assert!(same_ratio(power_pairs(&v), (G2Affine::one(), gx)));
|
||||
|
||||
v[1] = v[1].mul(Fr::rand(rng)).into_affine();
|
||||
|
||||
assert!(!same_ratio(power_pairs(&v), (G2Affine::one(), gx)));
|
||||
}
|
||||
|
||||
/// Checks if pairs have the same ratio.
|
||||
fn same_ratio<G1: CurveAffine>(
|
||||
g1: (G1, G1),
|
||||
g2: (G1::Pair, G1::Pair)
|
||||
) -> bool
|
||||
{
|
||||
g1.0.pairing_with(&g2.1) == g1.1.pairing_with(&g2.0)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_same_ratio() {
|
||||
use rand::thread_rng;
|
||||
|
||||
let rng = &mut thread_rng();
|
||||
|
||||
let s = Fr::rand(rng);
|
||||
let g1 = G1Affine::one();
|
||||
let g2 = G2Affine::one();
|
||||
let g1_s = g1.mul(s).into_affine();
|
||||
let g2_s = g2.mul(s).into_affine();
|
||||
|
||||
assert!(same_ratio((g1, g1_s), (g2, g2_s)));
|
||||
assert!(!same_ratio((g1_s, g1), (g2, g2_s)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_accumulator_serialization() {
|
||||
use rand::thread_rng;
|
||||
|
||||
let rng = &mut thread_rng();
|
||||
let mut digest = (0..64).map(|_| rng.gen()).collect::<Vec<_>>();
|
||||
|
||||
let mut acc = Accumulator::new();
|
||||
let before = acc.clone();
|
||||
let (pk, sk) = keypair(rng, &digest);
|
||||
acc.transform(&sk);
|
||||
assert!(verify_transform(&before, &acc, &pk, &digest));
|
||||
digest[0] = !digest[0];
|
||||
assert!(!verify_transform(&before, &acc, &pk, &digest));
|
||||
let mut v = Vec::with_capacity(ACCUMULATOR_BYTE_SIZE - 64);
|
||||
acc.serialize(&mut v, UseCompression::No).unwrap();
|
||||
assert_eq!(v.len(), ACCUMULATOR_BYTE_SIZE - 64);
|
||||
let deserialized = Accumulator::deserialize(&mut &v[..], UseCompression::No, CheckForCorrectness::No).unwrap();
|
||||
assert!(acc == deserialized);
|
||||
}
|
||||
|
||||
/// Compute BLAKE2b("")
|
||||
pub fn blank_hash() -> GenericArray<u8, U64> {
|
||||
Blake2b::new().result()
|
||||
}
|
||||
|
||||
/// Abstraction over a reader which hashes the data being read.
|
||||
pub struct HashReader<R: Read> {
|
||||
reader: R,
|
||||
hasher: Blake2b
|
||||
}
|
||||
|
||||
impl<R: Read> HashReader<R> {
|
||||
/// Construct a new `HashReader` given an existing `reader` by value.
|
||||
pub fn new(reader: R) -> Self {
|
||||
HashReader {
|
||||
reader: reader,
|
||||
hasher: Blake2b::default()
|
||||
}
|
||||
}
|
||||
|
||||
/// Destroy this reader and return the hash of what was read.
|
||||
pub fn into_hash(self) -> GenericArray<u8, U64> {
|
||||
self.hasher.result()
|
||||
}
|
||||
}
|
||||
|
||||
impl<R: Read> Read for HashReader<R> {
|
||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||
let bytes = self.reader.read(buf)?;
|
||||
|
||||
if bytes > 0 {
|
||||
self.hasher.input(&buf[0..bytes]);
|
||||
}
|
||||
|
||||
Ok(bytes)
|
||||
}
|
||||
}
|
||||
|
||||
/// Abstraction over a writer which hashes the data being written.
|
||||
pub struct HashWriter<W: Write> {
|
||||
writer: W,
|
||||
hasher: Blake2b
|
||||
}
|
||||
|
||||
impl<W: Write> HashWriter<W> {
|
||||
/// Construct a new `HashWriter` given an existing `writer` by value.
|
||||
pub fn new(writer: W) -> Self {
|
||||
HashWriter {
|
||||
writer: writer,
|
||||
hasher: Blake2b::default()
|
||||
}
|
||||
}
|
||||
|
||||
/// Destroy this writer and return the hash of what was written.
|
||||
pub fn into_hash(self) -> GenericArray<u8, U64> {
|
||||
self.hasher.result()
|
||||
}
|
||||
}
|
||||
|
||||
impl<W: Write> Write for HashWriter<W> {
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
let bytes = self.writer.write(buf)?;
|
||||
|
||||
if bytes > 0 {
|
||||
self.hasher.input(&buf[0..bytes]);
|
||||
}
|
||||
|
||||
Ok(bytes)
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
self.writer.flush()
|
||||
}
|
||||
}
|
120
src/bn256/mod.rs
Normal file
@ -0,0 +1,120 @@
|
||||
extern crate pairing;
|
||||
extern crate rand;
|
||||
extern crate crossbeam;
|
||||
extern crate num_cpus;
|
||||
extern crate blake2;
|
||||
extern crate generic_array;
|
||||
extern crate typenum;
|
||||
extern crate byteorder;
|
||||
extern crate ff;
|
||||
|
||||
use self::ff::{Field, PrimeField};
|
||||
use self::byteorder::{ReadBytesExt, BigEndian};
|
||||
use self::rand::{SeedableRng, Rng, Rand};
|
||||
use self::rand::chacha::ChaChaRng;
|
||||
use self::pairing::bn256::{Bn256};
|
||||
use self::pairing::*;
|
||||
use std::io::{self, Read, Write};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use self::generic_array::GenericArray;
|
||||
use self::typenum::consts::U64;
|
||||
use self::blake2::{Blake2b, Digest};
|
||||
use std::fmt;
|
||||
|
||||
use crate::parameters::*;
|
||||
use crate::keypair::*;
|
||||
use crate::utils::*;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct Bn256CeremonyParameters {
|
||||
|
||||
}
|
||||
|
||||
impl PowersOfTauParameters for Bn256CeremonyParameters {
|
||||
const REQUIRED_POWER: usize = 26; // generate to have roughly 64 million constraints
|
||||
|
||||
// This ceremony is based on the BN256 elliptic curve construction.
|
||||
const G1_UNCOMPRESSED_BYTE_SIZE: usize = 64;
|
||||
const G2_UNCOMPRESSED_BYTE_SIZE: usize = 128;
|
||||
const G1_COMPRESSED_BYTE_SIZE: usize = 32;
|
||||
const G2_COMPRESSED_BYTE_SIZE: usize = 64;
|
||||
}
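// Illustrative sanity checks, not part of the original file: the uncompressed
// BN256 encodings declared above are exactly twice the size of their
// compressed counterparts, and G2 elements are twice the size of G1 elements.
#[test]
fn test_bn256_parameter_sizes() {
    assert_eq!(Bn256CeremonyParameters::G1_UNCOMPRESSED_BYTE_SIZE,
               2 * Bn256CeremonyParameters::G1_COMPRESSED_BYTE_SIZE);
    assert_eq!(Bn256CeremonyParameters::G2_UNCOMPRESSED_BYTE_SIZE,
               2 * Bn256CeremonyParameters::G2_COMPRESSED_BYTE_SIZE);
    assert_eq!(Bn256CeremonyParameters::G2_UNCOMPRESSED_BYTE_SIZE,
               2 * Bn256CeremonyParameters::G1_UNCOMPRESSED_BYTE_SIZE);
}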
|
||||
|
||||
#[test]
|
||||
fn test_pubkey_serialization() {
|
||||
use self::rand::thread_rng;
|
||||
|
||||
let rng = &mut thread_rng();
|
||||
let digest = (0..64).map(|_| rng.gen()).collect::<Vec<_>>();
|
||||
let (pk, _) = keypair::<_, Bn256>(rng, &digest);
|
||||
let mut v = vec![];
|
||||
pk.serialize(&mut v).unwrap();
|
||||
assert_eq!(v.len(), Bn256CeremonyParameters::PUBLIC_KEY_SIZE);
|
||||
let deserialized = PublicKey::<Bn256>::deserialize(&mut &v[..]).unwrap();
|
||||
assert!(pk == deserialized);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_power_pairs() {
|
||||
use self::rand::thread_rng;
|
||||
use self::pairing::bn256::{Fr, G1Affine, G2Affine};
|
||||
let rng = &mut thread_rng();
|
||||
|
||||
let mut v = vec![];
|
||||
let x = Fr::rand(rng);
|
||||
let mut acc = Fr::one();
|
||||
for _ in 0..100 {
|
||||
v.push(G1Affine::one().mul(acc).into_affine());
|
||||
acc.mul_assign(&x);
|
||||
}
|
||||
|
||||
let gx = G2Affine::one().mul(x).into_affine();
|
||||
|
||||
assert!(same_ratio(power_pairs(&v), (G2Affine::one(), gx)));
|
||||
|
||||
v[1] = v[1].mul(Fr::rand(rng)).into_affine();
|
||||
|
||||
assert!(!same_ratio(power_pairs(&v), (G2Affine::one(), gx)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_same_ratio() {
|
||||
use self::rand::thread_rng;
|
||||
use self::pairing::bn256::{Fr, G1Affine, G2Affine};
|
||||
|
||||
let rng = &mut thread_rng();
|
||||
|
||||
let s = Fr::rand(rng);
|
||||
let g1 = G1Affine::one();
|
||||
let g2 = G2Affine::one();
|
||||
let g1_s = g1.mul(s).into_affine();
|
||||
let g2_s = g2.mul(s).into_affine();
|
||||
|
||||
assert!(same_ratio((g1, g1_s), (g2, g2_s)));
|
||||
assert!(!same_ratio((g1_s, g1), (g2, g2_s)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_accumulator_serialization() {
|
||||
use crate::accumulator::*;
|
||||
|
||||
use self::rand::thread_rng;
|
||||
use self::pairing::bn256::{Bn256, Fr, G1Affine, G2Affine};
|
||||
use self::PowersOfTauParameters;
|
||||
|
||||
let rng = &mut thread_rng();
|
||||
let mut digest = (0..64).map(|_| rng.gen()).collect::<Vec<_>>();
|
||||
let params = Bn256CeremonyParameters{};
|
||||
let mut acc = Accumulator::<Bn256, _>::new(params.clone());
|
||||
let before = acc.clone();
|
||||
let (pk, sk) = keypair::<_, Bn256>(rng, &digest);
|
||||
acc.transform(&sk);
|
||||
assert!(verify_transform(&before, &acc, &pk, &digest));
|
||||
digest[0] = !digest[0];
|
||||
assert!(!verify_transform(&before, &acc, &pk, &digest));
|
||||
let mut v = Vec::with_capacity(Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE - 64);
|
||||
acc.serialize(&mut v, UseCompression::No).unwrap();
|
||||
assert_eq!(v.len(), Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE - 64);
|
||||
let deserialized = Accumulator::deserialize(&mut &v[..], UseCompression::No, CheckForCorrectness::No, params).unwrap();
|
||||
assert!(acc == deserialized);
|
||||
}
|
307
src/keypair.rs
Normal file
@ -0,0 +1,307 @@
|
||||
extern crate pairing;
|
||||
extern crate rand;
|
||||
extern crate crossbeam;
|
||||
extern crate num_cpus;
|
||||
extern crate blake2;
|
||||
extern crate generic_array;
|
||||
extern crate typenum;
|
||||
extern crate byteorder;
|
||||
extern crate ff;
|
||||
extern crate memmap;
|
||||
extern crate itertools;
|
||||
|
||||
use itertools::Itertools;
|
||||
use memmap::{Mmap, MmapMut};
|
||||
use self::ff::{Field, PrimeField};
|
||||
use self::byteorder::{ReadBytesExt, BigEndian};
|
||||
use self::rand::{SeedableRng, Rng, Rand};
|
||||
use self::rand::chacha::ChaChaRng;
|
||||
use self::pairing::bn256::{Bn256};
|
||||
use self::pairing::*;
|
||||
use std::io::{self, Read, Write};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use self::generic_array::GenericArray;
|
||||
use self::typenum::consts::U64;
|
||||
use self::blake2::{Blake2b, Digest};
|
||||
use std::fmt;
|
||||
|
||||
use super::utils::*;
|
||||
use super::parameters::*;
|
||||
|
||||
/// Contains terms of the form (s<sub>1</sub>, s<sub>1</sub><sup>x</sup>, H(s<sub>1</sub><sup>x</sup>)<sub>2</sub>, H(s<sub>1</sub><sup>x</sup>)<sub>2</sub><sup>x</sup>)
|
||||
/// for all x in τ, α and β, and some s chosen randomly by its creator. The function H "hashes into" the group G2. No points in the public key may be the identity.
|
||||
///
|
||||
/// The elements in G2 are used to verify transformations of the accumulator. By its nature, the public key proves
|
||||
/// knowledge of τ, α and β.
|
||||
///
|
||||
/// It is necessary to verify `same_ratio`((s<sub>1</sub>, s<sub>1</sub><sup>x</sup>), (H(s<sub>1</sub><sup>x</sup>)<sub>2</sub>, H(s<sub>1</sub><sup>x</sup>)<sub>2</sub><sup>x</sup>)).
|
||||
#[derive(Eq)]
|
||||
pub struct PublicKey<E: Engine> {
|
||||
pub tau_g1: (E::G1Affine, E::G1Affine),
|
||||
pub alpha_g1: (E::G1Affine, E::G1Affine),
|
||||
pub beta_g1: (E::G1Affine, E::G1Affine),
|
||||
pub tau_g2: E::G2Affine,
|
||||
pub alpha_g2: E::G2Affine,
|
||||
pub beta_g2: E::G2Affine
|
||||
}
|
||||
|
||||
impl<E: Engine> PartialEq for PublicKey<E> {
|
||||
fn eq(&self, other: &PublicKey<E>) -> bool {
|
||||
self.tau_g1.0 == other.tau_g1.0 &&
|
||||
self.tau_g1.1 == other.tau_g1.1 &&
|
||||
self.alpha_g1.0 == other.alpha_g1.0 &&
|
||||
self.alpha_g1.1 == other.alpha_g1.1 &&
|
||||
self.beta_g1.0 == other.beta_g1.0 &&
|
||||
self.beta_g1.1 == other.beta_g1.1 &&
|
||||
self.tau_g2 == other.tau_g2 &&
|
||||
self.alpha_g2 == other.alpha_g2 &&
|
||||
self.beta_g2 == other.beta_g2
|
||||
}
|
||||
}
|
||||
|
||||
/// Contains the secrets τ, α and β that the participant of the ceremony must destroy.
|
||||
pub struct PrivateKey<E: Engine> {
|
||||
pub tau: E::Fr,
|
||||
pub alpha: E::Fr,
|
||||
pub beta: E::Fr
|
||||
}
|
||||
|
||||
/// Constructs a keypair given an RNG and a 64-byte transcript `digest`.
|
||||
pub fn keypair<R: Rng, E: Engine>(rng: &mut R, digest: &[u8]) -> (PublicKey<E>, PrivateKey<E>)
|
||||
{
|
||||
assert_eq!(digest.len(), 64);
|
||||
|
||||
// tau is a contribution to the "powers of tau", in a set of points of the form "tau^i * G"
|
||||
let tau = E::Fr::rand(rng);
|
||||
// alpha and beta are contributions of the form "alpha * tau^i * G" and "beta * tau^i * G" that are required
|
||||
// for construction of the polynomials
|
||||
let alpha = E::Fr::rand(rng);
|
||||
let beta = E::Fr::rand(rng);
|
||||
|
||||
let mut op = |x: E::Fr, personalization: u8| {
|
||||
// Sample random g^s
|
||||
let g1_s = E::G1::rand(rng).into_affine();
|
||||
// Compute g^{s*x}
|
||||
let g1_s_x = g1_s.mul(x).into_affine();
|
||||
// Compute BLAKE2b(personalization | transcript | g^s | g^{s*x})
|
||||
let h: generic_array::GenericArray<u8, U64> = {
|
||||
let mut h = Blake2b::default();
|
||||
h.input(&[personalization]);
|
||||
h.input(digest);
|
||||
h.input(g1_s.into_uncompressed().as_ref());
|
||||
h.input(g1_s_x.into_uncompressed().as_ref());
|
||||
h.result()
|
||||
};
|
||||
// Hash into G2 as g^{s'}
|
||||
let g2_s: E::G2Affine = hash_to_g2::<E>(h.as_ref()).into_affine();
|
||||
// Compute g^{s'*x}
|
||||
let g2_s_x = g2_s.mul(x).into_affine();
|
||||
|
||||
((g1_s, g1_s_x), g2_s_x)
|
||||
};
|
||||
|
||||
// these "public keys" are requried for for next participants to check that points are in fact
|
||||
// sequential powers
|
||||
let pk_tau = op(tau, 0);
|
||||
let pk_alpha = op(alpha, 1);
|
||||
let pk_beta = op(beta, 2);
|
||||
|
||||
(
|
||||
PublicKey {
|
||||
tau_g1: pk_tau.0,
|
||||
alpha_g1: pk_alpha.0,
|
||||
beta_g1: pk_beta.0,
|
||||
tau_g2: pk_tau.1,
|
||||
alpha_g2: pk_alpha.1,
|
||||
beta_g2: pk_beta.1,
|
||||
},
|
||||
PrivateKey {
|
||||
tau: tau,
|
||||
alpha: alpha,
|
||||
beta: beta
|
||||
}
|
||||
)
|
||||
}
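// Minimal sketch, not part of the original file, of the proof-of-knowledge
// relation documented above: a verifier recomputes the hashed G2 base from the
// transcript and checks `same_ratio` on the tau component. `same_ratio` and
// `hash_to_g2` are assumed to be re-exported by `super::utils`, as the glob
// import above and their use elsewhere in this commit suggest.
#[test]
fn test_keypair_proof_of_knowledge() {
    use self::rand::thread_rng;
    use self::pairing::bn256::Bn256;

    let rng = &mut thread_rng();
    let digest = (0..64).map(|_| rng.gen()).collect::<Vec<u8>>();
    let (pk, _) = keypair::<_, Bn256>(rng, &digest);

    // Recompute BLAKE2b(personalization | digest | g^s | g^{s*tau}) for the
    // tau component (personalization byte 0), exactly as `op` does above.
    let mut h = Blake2b::default();
    h.input(&[0]);
    h.input(&digest);
    h.input(pk.tau_g1.0.into_uncompressed().as_ref());
    h.input(pk.tau_g1.1.into_uncompressed().as_ref());
    let tau_g2_s = hash_to_g2::<Bn256>(h.result().as_ref()).into_affine();

    // Both pairs must share the same unknown exponent tau.
    assert!(same_ratio(pk.tau_g1, (tau_g2_s, pk.tau_g2)));
}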
|
||||
|
||||
impl<E: Engine> PublicKey<E> {
|
||||
/// Serialize the public key. Points are always in uncompressed form.
|
||||
pub fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()>
|
||||
{
|
||||
write_point(writer, &self.tau_g1.0, UseCompression::No)?;
|
||||
write_point(writer, &self.tau_g1.1, UseCompression::No)?;
|
||||
|
||||
write_point(writer, &self.alpha_g1.0, UseCompression::No)?;
|
||||
write_point(writer, &self.alpha_g1.1, UseCompression::No)?;
|
||||
|
||||
write_point(writer, &self.beta_g1.0, UseCompression::No)?;
|
||||
write_point(writer, &self.beta_g1.1, UseCompression::No)?;
|
||||
|
||||
write_point(writer, &self.tau_g2, UseCompression::No)?;
|
||||
write_point(writer, &self.alpha_g2, UseCompression::No)?;
|
||||
write_point(writer, &self.beta_g2, UseCompression::No)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Deserialize the public key. Points are always in uncompressed form, and
|
||||
/// always checked, since there aren't very many of them. Does not allow any
|
||||
/// points at infinity.
|
||||
pub fn deserialize<R: Read>(reader: &mut R) -> Result<PublicKey<E>, DeserializationError>
|
||||
{
|
||||
fn read_uncompressed<EE: Engine, C: CurveAffine<Engine = EE, Scalar = EE::Fr>, R: Read>(reader: &mut R) -> Result<C, DeserializationError> {
|
||||
let mut repr = C::Uncompressed::empty();
|
||||
reader.read_exact(repr.as_mut())?;
|
||||
let v = repr.into_affine()?;
|
||||
|
||||
if v.is_zero() {
|
||||
Err(DeserializationError::PointAtInfinity)
|
||||
} else {
|
||||
Ok(v)
|
||||
}
|
||||
}
|
||||
|
||||
let tau_g1_s = read_uncompressed::<E, _, _>(reader)?;
|
||||
let tau_g1_s_tau = read_uncompressed::<E, _, _>(reader)?;
|
||||
|
||||
let alpha_g1_s = read_uncompressed::<E, _, _>(reader)?;
|
||||
let alpha_g1_s_alpha = read_uncompressed::<E, _, _>(reader)?;
|
||||
|
||||
let beta_g1_s = read_uncompressed::<E, _, _>(reader)?;
|
||||
let beta_g1_s_beta = read_uncompressed::<E, _, _>(reader)?;
|
||||
|
||||
let tau_g2 = read_uncompressed::<E, _, _>(reader)?;
|
||||
let alpha_g2 = read_uncompressed::<E, _, _>(reader)?;
|
||||
let beta_g2 = read_uncompressed::<E, _, _>(reader)?;
|
||||
|
||||
Ok(PublicKey {
|
||||
tau_g1: (tau_g1_s, tau_g1_s_tau),
|
||||
alpha_g1: (alpha_g1_s, alpha_g1_s_alpha),
|
||||
beta_g1: (beta_g1_s, beta_g1_s_beta),
|
||||
tau_g2: tau_g2,
|
||||
alpha_g2: alpha_g2,
|
||||
beta_g2: beta_g2
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: Engine> PublicKey<E> {
|
||||
|
||||
/// Writes the key into the memory map, computing the write offset from
/// whether the preceding accumulator contribution was stored in compressed
/// or uncompressed form.
|
||||
pub fn write<P>(
|
||||
&self,
|
||||
output_map: &mut MmapMut,
|
||||
accumulator_was_compressed: UseCompression
|
||||
)
|
||||
-> io::Result<()>
|
||||
where P: PowersOfTauParameters
|
||||
{
|
||||
let mut position = match accumulator_was_compressed {
|
||||
UseCompression::Yes => {
|
||||
P::CONTRIBUTION_BYTE_SIZE - P::PUBLIC_KEY_SIZE
|
||||
},
|
||||
UseCompression::No => {
|
||||
P::ACCUMULATOR_BYTE_SIZE
|
||||
}
|
||||
};
|
||||
|
||||
(&mut output_map[position..]).write(&self.tau_g1.0.into_uncompressed().as_ref())?;
|
||||
position += P::G1_UNCOMPRESSED_BYTE_SIZE;
|
||||
|
||||
(&mut output_map[position..]).write(&self.tau_g1.1.into_uncompressed().as_ref())?;
|
||||
position += P::G1_UNCOMPRESSED_BYTE_SIZE;
|
||||
|
||||
(&mut output_map[position..]).write(&self.alpha_g1.0.into_uncompressed().as_ref())?;
|
||||
position += P::G1_UNCOMPRESSED_BYTE_SIZE;
|
||||
|
||||
(&mut output_map[position..]).write(&self.alpha_g1.1.into_uncompressed().as_ref())?;
|
||||
position += P::G1_UNCOMPRESSED_BYTE_SIZE;
|
||||
|
||||
(&mut output_map[position..]).write(&self.beta_g1.0.into_uncompressed().as_ref())?;
|
||||
position += P::G1_UNCOMPRESSED_BYTE_SIZE;
|
||||
|
||||
(&mut output_map[position..]).write(&self.beta_g1.1.into_uncompressed().as_ref())?;
|
||||
position += P::G1_UNCOMPRESSED_BYTE_SIZE;
|
||||
|
||||
(&mut output_map[position..]).write(&self.tau_g2.into_uncompressed().as_ref())?;
|
||||
position += P::G2_UNCOMPRESSED_BYTE_SIZE;
|
||||
|
||||
(&mut output_map[position..]).write(&self.alpha_g2.into_uncompressed().as_ref())?;
|
||||
position += P::G2_UNCOMPRESSED_BYTE_SIZE;
|
||||
|
||||
(&mut output_map[position..]).write(&self.beta_g2.into_uncompressed().as_ref())?;
|
||||
|
||||
output_map.flush()?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Deserialize the public key. Points are always in uncompressed form, and
|
||||
/// always checked, since there aren't very many of them. Does not allow any
|
||||
/// points at infinity.
|
||||
pub fn read<P>(
|
||||
input_map: &Mmap,
|
||||
accumulator_was_compressed: UseCompression
|
||||
) -> Result<Self, DeserializationError>
|
||||
where P: PowersOfTauParameters
|
||||
{
|
||||
fn read_uncompressed<EE: Engine, C: CurveAffine<Engine = EE, Scalar = EE::Fr>>(input_map: &Mmap, position: usize) -> Result<C, DeserializationError> {
|
||||
let mut repr = C::Uncompressed::empty();
|
||||
let element_size = C::Uncompressed::size();
|
||||
let memory_slice = input_map.get(position..position+element_size).expect("must read point data from file");
|
||||
memory_slice.clone().read_exact(repr.as_mut())?;
|
||||
let v = repr.into_affine()?;
|
||||
|
||||
if v.is_zero() {
|
||||
Err(DeserializationError::PointAtInfinity)
|
||||
} else {
|
||||
Ok(v)
|
||||
}
|
||||
}
|
||||
|
||||
let mut position = match accumulator_was_compressed {
|
||||
UseCompression::Yes => {
|
||||
P::CONTRIBUTION_BYTE_SIZE - P::PUBLIC_KEY_SIZE
|
||||
},
|
||||
UseCompression::No => {
|
||||
P::ACCUMULATOR_BYTE_SIZE
|
||||
}
|
||||
};
|
||||
|
||||
let tau_g1_s = read_uncompressed::<E, _>(input_map, position)?;
|
||||
position += P::G1_UNCOMPRESSED_BYTE_SIZE;
|
||||
|
||||
let tau_g1_s_tau = read_uncompressed::<E, _>(input_map, position)?;
|
||||
position += P::G1_UNCOMPRESSED_BYTE_SIZE;
|
||||
|
||||
let alpha_g1_s = read_uncompressed::<E, _>(input_map, position)?;
|
||||
position += P::G1_UNCOMPRESSED_BYTE_SIZE;
|
||||
|
||||
let alpha_g1_s_alpha = read_uncompressed::<E, _>(input_map, position)?;
|
||||
position += P::G1_UNCOMPRESSED_BYTE_SIZE;
|
||||
|
||||
let beta_g1_s = read_uncompressed::<E, _>(input_map, position)?;
|
||||
position += P::G1_UNCOMPRESSED_BYTE_SIZE;
|
||||
|
||||
let beta_g1_s_beta = read_uncompressed::<E, _>(input_map, position)?;
|
||||
position += P::G1_UNCOMPRESSED_BYTE_SIZE;
|
||||
|
||||
let tau_g2 = read_uncompressed::<E, _>(input_map, position)?;
|
||||
position += P::G2_UNCOMPRESSED_BYTE_SIZE;
|
||||
|
||||
let alpha_g2 = read_uncompressed::<E, _>(input_map, position)?;
|
||||
position += P::G2_UNCOMPRESSED_BYTE_SIZE;
|
||||
|
||||
let beta_g2 = read_uncompressed::<E, _>(input_map, position)?;
|
||||
|
||||
Ok(PublicKey {
|
||||
tau_g1: (tau_g1_s, tau_g1_s_tau),
|
||||
alpha_g1: (alpha_g1_s, alpha_g1_s_alpha),
|
||||
beta_g1: (beta_g1_s, beta_g1_s_beta),
|
||||
tau_g2: tau_g2,
|
||||
alpha_g2: alpha_g2,
|
||||
beta_g2: beta_g2
|
||||
})
|
||||
}
|
||||
}
|
869
src/lib.rs
@ -1,859 +1,10 @@
|
||||
//! This ceremony constructs the "powers of tau" for Jens Groth's 2016 zk-SNARK proving
|
||||
//! system using the BLS12-381 pairing-friendly elliptic curve construction.
|
||||
//!
|
||||
//! # Overview
|
||||
//!
|
||||
//! Participants of the ceremony receive a "challenge" file containing:
|
||||
//!
|
||||
//! * the BLAKE2b hash of the last file entered into the transcript
|
||||
//! * an `Accumulator` (with curve points encoded in uncompressed form for fast deserialization)
|
||||
//!
|
||||
//! The participant runs a tool which generates a random keypair (`PublicKey`, `PrivateKey`)
|
||||
//! used for modifying the `Accumulator` from the "challenge" file. The keypair is then used to
|
||||
//! transform the `Accumulator`, and a "response" file is generated containing:
|
||||
//!
|
||||
//! * the BLAKE2b hash of the "challenge" file (thus forming a hash chain over the entire transcript)
|
||||
//! * an `Accumulator` (with curve points encoded in compressed form for fast uploading)
|
||||
//! * the `PublicKey`
|
||||
//!
|
||||
//! This "challenge" file is entered into the protocol transcript. A given transcript is valid
|
||||
//! if the transformations between consecutive `Accumulator`s verify with their respective
|
||||
//! `PublicKey`s. Participants (and the public) can ensure that their contribution to the
|
||||
//! `Accumulator` was accepted by ensuring the transcript contains their "response" file, ideally
|
||||
//! by comparison of the BLAKE2b hash of the "response" file.
|
||||
//!
|
||||
//! After some time has elapsed for participants to contribute to the ceremony, a participant is
|
||||
//! simulated with a randomness beacon. The resulting `Accumulator` contains partial zk-SNARK
|
||||
//! public parameters for all circuits within a bounded size.
|
||||
|
||||
extern crate pairing;
|
||||
extern crate rand;
|
||||
extern crate crossbeam;
|
||||
extern crate num_cpus;
|
||||
extern crate blake2;
|
||||
extern crate generic_array;
|
||||
extern crate typenum;
|
||||
extern crate byteorder;
|
||||
|
||||
use byteorder::{ReadBytesExt, BigEndian};
|
||||
use rand::{SeedableRng, Rng, Rand};
|
||||
use rand::chacha::ChaChaRng;
|
||||
use pairing::bls12_381::*;
|
||||
use pairing::*;
|
||||
use std::io::{self, Read, Write};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use generic_array::GenericArray;
|
||||
use typenum::consts::U64;
|
||||
use blake2::{Blake2b, Digest};
|
||||
use std::fmt;
|
||||
|
||||
// This ceremony is based on the BLS12-381 elliptic curve construction.
|
||||
const G1_UNCOMPRESSED_BYTE_SIZE: usize = 96;
|
||||
const G2_UNCOMPRESSED_BYTE_SIZE: usize = 192;
|
||||
const G1_COMPRESSED_BYTE_SIZE: usize = 48;
|
||||
const G2_COMPRESSED_BYTE_SIZE: usize = 96;
|
||||
|
||||
/// The accumulator supports circuits with 2^21 multiplication gates.
|
||||
const TAU_POWERS_LENGTH: usize = (1 << 21);
|
||||
|
||||
/// More tau powers are needed in G1 because the Groth16 H query
|
||||
/// includes terms of the form tau^i * (tau^m - 1) = tau^(i+m) - tau^i
|
||||
/// where the largest i = m - 2, requiring the computation of tau^(2m - 2)
|
||||
/// and thus giving us a vector length of 2^22 - 1.
|
||||
const TAU_POWERS_G1_LENGTH: usize = (TAU_POWERS_LENGTH << 1) - 1;
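// Worked check of the count above (illustrative, not part of the original
// file): with m = TAU_POWERS_LENGTH = 2^21, the largest exponent needed by the
// H query is (m - 2) + m = 2m - 2, so tau^0 .. tau^{2m - 2} gives
// 2m - 1 = 2^22 - 1 powers.
#[test]
fn test_tau_powers_g1_length() {
    let m = TAU_POWERS_LENGTH;
    let largest_h_query_exponent = (m - 2) + m;
    assert_eq!(TAU_POWERS_G1_LENGTH, largest_h_query_exponent + 1);
    assert_eq!(TAU_POWERS_G1_LENGTH, (1 << 22) - 1);
}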
|
||||
|
||||
/// The size of the accumulator on disk.
|
||||
pub const ACCUMULATOR_BYTE_SIZE: usize = (TAU_POWERS_G1_LENGTH * G1_UNCOMPRESSED_BYTE_SIZE) + // g1 tau powers
|
||||
(TAU_POWERS_LENGTH * G2_UNCOMPRESSED_BYTE_SIZE) + // g2 tau powers
|
||||
(TAU_POWERS_LENGTH * G1_UNCOMPRESSED_BYTE_SIZE) + // alpha tau powers
|
||||
(TAU_POWERS_LENGTH * G1_UNCOMPRESSED_BYTE_SIZE) // beta tau powers
|
||||
+ G2_UNCOMPRESSED_BYTE_SIZE // beta in g2
|
||||
+ 64; // blake2b hash of previous contribution
|
||||
|
||||
/// The "public key" is used to verify a contribution was correctly
|
||||
/// computed.
|
||||
pub const PUBLIC_KEY_SIZE: usize = 3 * G2_UNCOMPRESSED_BYTE_SIZE + // tau, alpha, and beta in g2
|
||||
6 * G1_UNCOMPRESSED_BYTE_SIZE; // (s1, s1*tau), (s2, s2*alpha), (s3, s3*beta) in g1
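// Worked example of the formula above (illustrative, not part of the original
// file): for BLS12-381 this evaluates to 3 * 192 + 6 * 96 = 1152 bytes.
#[test]
fn test_public_key_size_value() {
    assert_eq!(PUBLIC_KEY_SIZE, 3 * 192 + 6 * 96);
    assert_eq!(PUBLIC_KEY_SIZE, 1152);
}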
|
||||
|
||||
/// The size of the contribution on disk.
|
||||
pub const CONTRIBUTION_BYTE_SIZE: usize = (TAU_POWERS_G1_LENGTH * G1_COMPRESSED_BYTE_SIZE) + // g1 tau powers
|
||||
(TAU_POWERS_LENGTH * G2_COMPRESSED_BYTE_SIZE) + // g2 tau powers
|
||||
(TAU_POWERS_LENGTH * G1_COMPRESSED_BYTE_SIZE) + // alpha tau powers
|
||||
(TAU_POWERS_LENGTH * G1_COMPRESSED_BYTE_SIZE) // beta tau powers
|
||||
+ G2_COMPRESSED_BYTE_SIZE // beta in g2
|
||||
+ 64 // blake2b hash of input accumulator
|
||||
+ PUBLIC_KEY_SIZE; // public key
|
||||
|
||||
/// Hashes to G2 using the first 32 bytes of `digest`. Panics if `digest` is less
|
||||
/// than 32 bytes.
|
||||
fn hash_to_g2(mut digest: &[u8]) -> G2
|
||||
{
|
||||
assert!(digest.len() >= 32);
|
||||
|
||||
let mut seed = Vec::with_capacity(8);
|
||||
|
||||
for _ in 0..8 {
|
||||
seed.push(digest.read_u32::<BigEndian>().expect("assertion above guarantees this to work"));
|
||||
}
|
||||
|
||||
ChaChaRng::from_seed(&seed).gen()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_hash_to_g2() {
|
||||
assert!(
|
||||
hash_to_g2(&[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33])
|
||||
==
|
||||
hash_to_g2(&[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,34])
|
||||
);
|
||||
|
||||
assert!(
|
||||
hash_to_g2(&[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32])
|
||||
!=
|
||||
hash_to_g2(&[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,33])
|
||||
);
|
||||
}
|
||||
|
||||
/// Contains terms of the form (s<sub>1</sub>, s<sub>1</sub><sup>x</sup>, H(s<sub>1</sub><sup>x</sup>)<sub>2</sub>, H(s<sub>1</sub><sup>x</sup>)<sub>2</sub><sup>x</sup>)
|
||||
/// for all x in τ, α and β, and some s chosen randomly by its creator. The function H "hashes into" the group G2. No points in the public key may be the identity.
|
||||
///
|
||||
/// The elements in G2 are used to verify transformations of the accumulator. By its nature, the public key proves
|
||||
/// knowledge of τ, α and β.
|
||||
///
|
||||
/// It is necessary to verify `same_ratio`((s<sub>1</sub>, s<sub>1</sub><sup>x</sup>), (H(s<sub>1</sub><sup>x</sup>)<sub>2</sub>, H(s<sub>1</sub><sup>x</sup>)<sub>2</sub><sup>x</sup>)).
|
||||
#[derive(PartialEq, Eq)]
|
||||
pub struct PublicKey {
|
||||
tau_g1: (G1Affine, G1Affine),
|
||||
alpha_g1: (G1Affine, G1Affine),
|
||||
beta_g1: (G1Affine, G1Affine),
|
||||
tau_g2: G2Affine,
|
||||
alpha_g2: G2Affine,
|
||||
beta_g2: G2Affine
|
||||
}
|
||||
|
||||
/// Contains the secrets τ, α and β that the participant of the ceremony must destroy.
|
||||
pub struct PrivateKey {
|
||||
tau: Fr,
|
||||
alpha: Fr,
|
||||
beta: Fr
|
||||
}
|
||||
|
||||
/// Constructs a keypair given an RNG and a 64-byte transcript `digest`.
|
||||
pub fn keypair<R: Rng>(rng: &mut R, digest: &[u8]) -> (PublicKey, PrivateKey)
|
||||
{
|
||||
assert_eq!(digest.len(), 64);
|
||||
|
||||
let tau = Fr::rand(rng);
|
||||
let alpha = Fr::rand(rng);
|
||||
let beta = Fr::rand(rng);
|
||||
|
||||
let mut op = |x, personalization: u8| {
|
||||
// Sample random g^s
|
||||
let g1_s = G1::rand(rng).into_affine();
|
||||
// Compute g^{s*x}
|
||||
let g1_s_x = g1_s.mul(x).into_affine();
|
||||
// Compute BLAKE2b(personalization | transcript | g^s | g^{s*x})
|
||||
let h = {
|
||||
let mut h = Blake2b::default();
|
||||
h.input(&[personalization]);
|
||||
h.input(digest);
|
||||
h.input(g1_s.into_uncompressed().as_ref());
|
||||
h.input(g1_s_x.into_uncompressed().as_ref());
|
||||
h.result()
|
||||
};
|
||||
// Hash into G2 as g^{s'}
|
||||
let g2_s = hash_to_g2(h.as_ref()).into_affine();
|
||||
// Compute g^{s'*x}
|
||||
let g2_s_x = g2_s.mul(x).into_affine();
|
||||
|
||||
((g1_s, g1_s_x), g2_s_x)
|
||||
};
|
||||
|
||||
let pk_tau = op(tau, 0);
|
||||
let pk_alpha = op(alpha, 1);
|
||||
let pk_beta = op(beta, 2);
|
||||
|
||||
(
|
||||
PublicKey {
|
||||
tau_g1: pk_tau.0,
|
||||
alpha_g1: pk_alpha.0,
|
||||
beta_g1: pk_beta.0,
|
||||
tau_g2: pk_tau.1,
|
||||
alpha_g2: pk_alpha.1,
|
||||
beta_g2: pk_beta.1,
|
||||
},
|
||||
PrivateKey {
|
||||
tau: tau,
|
||||
alpha: alpha,
|
||||
beta: beta
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
/// Determines if point compression should be used.
|
||||
#[derive(Copy, Clone)]
|
||||
pub enum UseCompression {
|
||||
Yes,
|
||||
No
|
||||
}
|
||||
|
||||
/// Determines if points should be checked for correctness during deserialization.
|
||||
/// This is not necessary for participants, because a transcript verifier can
|
||||
/// check this themselves.
|
||||
#[derive(Copy, Clone)]
|
||||
pub enum CheckForCorrectness {
|
||||
Yes,
|
||||
No
|
||||
}
|
||||
|
||||
fn write_point<W, G>(
|
||||
writer: &mut W,
|
||||
p: &G,
|
||||
compression: UseCompression
|
||||
) -> io::Result<()>
|
||||
where W: Write,
|
||||
G: CurveAffine
|
||||
{
|
||||
match compression {
|
||||
UseCompression::Yes => writer.write_all(p.into_compressed().as_ref()),
|
||||
UseCompression::No => writer.write_all(p.into_uncompressed().as_ref()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Errors that might occur during deserialization.
|
||||
#[derive(Debug)]
|
||||
pub enum DeserializationError {
|
||||
IoError(io::Error),
|
||||
DecodingError(GroupDecodingError),
|
||||
PointAtInfinity
|
||||
}
|
||||
|
||||
impl fmt::Display for DeserializationError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
match *self {
|
||||
DeserializationError::IoError(ref e) => write!(f, "Disk IO error: {}", e),
|
||||
DeserializationError::DecodingError(ref e) => write!(f, "Decoding error: {}", e),
|
||||
DeserializationError::PointAtInfinity => write!(f, "Point at infinity found")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<io::Error> for DeserializationError {
|
||||
fn from(err: io::Error) -> DeserializationError {
|
||||
DeserializationError::IoError(err)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<GroupDecodingError> for DeserializationError {
|
||||
fn from(err: GroupDecodingError) -> DeserializationError {
|
||||
DeserializationError::DecodingError(err)
|
||||
}
|
||||
}
|
||||
|
||||
impl PublicKey {
|
||||
/// Serialize the public key. Points are always in uncompressed form.
|
||||
pub fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()>
|
||||
{
|
||||
write_point(writer, &self.tau_g1.0, UseCompression::No)?;
|
||||
write_point(writer, &self.tau_g1.1, UseCompression::No)?;
|
||||
|
||||
write_point(writer, &self.alpha_g1.0, UseCompression::No)?;
|
||||
write_point(writer, &self.alpha_g1.1, UseCompression::No)?;
|
||||
|
||||
write_point(writer, &self.beta_g1.0, UseCompression::No)?;
|
||||
write_point(writer, &self.beta_g1.1, UseCompression::No)?;
|
||||
|
||||
write_point(writer, &self.tau_g2, UseCompression::No)?;
|
||||
write_point(writer, &self.alpha_g2, UseCompression::No)?;
|
||||
write_point(writer, &self.beta_g2, UseCompression::No)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Deserialize the public key. Points are always in uncompressed form, and
|
||||
/// always checked, since there aren't very many of them. Does not allow any
|
||||
/// points at infinity.
|
||||
pub fn deserialize<R: Read>(reader: &mut R) -> Result<PublicKey, DeserializationError>
|
||||
{
|
||||
fn read_uncompressed<C: CurveAffine, R: Read>(reader: &mut R) -> Result<C, DeserializationError> {
|
||||
let mut repr = C::Uncompressed::empty();
|
||||
reader.read_exact(repr.as_mut())?;
|
||||
let v = repr.into_affine()?;
|
||||
|
||||
if v.is_zero() {
|
||||
Err(DeserializationError::PointAtInfinity)
|
||||
} else {
|
||||
Ok(v)
|
||||
}
|
||||
}
|
||||
|
||||
let tau_g1_s = read_uncompressed(reader)?;
|
||||
let tau_g1_s_tau = read_uncompressed(reader)?;
|
||||
|
||||
let alpha_g1_s = read_uncompressed(reader)?;
|
||||
let alpha_g1_s_alpha = read_uncompressed(reader)?;
|
||||
|
||||
let beta_g1_s = read_uncompressed(reader)?;
|
||||
let beta_g1_s_beta = read_uncompressed(reader)?;
|
||||
|
||||
let tau_g2 = read_uncompressed(reader)?;
|
||||
let alpha_g2 = read_uncompressed(reader)?;
|
||||
let beta_g2 = read_uncompressed(reader)?;
|
||||
|
||||
Ok(PublicKey {
|
||||
tau_g1: (tau_g1_s, tau_g1_s_tau),
|
||||
alpha_g1: (alpha_g1_s, alpha_g1_s_alpha),
|
||||
beta_g1: (beta_g1_s, beta_g1_s_beta),
|
||||
tau_g2: tau_g2,
|
||||
alpha_g2: alpha_g2,
|
||||
beta_g2: beta_g2
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_pubkey_serialization() {
|
||||
use rand::thread_rng;
|
||||
|
||||
let rng = &mut thread_rng();
|
||||
let digest = (0..64).map(|_| rng.gen()).collect::<Vec<_>>();
|
||||
let (pk, _) = keypair(rng, &digest);
|
||||
let mut v = vec![];
|
||||
pk.serialize(&mut v).unwrap();
|
||||
assert_eq!(v.len(), PUBLIC_KEY_SIZE);
|
||||
let deserialized = PublicKey::deserialize(&mut &v[..]).unwrap();
|
||||
assert!(pk == deserialized);
|
||||
}
|
||||
|
||||
/// The `Accumulator` is an object that participants of the ceremony contribute
|
||||
/// randomness to. This object contains powers of trapdoor `tau` in G1 and in G2 over
|
||||
/// fixed generators, and additionally in G1 over two other generators of exponents
|
||||
/// `alpha` and `beta` over those fixed generators. In other words:
|
||||
///
|
||||
/// * (τ, τ<sup>2</sup>, ..., τ<sup>2<sup>22</sup> - 2</sup>, α, ατ, ατ<sup>2</sup>, ..., ατ<sup>2<sup>21</sup> - 1</sup>, β, βτ, βτ<sup>2</sup>, ..., βτ<sup>2<sup>21</sup> - 1</sup>)<sub>1</sub>
|
||||
/// * (β, τ, τ<sup>2</sup>, ..., τ<sup>2<sup>21</sup> - 1</sup>)<sub>2</sub>
|
||||
#[derive(PartialEq, Eq, Clone)]
|
||||
pub struct Accumulator {
|
||||
/// tau^0, tau^1, tau^2, ..., tau^{TAU_POWERS_G1_LENGTH - 1}
|
||||
pub tau_powers_g1: Vec<G1Affine>,
|
||||
/// tau^0, tau^1, tau^2, ..., tau^{TAU_POWERS_LENGTH - 1}
|
||||
pub tau_powers_g2: Vec<G2Affine>,
|
||||
/// alpha * tau^0, alpha * tau^1, alpha * tau^2, ..., alpha * tau^{TAU_POWERS_LENGTH - 1}
|
||||
pub alpha_tau_powers_g1: Vec<G1Affine>,
|
||||
/// beta * tau^0, beta * tau^1, beta * tau^2, ..., beta * tau^{TAU_POWERS_LENGTH - 1}
|
||||
pub beta_tau_powers_g1: Vec<G1Affine>,
|
||||
/// beta
|
||||
pub beta_g2: G2Affine
|
||||
}
|
||||
|
||||
impl Accumulator {
|
||||
/// Constructs an "initial" accumulator with τ = 1, α = 1, β = 1.
|
||||
pub fn new() -> Self {
|
||||
Accumulator {
|
||||
tau_powers_g1: vec![G1Affine::one(); TAU_POWERS_G1_LENGTH],
|
||||
tau_powers_g2: vec![G2Affine::one(); TAU_POWERS_LENGTH],
|
||||
alpha_tau_powers_g1: vec![G1Affine::one(); TAU_POWERS_LENGTH],
|
||||
beta_tau_powers_g1: vec![G1Affine::one(); TAU_POWERS_LENGTH],
|
||||
beta_g2: G2Affine::one()
|
||||
}
|
||||
}
|
||||
|
||||
/// Write the accumulator with some compression behavior.
|
||||
pub fn serialize<W: Write>(
|
||||
&self,
|
||||
writer: &mut W,
|
||||
compression: UseCompression
|
||||
) -> io::Result<()>
|
||||
{
|
||||
fn write_all<W: Write, C: CurveAffine>(
|
||||
writer: &mut W,
|
||||
c: &[C],
|
||||
compression: UseCompression
|
||||
) -> io::Result<()>
|
||||
{
|
||||
for c in c {
|
||||
write_point(writer, c, compression)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
write_all(writer, &self.tau_powers_g1, compression)?;
|
||||
write_all(writer, &self.tau_powers_g2, compression)?;
|
||||
write_all(writer, &self.alpha_tau_powers_g1, compression)?;
|
||||
write_all(writer, &self.beta_tau_powers_g1, compression)?;
|
||||
write_all(writer, &[self.beta_g2], compression)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Read the accumulator from disk with some compression behavior. `checked`
|
||||
/// indicates whether we should check it's a valid element of the group and
|
||||
/// not the point at infinity.
|
||||
pub fn deserialize<R: Read>(
|
||||
reader: &mut R,
|
||||
compression: UseCompression,
|
||||
checked: CheckForCorrectness
|
||||
) -> Result<Self, DeserializationError>
|
||||
{
|
||||
fn read_all<R: Read, C: CurveAffine>(
|
||||
reader: &mut R,
|
||||
size: usize,
|
||||
compression: UseCompression,
|
||||
checked: CheckForCorrectness
|
||||
) -> Result<Vec<C>, DeserializationError>
|
||||
{
|
||||
fn decompress_all<R: Read, E: EncodedPoint>(
|
||||
reader: &mut R,
|
||||
size: usize,
|
||||
checked: CheckForCorrectness
|
||||
) -> Result<Vec<E::Affine>, DeserializationError>
|
||||
{
|
||||
// Read the encoded elements
|
||||
let mut res = vec![E::empty(); size];
|
||||
|
||||
for encoded in &mut res {
|
||||
reader.read_exact(encoded.as_mut())?;
|
||||
}
|
||||
|
||||
// Allocate space for the deserialized elements
|
||||
let mut res_affine = vec![E::Affine::zero(); size];
|
||||
|
||||
let mut chunk_size = res.len() / num_cpus::get();
|
||||
if chunk_size == 0 {
|
||||
chunk_size = 1;
|
||||
}
|
||||
|
||||
// If any of our threads encounter a deserialization/IO error, catch
|
||||
// it with this.
|
||||
let decoding_error = Arc::new(Mutex::new(None));
|
||||
|
||||
crossbeam::scope(|scope| {
|
||||
for (source, target) in res.chunks(chunk_size).zip(res_affine.chunks_mut(chunk_size)) {
|
||||
let decoding_error = decoding_error.clone();
|
||||
|
||||
scope.spawn(move || {
|
||||
for (source, target) in source.iter().zip(target.iter_mut()) {
|
||||
match {
|
||||
// If we're a participant, we don't need to check all of the
|
||||
// elements in the accumulator, which saves a lot of time.
|
||||
// The hash chain prevents this from being a problem: the
|
||||
// transcript guarantees that the accumulator was properly
|
||||
// formed.
|
||||
match checked {
|
||||
CheckForCorrectness::Yes => {
|
||||
// Points at infinity are never expected in the accumulator
|
||||
source.into_affine().map_err(|e| e.into()).and_then(|source| {
|
||||
if source.is_zero() {
|
||||
Err(DeserializationError::PointAtInfinity)
|
||||
} else {
|
||||
Ok(source)
|
||||
}
|
||||
})
|
||||
},
|
||||
CheckForCorrectness::No => source.into_affine_unchecked().map_err(|e| e.into())
|
||||
}
|
||||
}
|
||||
{
|
||||
Ok(source) => {
|
||||
*target = source;
|
||||
},
|
||||
Err(e) => {
|
||||
*decoding_error.lock().unwrap() = Some(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
match Arc::try_unwrap(decoding_error).unwrap().into_inner().unwrap() {
|
||||
Some(e) => {
|
||||
Err(e)
|
||||
},
|
||||
None => {
|
||||
Ok(res_affine)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
match compression {
|
||||
UseCompression::Yes => decompress_all::<_, C::Compressed>(reader, size, checked),
|
||||
UseCompression::No => decompress_all::<_, C::Uncompressed>(reader, size, checked)
|
||||
}
|
||||
}
|
||||
|
||||
let tau_powers_g1 = read_all(reader, TAU_POWERS_G1_LENGTH, compression, checked)?;
|
||||
let tau_powers_g2 = read_all(reader, TAU_POWERS_LENGTH, compression, checked)?;
|
||||
let alpha_tau_powers_g1 = read_all(reader, TAU_POWERS_LENGTH, compression, checked)?;
|
||||
let beta_tau_powers_g1 = read_all(reader, TAU_POWERS_LENGTH, compression, checked)?;
|
||||
let beta_g2 = read_all(reader, 1, compression, checked)?[0];
|
||||
|
||||
Ok(Accumulator {
|
||||
tau_powers_g1: tau_powers_g1,
|
||||
tau_powers_g2: tau_powers_g2,
|
||||
alpha_tau_powers_g1: alpha_tau_powers_g1,
|
||||
beta_tau_powers_g1: beta_tau_powers_g1,
|
||||
beta_g2: beta_g2
|
||||
})
|
||||
}
|
||||
|
||||
/// Transforms the accumulator with a private key.
|
||||
pub fn transform(&mut self, key: &PrivateKey)
|
||||
{
|
||||
// Construct the powers of tau
|
||||
let mut taupowers = vec![Fr::zero(); TAU_POWERS_G1_LENGTH];
|
||||
let chunk_size = TAU_POWERS_G1_LENGTH / num_cpus::get();
|
||||
|
||||
// Construct exponents in parallel
|
||||
crossbeam::scope(|scope| {
|
||||
for (i, taupowers) in taupowers.chunks_mut(chunk_size).enumerate() {
|
||||
scope.spawn(move || {
|
||||
let mut acc = key.tau.pow(&[(i * chunk_size) as u64]);
|
||||
|
||||
for t in taupowers {
|
||||
*t = acc;
|
||||
acc.mul_assign(&key.tau);
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
/// Exponentiate a large number of points, with an optional coefficient to be applied to the
|
||||
/// exponent.
|
||||
fn batch_exp<C: CurveAffine>(bases: &mut [C], exp: &[C::Scalar], coeff: Option<&C::Scalar>) {
|
||||
assert_eq!(bases.len(), exp.len());
|
||||
let mut projective = vec![C::Projective::zero(); bases.len()];
|
||||
let chunk_size = bases.len() / num_cpus::get();
|
||||
|
||||
// Perform wNAF over multiple cores, placing results into `projective`.
|
||||
crossbeam::scope(|scope| {
|
||||
for ((bases, exp), projective) in bases.chunks_mut(chunk_size)
|
||||
.zip(exp.chunks(chunk_size))
|
||||
.zip(projective.chunks_mut(chunk_size))
|
||||
{
|
||||
scope.spawn(move || {
|
||||
let mut wnaf = Wnaf::new();
|
||||
|
||||
for ((base, exp), projective) in bases.iter_mut()
|
||||
.zip(exp.iter())
|
||||
.zip(projective.iter_mut())
|
||||
{
|
||||
let mut exp = *exp;
|
||||
if let Some(coeff) = coeff {
|
||||
exp.mul_assign(coeff);
|
||||
}
|
||||
|
||||
*projective = wnaf.base(base.into_projective(), 1).scalar(exp.into_repr());
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
// Perform batch normalization
|
||||
crossbeam::scope(|scope| {
|
||||
for projective in projective.chunks_mut(chunk_size)
|
||||
{
|
||||
scope.spawn(move || {
|
||||
C::Projective::batch_normalization(projective);
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
// Turn it all back into affine points
|
||||
for (projective, affine) in projective.iter().zip(bases.iter_mut()) {
|
||||
*affine = projective.into_affine();
|
||||
}
|
||||
}
|
||||
|
||||
batch_exp(&mut self.tau_powers_g1, &taupowers[0..], None);
|
||||
batch_exp(&mut self.tau_powers_g2, &taupowers[0..TAU_POWERS_LENGTH], None);
|
||||
batch_exp(&mut self.alpha_tau_powers_g1, &taupowers[0..TAU_POWERS_LENGTH], Some(&key.alpha));
|
||||
batch_exp(&mut self.beta_tau_powers_g1, &taupowers[0..TAU_POWERS_LENGTH], Some(&key.beta));
|
||||
self.beta_g2 = self.beta_g2.mul(key.beta).into_affine();
|
||||
}
|
||||
}
|
||||
|
||||
/// Verifies a transformation of the `Accumulator` with the `PublicKey`, given a 64-byte transcript `digest`.
|
||||
pub fn verify_transform(before: &Accumulator, after: &Accumulator, key: &PublicKey, digest: &[u8]) -> bool
|
||||
{
|
||||
assert_eq!(digest.len(), 64);
|
||||
|
||||
let compute_g2_s = |g1_s: G1Affine, g1_s_x: G1Affine, personalization: u8| {
|
||||
let mut h = Blake2b::default();
|
||||
h.input(&[personalization]);
|
||||
h.input(digest);
|
||||
h.input(g1_s.into_uncompressed().as_ref());
|
||||
h.input(g1_s_x.into_uncompressed().as_ref());
|
||||
hash_to_g2(h.result().as_ref()).into_affine()
|
||||
};
|
||||
|
||||
let tau_g2_s = compute_g2_s(key.tau_g1.0, key.tau_g1.1, 0);
|
||||
let alpha_g2_s = compute_g2_s(key.alpha_g1.0, key.alpha_g1.1, 1);
|
||||
let beta_g2_s = compute_g2_s(key.beta_g1.0, key.beta_g1.1, 2);
|
||||
|
||||
// Check the proofs-of-knowledge for tau/alpha/beta
|
||||
if !same_ratio(key.tau_g1, (tau_g2_s, key.tau_g2)) {
|
||||
return false;
|
||||
}
|
||||
if !same_ratio(key.alpha_g1, (alpha_g2_s, key.alpha_g2)) {
|
||||
return false;
|
||||
}
|
||||
if !same_ratio(key.beta_g1, (beta_g2_s, key.beta_g2)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Check the correctness of the generators for tau powers
|
||||
if after.tau_powers_g1[0] != G1Affine::one() {
|
||||
return false;
|
||||
}
|
||||
if after.tau_powers_g2[0] != G2Affine::one() {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Did the participant multiply the previous tau by the new one?
|
||||
if !same_ratio((before.tau_powers_g1[1], after.tau_powers_g1[1]), (tau_g2_s, key.tau_g2)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Did the participant multiply the previous alpha by the new one?
|
||||
if !same_ratio((before.alpha_tau_powers_g1[0], after.alpha_tau_powers_g1[0]), (alpha_g2_s, key.alpha_g2)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Did the participant multiply the previous beta by the new one?
|
||||
if !same_ratio((before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (beta_g2_s, key.beta_g2)) {
|
||||
return false;
|
||||
}
|
||||
if !same_ratio((before.beta_tau_powers_g1[0], after.beta_tau_powers_g1[0]), (before.beta_g2, after.beta_g2)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Are the powers of tau correct?
|
||||
if !same_ratio(power_pairs(&after.tau_powers_g1), (after.tau_powers_g2[0], after.tau_powers_g2[1])) {
|
||||
return false;
|
||||
}
|
||||
if !same_ratio(power_pairs(&after.tau_powers_g2), (after.tau_powers_g1[0], after.tau_powers_g1[1])) {
|
||||
return false;
|
||||
}
|
||||
if !same_ratio(power_pairs(&after.alpha_tau_powers_g1), (after.tau_powers_g2[0], after.tau_powers_g2[1])) {
|
||||
return false;
|
||||
}
|
||||
if !same_ratio(power_pairs(&after.beta_tau_powers_g1), (after.tau_powers_g2[0], after.tau_powers_g2[1])) {
|
||||
return false;
|
||||
}
|
||||
|
||||
true
|
||||
}

/// Computes a random linear combination over v1/v2.
///
/// Checking that many pairs of elements are exponentiated by
/// the same `x` can be achieved (with high probability) with
/// the following technique:
///
/// Given v1 = [a, b, c] and v2 = [as, bs, cs], compute
/// (a*r1 + b*r2 + c*r3, (as)*r1 + (bs)*r2 + (cs)*r3) for some
/// random r1, r2, r3. Given (g, g^s)...
///
/// e(g, (as)*r1 + (bs)*r2 + (cs)*r3) = e(g^s, a*r1 + b*r2 + c*r3)
///
/// ... with high probability.
fn merge_pairs<G: CurveAffine>(v1: &[G], v2: &[G]) -> (G, G)
{
    use std::sync::{Arc, Mutex};
    use rand::{thread_rng};

    assert_eq!(v1.len(), v2.len());

    let chunk = (v1.len() / num_cpus::get()) + 1;

    let s = Arc::new(Mutex::new(G::Projective::zero()));
    let sx = Arc::new(Mutex::new(G::Projective::zero()));

    crossbeam::scope(|scope| {
        for (v1, v2) in v1.chunks(chunk).zip(v2.chunks(chunk)) {
            let s = s.clone();
            let sx = sx.clone();

            scope.spawn(move || {
                // We do not need to be overly cautious of the RNG
                // used for this check.
                let rng = &mut thread_rng();

                let mut wnaf = Wnaf::new();
                let mut local_s = G::Projective::zero();
                let mut local_sx = G::Projective::zero();

                for (v1, v2) in v1.iter().zip(v2.iter()) {
                    let rho = G::Scalar::rand(rng);
                    let mut wnaf = wnaf.scalar(rho.into_repr());
                    let v1 = wnaf.base(v1.into_projective());
                    let v2 = wnaf.base(v2.into_projective());

                    local_s.add_assign(&v1);
                    local_sx.add_assign(&v2);
                }

                s.lock().unwrap().add_assign(&local_s);
                sx.lock().unwrap().add_assign(&local_sx);
            });
        }
    });

    let s = s.lock().unwrap().into_affine();
    let sx = sx.lock().unwrap().into_affine();

    (s, sx)
}

/// Construct a single pair (s, s^x) for a vector of
/// the form [1, x, x^2, x^3, ...].
fn power_pairs<G: CurveAffine>(v: &[G]) -> (G, G)
{
    merge_pairs(&v[0..(v.len()-1)], &v[1..])
}

#[test]
fn test_power_pairs() {
    use rand::thread_rng;

    let rng = &mut thread_rng();

    let mut v = vec![];
    let x = Fr::rand(rng);
    let mut acc = Fr::one();
    for _ in 0..100 {
        v.push(G1Affine::one().mul(acc).into_affine());
        acc.mul_assign(&x);
    }

    let gx = G2Affine::one().mul(x).into_affine();

    assert!(same_ratio(power_pairs(&v), (G2Affine::one(), gx)));

    v[1] = v[1].mul(Fr::rand(rng)).into_affine();

    assert!(!same_ratio(power_pairs(&v), (G2Affine::one(), gx)));
}

/// Checks if pairs have the same ratio.
fn same_ratio<G1: CurveAffine>(
    g1: (G1, G1),
    g2: (G1::Pair, G1::Pair)
) -> bool
{
    g1.0.pairing_with(&g2.1) == g1.1.pairing_with(&g2.0)
}

#[test]
fn test_same_ratio() {
    use rand::thread_rng;

    let rng = &mut thread_rng();

    let s = Fr::rand(rng);
    let g1 = G1Affine::one();
    let g2 = G2Affine::one();
    let g1_s = g1.mul(s).into_affine();
    let g2_s = g2.mul(s).into_affine();

    assert!(same_ratio((g1, g1_s), (g2, g2_s)));
    assert!(!same_ratio((g1_s, g1), (g2, g2_s)));
}

#[test]
fn test_accumulator_serialization() {
    use rand::thread_rng;

    let rng = &mut thread_rng();
    let mut digest = (0..64).map(|_| rng.gen()).collect::<Vec<_>>();

    let mut acc = Accumulator::new();
    let before = acc.clone();
    let (pk, sk) = keypair(rng, &digest);
    acc.transform(&sk);
    assert!(verify_transform(&before, &acc, &pk, &digest));
    digest[0] = !digest[0];
    assert!(!verify_transform(&before, &acc, &pk, &digest));
    let mut v = Vec::with_capacity(ACCUMULATOR_BYTE_SIZE - 64);
    acc.serialize(&mut v, UseCompression::No).unwrap();
    assert_eq!(v.len(), ACCUMULATOR_BYTE_SIZE - 64);
    let deserialized = Accumulator::deserialize(&mut &v[..], UseCompression::No, CheckForCorrectness::No).unwrap();
    assert!(acc == deserialized);
}

/// Compute BLAKE2b("")
pub fn blank_hash() -> GenericArray<u8, U64> {
    Blake2b::new().result()
}

/// Abstraction over a reader which hashes the data being read.
pub struct HashReader<R: Read> {
    reader: R,
    hasher: Blake2b
}

impl<R: Read> HashReader<R> {
    /// Construct a new `HashReader` given an existing `reader` by value.
    pub fn new(reader: R) -> Self {
        HashReader {
            reader: reader,
            hasher: Blake2b::default()
        }
    }

    /// Destroy this reader and return the hash of what was read.
    pub fn into_hash(self) -> GenericArray<u8, U64> {
        self.hasher.result()
    }
}

impl<R: Read> Read for HashReader<R> {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        let bytes = self.reader.read(buf)?;

        if bytes > 0 {
            self.hasher.input(&buf[0..bytes]);
        }

        Ok(bytes)
    }
}

/// Abstraction over a writer which hashes the data being written.
pub struct HashWriter<W: Write> {
    writer: W,
    hasher: Blake2b
}

impl<W: Write> HashWriter<W> {
    /// Construct a new `HashWriter` given an existing `writer` by value.
    pub fn new(writer: W) -> Self {
        HashWriter {
            writer: writer,
            hasher: Blake2b::default()
        }
    }

    /// Destroy this writer and return the hash of what was written.
    pub fn into_hash(self) -> GenericArray<u8, U64> {
        self.hasher.result()
    }
}

impl<W: Write> Write for HashWriter<W> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let bytes = self.writer.write(buf)?;

        if bytes > 0 {
            self.hasher.input(&buf[0..bytes]);
        }

        Ok(bytes)
    }

    fn flush(&mut self) -> io::Result<()> {
        self.writer.flush()
    }
}
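
// Illustrative sketch: hashing through `HashWriter` should give the same
// BLAKE2b digest as feeding the bytes to the hasher directly; `HashReader`
// mirrors this behaviour on the read path.
#[test]
fn test_hash_writer_matches_direct_hash() {
    let data: &[u8] = b"powers of tau";

    let mut writer = HashWriter::new(Vec::<u8>::new());
    writer.write_all(data).unwrap();

    let mut hasher = Blake2b::default();
    hasher.input(data);

    assert_eq!(writer.into_hash(), hasher.result());
}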
#![allow(unused_imports)]

// pub mod bls12_381;
pub mod bn256;
pub mod small_bn256;
pub mod accumulator;
pub mod batched_accumulator;
pub mod keypair;
pub mod parameters;
pub mod utils;
118
src/parameters.rs
Normal file
118
src/parameters.rs
Normal file
@ -0,0 +1,118 @@
extern crate pairing;
extern crate rand;
extern crate crossbeam;
extern crate num_cpus;
extern crate blake2;
extern crate generic_array;
extern crate typenum;
extern crate byteorder;
extern crate ff;

use self::ff::{Field, PrimeField};
use self::byteorder::{ReadBytesExt, BigEndian};
use self::rand::{SeedableRng, Rng, Rand};
use self::rand::chacha::ChaChaRng;
use self::pairing::bn256::{Bn256};
use self::pairing::*;
use std::io::{self, Read, Write};
use std::sync::{Arc, Mutex};
use self::generic_array::GenericArray;
use self::typenum::consts::U64;
use self::blake2::{Blake2b, Digest};
use std::fmt;

use super::keypair::*;

pub trait PowersOfTauParameters: Clone {
    const REQUIRED_POWER: usize;

    const G1_UNCOMPRESSED_BYTE_SIZE: usize;
    const G2_UNCOMPRESSED_BYTE_SIZE: usize;
    const G1_COMPRESSED_BYTE_SIZE: usize;
    const G2_COMPRESSED_BYTE_SIZE: usize;

    const TAU_POWERS_LENGTH: usize = (1 << Self::REQUIRED_POWER);

    const TAU_POWERS_G1_LENGTH: usize = (Self::TAU_POWERS_LENGTH << 1) - 1;

    const ACCUMULATOR_BYTE_SIZE: usize = (Self::TAU_POWERS_G1_LENGTH * Self::G1_UNCOMPRESSED_BYTE_SIZE) + // g1 tau powers
                                         (Self::TAU_POWERS_LENGTH * Self::G2_UNCOMPRESSED_BYTE_SIZE) + // g2 tau powers
                                         (Self::TAU_POWERS_LENGTH * Self::G1_UNCOMPRESSED_BYTE_SIZE) + // alpha tau powers
                                         (Self::TAU_POWERS_LENGTH * Self::G1_UNCOMPRESSED_BYTE_SIZE) // beta tau powers
                                         + Self::G2_UNCOMPRESSED_BYTE_SIZE // beta in g2
                                         + Self::HASH_SIZE; // blake2b hash of previous contribution

    const PUBLIC_KEY_SIZE: usize = 3 * Self::G2_UNCOMPRESSED_BYTE_SIZE + // tau, alpha, and beta in g2
                                   6 * Self::G1_UNCOMPRESSED_BYTE_SIZE; // (s1, s1*tau), (s2, s2*alpha), (s3, s3*beta) in g1

    const CONTRIBUTION_BYTE_SIZE: usize = (Self::TAU_POWERS_G1_LENGTH * Self::G1_COMPRESSED_BYTE_SIZE) + // g1 tau powers
                                          (Self::TAU_POWERS_LENGTH * Self::G2_COMPRESSED_BYTE_SIZE) + // g2 tau powers
                                          (Self::TAU_POWERS_LENGTH * Self::G1_COMPRESSED_BYTE_SIZE) + // alpha tau powers
                                          (Self::TAU_POWERS_LENGTH * Self::G1_COMPRESSED_BYTE_SIZE) // beta tau powers
                                          + Self::G2_COMPRESSED_BYTE_SIZE // beta in g2
                                          + Self::HASH_SIZE // blake2b hash of input accumulator
                                          + Self::PUBLIC_KEY_SIZE; // public key

    // Blake2b hash size
    const HASH_SIZE: usize = 64;

    const EMPIRICAL_BATCH_SIZE: usize = 1 << 21;
}

/// Determines if point compression should be used.
#[derive(Copy, Clone, PartialEq)]
pub enum UseCompression {
    Yes,
    No
}

/// Determines if points should be checked for correctness during deserialization.
/// This is not necessary for participants, because a transcript verifier can
/// check this themselves.
#[derive(Copy, Clone, PartialEq)]
pub enum CheckForCorrectness {
    Yes,
    No
}

/// Errors that might occur during deserialization.
#[derive(Debug)]
pub enum DeserializationError {
    IoError(io::Error),
    DecodingError(GroupDecodingError),
    PointAtInfinity
}

impl fmt::Display for DeserializationError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            DeserializationError::IoError(ref e) => write!(f, "Disk IO error: {}", e),
            DeserializationError::DecodingError(ref e) => write!(f, "Decoding error: {}", e),
            DeserializationError::PointAtInfinity => write!(f, "Point at infinity found")
        }
    }
}

impl From<io::Error> for DeserializationError {
    fn from(err: io::Error) -> DeserializationError {
        DeserializationError::IoError(err)
    }
}

impl From<GroupDecodingError> for DeserializationError {
    fn from(err: GroupDecodingError) -> DeserializationError {
        DeserializationError::DecodingError(err)
    }
}

#[derive(Copy, Clone, Debug, PartialEq)]
pub enum ElementType {
    TauG1,
    TauG2,
    AlphaG1,
    BetaG1,
    BetaG2
}
41
src/small_bn256/mod.rs
Normal file
41
src/small_bn256/mod.rs
Normal file
@ -0,0 +1,41 @@
extern crate pairing;
extern crate rand;
extern crate crossbeam;
extern crate num_cpus;
extern crate blake2;
extern crate generic_array;
extern crate typenum;
extern crate byteorder;
extern crate ff;

use self::ff::{Field, PrimeField};
use self::byteorder::{ReadBytesExt, BigEndian};
use self::rand::{SeedableRng, Rng, Rand};
use self::rand::chacha::ChaChaRng;
use self::pairing::bn256::{Bn256};
use self::pairing::*;
use std::io::{self, Read, Write};
use std::sync::{Arc, Mutex};
use self::generic_array::GenericArray;
use self::typenum::consts::U64;
use self::blake2::{Blake2b, Digest};
use std::fmt;

use crate::parameters::*;
use crate::keypair::*;
use crate::utils::*;

#[derive(Clone)]
pub struct Bn256CeremonyParameters {

}

impl PowersOfTauParameters for Bn256CeremonyParameters {
    const REQUIRED_POWER: usize = 21; // generate to have roughly 2 million constraints

    // This ceremony is based on the BN256 elliptic curve construction.
    const G1_UNCOMPRESSED_BYTE_SIZE: usize = 64;
    const G2_UNCOMPRESSED_BYTE_SIZE: usize = 128;
    const G1_COMPRESSED_BYTE_SIZE: usize = 32;
    const G2_COMPRESSED_BYTE_SIZE: usize = 64;
}
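
// Illustrative sketch of what REQUIRED_POWER = 21 works out to: 2^21 powers of
// tau (2^22 - 1 of them in G1), i.e. roughly 768 MiB of uncompressed
// accumulator data. The exact byte count below is only a sanity check derived
// from the constants above, not a normative value.
#[test]
fn test_bn256_parameter_sizes() {
    assert_eq!(Bn256CeremonyParameters::TAU_POWERS_LENGTH, 1 << 21);
    assert_eq!(Bn256CeremonyParameters::TAU_POWERS_G1_LENGTH, (1 << 22) - 1);
    assert_eq!(Bn256CeremonyParameters::ACCUMULATOR_BYTE_SIZE, 805_306_496);
}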
168
src/utils.rs
Normal file
168
src/utils.rs
Normal file
@ -0,0 +1,168 @@
extern crate pairing;
extern crate rand;
extern crate crossbeam;
extern crate num_cpus;
extern crate blake2;
extern crate generic_array;
extern crate typenum;
extern crate byteorder;
extern crate ff;

use self::ff::{Field, PrimeField};
use self::byteorder::{ReadBytesExt, BigEndian};
use self::rand::{SeedableRng, Rng, Rand};
use self::rand::chacha::ChaChaRng;
use self::pairing::bn256::{Bn256};
use self::pairing::*;
use std::io::{self, Read, Write};
use std::sync::{Arc, Mutex};
use self::generic_array::GenericArray;
use self::typenum::consts::U64;
use self::blake2::{Blake2b, Digest};
use std::fmt;

use super::parameters::*;

/// Hashes to G2 using the first 32 bytes of `digest`. Panics if `digest` is less
/// than 32 bytes.
pub fn hash_to_g2<E: Engine>(mut digest: &[u8]) -> E::G2
{
    assert!(digest.len() >= 32);

    let mut seed = Vec::with_capacity(8);

    for _ in 0..8 {
        seed.push(digest.read_u32::<BigEndian>().expect("assertion above guarantees this to work"));
    }

    ChaChaRng::from_seed(&seed).gen()
}

#[test]
fn test_hash_to_g2() {
    assert!(
        hash_to_g2::<Bn256>(&[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33])
        ==
        hash_to_g2::<Bn256>(&[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,34])
    );

    assert!(
        hash_to_g2::<Bn256>(&[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32])
        !=
        hash_to_g2::<Bn256>(&[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,33])
    );
}

/// Computes a random linear combination over v1/v2.
///
/// Checking that many pairs of elements are exponentiated by
/// the same `x` can be achieved (with high probability) with
/// the following technique:
///
/// Given v1 = [a, b, c] and v2 = [as, bs, cs], compute
/// (a*r1 + b*r2 + c*r3, (as)*r1 + (bs)*r2 + (cs)*r3) for some
/// random r1, r2, r3. Given (g, g^s)...
///
/// e(g, (as)*r1 + (bs)*r2 + (cs)*r3) = e(g^s, a*r1 + b*r2 + c*r3)
///
/// ... with high probability.
fn merge_pairs<E: Engine, G: CurveAffine<Engine = E, Scalar = E::Fr>>(v1: &[G], v2: &[G]) -> (G, G)
{
    use std::sync::{Arc, Mutex};
    use self::rand::{thread_rng};

    assert_eq!(v1.len(), v2.len());

    let chunk = (v1.len() / num_cpus::get()) + 1;

    let s = Arc::new(Mutex::new(G::Projective::zero()));
    let sx = Arc::new(Mutex::new(G::Projective::zero()));

    crossbeam::scope(|scope| {
        for (v1, v2) in v1.chunks(chunk).zip(v2.chunks(chunk)) {
            let s = s.clone();
            let sx = sx.clone();

            scope.spawn(move || {
                // We do not need to be overly cautious of the RNG
                // used for this check.
                let rng = &mut thread_rng();

                let mut wnaf = Wnaf::new();
                let mut local_s = G::Projective::zero();
                let mut local_sx = G::Projective::zero();

                for (v1, v2) in v1.iter().zip(v2.iter()) {
                    let rho = G::Scalar::rand(rng);
                    let mut wnaf = wnaf.scalar(rho.into_repr());
                    let v1 = wnaf.base(v1.into_projective());
                    let v2 = wnaf.base(v2.into_projective());

                    local_s.add_assign(&v1);
                    local_sx.add_assign(&v2);
                }

                s.lock().unwrap().add_assign(&local_s);
                sx.lock().unwrap().add_assign(&local_sx);
            });
        }
    });

    let s = s.lock().unwrap().into_affine();
    let sx = sx.lock().unwrap().into_affine();

    (s, sx)
}

/// Construct a single pair (s, s^x) for a vector of
/// the form [1, x, x^2, x^3, ...].
pub fn power_pairs<E: Engine, G: CurveAffine<Engine = E, Scalar = E::Fr>>(v: &[G]) -> (G, G)
{
    merge_pairs::<E, _>(&v[0..(v.len()-1)], &v[1..])
}

/// Compute BLAKE2b("")
pub fn blank_hash() -> GenericArray<u8, U64> {
    Blake2b::new().result()
}

/// Checks if pairs have the same ratio.
/// Under the hood uses pairing to check
/// x1/x2 = y1/y2 => x1*y2 = x2*y1
pub fn same_ratio<E: Engine, G1: CurveAffine<Engine = E, Scalar = E::Fr>>(
    g1: (G1, G1),
    g2: (G1::Pair, G1::Pair)
) -> bool
{
    g1.0.pairing_with(&g2.1) == g1.1.pairing_with(&g2.0)
}

pub fn write_point<W, G>(
    writer: &mut W,
    p: &G,
    compression: UseCompression
) -> io::Result<()>
    where W: Write,
          G: CurveAffine
{
    match compression {
        UseCompression::Yes => writer.write_all(p.into_compressed().as_ref()),
        UseCompression::No => writer.write_all(p.into_uncompressed().as_ref()),
    }
}
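
// Illustrative sketch: with the BN256 curve used by this ceremony, an
// uncompressed G1 point written by `write_point` takes 64 bytes and a
// compressed one 32, matching the G1 byte-size constants above.
#[test]
fn test_write_point_sizes() {
    use self::pairing::bn256::G1Affine;

    let p = G1Affine::one();

    let mut uncompressed: Vec<u8> = vec![];
    write_point(&mut uncompressed, &p, UseCompression::No).unwrap();
    assert_eq!(uncompressed.len(), 64);

    let mut compressed: Vec<u8> = vec![];
    write_point(&mut compressed, &p, UseCompression::Yes).unwrap();
    assert_eq!(compressed.len(), 32);
}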

pub fn compute_g2_s<E: Engine>(
    digest: &[u8],
    g1_s: &E::G1Affine,
    g1_s_x: &E::G1Affine,
    personalization: u8
) -> E::G2Affine
{
    let mut h = Blake2b::default();
    h.input(&[personalization]);
    h.input(digest);
    h.input(g1_s.into_uncompressed().as_ref());
    h.input(g1_s_x.into_uncompressed().as_ref());

    hash_to_g2::<E>(h.result().as_ref()).into_affine()
}
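
// Illustrative sketch of the proof-of-knowledge check a verifier performs:
// a contributor exposes (g1^r, g1^(r*x)) together with h = compute_g2_s(...)
// and h^x, and `same_ratio` confirms that both pairs hide the same exponent x.
// The digest below is an arbitrary 64-byte placeholder.
#[test]
fn test_compute_g2_s_proof_of_knowledge() {
    use self::rand::thread_rng;
    use self::pairing::bn256::{Fr, G1Affine};

    let rng = &mut thread_rng();
    let digest = vec![0u8; 64];

    let x = Fr::rand(rng);
    let r = Fr::rand(rng);

    let g1_r = G1Affine::one().mul(r).into_affine();
    let mut rx = r;
    rx.mul_assign(&x);
    let g1_rx = G1Affine::one().mul(rx).into_affine();

    let g2_s = compute_g2_s::<Bn256>(&digest, &g1_r, &g1_rx, 0);
    let g2_s_x = g2_s.mul(x).into_affine();

    assert!(same_ratio::<Bn256, _>((g1_r, g1_rx), (g2_s, g2_s_x)));
}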
19
test.sh
Executable file
19
test.sh
Executable file
@ -0,0 +1,19 @@
#!/bin/sh

# Remove outputs from a previous run, if any.
rm challenge
rm response
rm new_challenge
rm challenge_old
rm response_old

# Round 1: create a fresh challenge, add a contribution, and verify it.
cargo run --release --bin new_constrained
cargo run --release --bin beacon_constrained
cargo run --release --bin verify_transform_constrained

mv challenge challenge_old
mv response response_old

mv new_challenge challenge

# Round 2: contribute again on top of the new challenge and verify.
cargo run --release --bin compute_constrained
cargo run --release --bin verify_transform_constrained