accounts/abi: Add one-parameter event test case from enriquefynn/unpack_one_arg_event
This commit is contained in:
commit
13b566e06e
2
.github/CODEOWNERS
vendored
2
.github/CODEOWNERS
vendored
@ -5,5 +5,7 @@ accounts/usbwallet @karalabe
|
|||||||
consensus @karalabe
|
consensus @karalabe
|
||||||
core/ @karalabe @holiman
|
core/ @karalabe @holiman
|
||||||
eth/ @karalabe
|
eth/ @karalabe
|
||||||
|
les/ @zsfelfoldi
|
||||||
|
light/ @zsfelfoldi
|
||||||
mobile/ @karalabe
|
mobile/ @karalabe
|
||||||
p2p/ @fjl @zsfelfoldi
|
p2p/ @fjl @zsfelfoldi
|
||||||
|
17
.github/stale.yml
vendored
Normal file
17
.github/stale.yml
vendored
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
# Number of days of inactivity before an issue becomes stale
|
||||||
|
daysUntilStale: 366
|
||||||
|
# Number of days of inactivity before a stale issue is closed
|
||||||
|
daysUntilClose: 42
|
||||||
|
# Issues with these labels will never be considered stale
|
||||||
|
exemptLabels:
|
||||||
|
- pinned
|
||||||
|
- security
|
||||||
|
# Label to use when marking an issue as stale
|
||||||
|
staleLabel: stale
|
||||||
|
# Comment to post when marking an issue as stale. Set to `false` to disable
|
||||||
|
markComment: >
|
||||||
|
This issue has been automatically marked as stale because it has not had
|
||||||
|
recent activity. It will be closed if no further activity occurs. Thank you
|
||||||
|
for your contributions.
|
||||||
|
# Comment to post when closing a stale issue. Set to `false` to disable
|
||||||
|
closeComment: false
|
3
.gitignore
vendored
3
.gitignore
vendored
@ -34,6 +34,9 @@ profile.cov
|
|||||||
# IdeaIDE
|
# IdeaIDE
|
||||||
.idea
|
.idea
|
||||||
|
|
||||||
|
# VS Code
|
||||||
|
.vscode
|
||||||
|
|
||||||
# dashboard
|
# dashboard
|
||||||
/dashboard/assets/flow-typed
|
/dashboard/assets/flow-typed
|
||||||
/dashboard/assets/node_modules
|
/dashboard/assets/node_modules
|
||||||
|
11
.travis.yml
11
.travis.yml
@ -3,17 +3,6 @@ go_import_path: github.com/ethereum/go-ethereum
|
|||||||
sudo: false
|
sudo: false
|
||||||
matrix:
|
matrix:
|
||||||
include:
|
include:
|
||||||
- os: linux
|
|
||||||
dist: trusty
|
|
||||||
sudo: required
|
|
||||||
go: 1.7.x
|
|
||||||
script:
|
|
||||||
- sudo modprobe fuse
|
|
||||||
- sudo chmod 666 /dev/fuse
|
|
||||||
- sudo chown root:$USER /etc/fuse.conf
|
|
||||||
- go run build/ci.go install
|
|
||||||
- go run build/ci.go test -coverage
|
|
||||||
|
|
||||||
- os: linux
|
- os: linux
|
||||||
dist: trusty
|
dist: trusty
|
||||||
sudo: required
|
sudo: required
|
||||||
|
@ -5,6 +5,8 @@ Official golang implementation of the Ethereum protocol.
|
|||||||
[![API Reference](
|
[![API Reference](
|
||||||
https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667
|
https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667
|
||||||
)](https://godoc.org/github.com/ethereum/go-ethereum)
|
)](https://godoc.org/github.com/ethereum/go-ethereum)
|
||||||
|
[![Go Report Card](https://goreportcard.com/badge/github.com/ethereum/go-ethereum)](https://goreportcard.com/report/github.com/ethereum/go-ethereum)
|
||||||
|
[![Travis](https://travis-ci.org/ethereum/go-ethereum.svg?branch=master)](https://travis-ci.org/ethereum/go-ethereum)
|
||||||
[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/ethereum/go-ethereum?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
|
[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/ethereum/go-ethereum?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
|
||||||
|
|
||||||
Automated builds are available for stable releases and the unstable master branch.
|
Automated builds are available for stable releases and the unstable master branch.
|
||||||
|
2
VERSION
2
VERSION
@ -1 +1 @@
|
|||||||
1.8.1
|
1.8.3
|
||||||
|
@ -136,11 +136,11 @@ func (abi *ABI) UnmarshalJSON(data []byte) error {
|
|||||||
|
|
||||||
// MethodById looks up a method by the 4-byte id
|
// MethodById looks up a method by the 4-byte id
|
||||||
// returns nil if none found
|
// returns nil if none found
|
||||||
func (abi *ABI) MethodById(sigdata []byte) *Method {
|
func (abi *ABI) MethodById(sigdata []byte) (*Method, error) {
|
||||||
for _, method := range abi.Methods {
|
for _, method := range abi.Methods {
|
||||||
if bytes.Equal(method.Id(), sigdata[:4]) {
|
if bytes.Equal(method.Id(), sigdata[:4]) {
|
||||||
return &method
|
return &method, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil, fmt.Errorf("no method with id: %#x", sigdata[:4])
|
||||||
}
|
}
|
||||||
|
@ -702,7 +702,11 @@ func TestABI_MethodById(t *testing.T) {
|
|||||||
}
|
}
|
||||||
for name, m := range abi.Methods {
|
for name, m := range abi.Methods {
|
||||||
a := fmt.Sprintf("%v", m)
|
a := fmt.Sprintf("%v", m)
|
||||||
b := fmt.Sprintf("%v", abi.MethodById(m.Id()))
|
m2, err := abi.MethodById(m.Id())
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to look up ABI method: %v", err)
|
||||||
|
}
|
||||||
|
b := fmt.Sprintf("%v", m2)
|
||||||
if a != b {
|
if a != b {
|
||||||
t.Errorf("Method %v (id %v) not 'findable' by id in ABI", name, common.ToHex(m.Id()))
|
t.Errorf("Method %v (id %v) not 'findable' by id in ABI", name, common.ToHex(m.Id()))
|
||||||
}
|
}
|
||||||
|
@ -67,6 +67,17 @@ func (arguments Arguments) LengthNonIndexed() int {
|
|||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NonIndexed returns the arguments with indexed arguments filtered out
|
||||||
|
func (arguments Arguments) NonIndexed() Arguments {
|
||||||
|
var ret []Argument
|
||||||
|
for _, arg := range arguments {
|
||||||
|
if !arg.Indexed {
|
||||||
|
ret = append(ret, arg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
// isTuple returns true for non-atomic constructs, like (uint,uint) or uint[]
|
// isTuple returns true for non-atomic constructs, like (uint,uint) or uint[]
|
||||||
func (arguments Arguments) isTuple() bool {
|
func (arguments Arguments) isTuple() bool {
|
||||||
return len(arguments) > 1
|
return len(arguments) > 1
|
||||||
@ -74,21 +85,25 @@ func (arguments Arguments) isTuple() bool {
|
|||||||
|
|
||||||
// Unpack performs the operation hexdata -> Go format
|
// Unpack performs the operation hexdata -> Go format
|
||||||
func (arguments Arguments) Unpack(v interface{}, data []byte) error {
|
func (arguments Arguments) Unpack(v interface{}, data []byte) error {
|
||||||
if arguments.isTuple() {
|
|
||||||
return arguments.unpackTuple(v, data)
|
|
||||||
}
|
|
||||||
return arguments.unpackAtomic(v, data)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (arguments Arguments) unpackTuple(v interface{}, output []byte) error {
|
|
||||||
// make sure the passed value is arguments pointer
|
// make sure the passed value is arguments pointer
|
||||||
valueOf := reflect.ValueOf(v)
|
if reflect.Ptr != reflect.ValueOf(v).Kind() {
|
||||||
if reflect.Ptr != valueOf.Kind() {
|
|
||||||
return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
|
return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
|
||||||
}
|
}
|
||||||
|
marshalledValues, err := arguments.UnpackValues(data)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if arguments.isTuple() {
|
||||||
|
return arguments.unpackTuple(v, marshalledValues)
|
||||||
|
}
|
||||||
|
return arguments.unpackAtomic(v, marshalledValues)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interface{}) error {
|
||||||
|
|
||||||
var (
|
var (
|
||||||
value = valueOf.Elem()
|
value = reflect.ValueOf(v).Elem()
|
||||||
typ = value.Type()
|
typ = value.Type()
|
||||||
kind = value.Kind()
|
kind = value.Kind()
|
||||||
)
|
)
|
||||||
@ -110,30 +125,9 @@ func (arguments Arguments) unpackTuple(v interface{}, output []byte) error {
|
|||||||
exists[field] = true
|
exists[field] = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// `i` counts the nonindexed arguments.
|
for i, arg := range arguments.NonIndexed() {
|
||||||
// `j` counts the number of complex types.
|
|
||||||
// both `i` and `j` are used to to correctly compute `data` offset.
|
|
||||||
|
|
||||||
i, j := -1, 0
|
reflectValue := reflect.ValueOf(marshalledValues[i])
|
||||||
for _, arg := range arguments {
|
|
||||||
|
|
||||||
if arg.Indexed {
|
|
||||||
// can't read, continue
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
i++
|
|
||||||
marshalledValue, err := toGoType((i+j)*32, arg.Type, output)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if arg.Type.T == ArrayTy {
|
|
||||||
// combined index ('i' + 'j') need to be adjusted only by size of array, thus
|
|
||||||
// we need to decrement 'j' because 'i' was incremented
|
|
||||||
j += arg.Type.Size - 1
|
|
||||||
}
|
|
||||||
|
|
||||||
reflectValue := reflect.ValueOf(marshalledValue)
|
|
||||||
|
|
||||||
switch kind {
|
switch kind {
|
||||||
case reflect.Struct:
|
case reflect.Struct:
|
||||||
@ -166,34 +160,72 @@ func (arguments Arguments) unpackTuple(v interface{}, output []byte) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// unpackAtomic unpacks ( hexdata -> go ) a single value
|
// unpackAtomic unpacks ( hexdata -> go ) a single value
|
||||||
func (arguments Arguments) unpackAtomic(v interface{}, output []byte) error {
|
func (arguments Arguments) unpackAtomic(v interface{}, marshalledValues []interface{}) error {
|
||||||
// make sure the passed value is arguments pointer
|
if len(marshalledValues) != 1 {
|
||||||
valueOf := reflect.ValueOf(v)
|
return fmt.Errorf("abi: wrong length, expected single value, got %d", len(marshalledValues))
|
||||||
if reflect.Ptr != valueOf.Kind() {
|
|
||||||
return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
|
|
||||||
}
|
}
|
||||||
arg := arguments[0]
|
elem := reflect.ValueOf(v).Elem()
|
||||||
if arg.Indexed {
|
reflectValue := reflect.ValueOf(marshalledValues[0])
|
||||||
return fmt.Errorf("abi: attempting to unpack indexed variable into element.")
|
return set(elem, reflectValue, arguments.NonIndexed()[0])
|
||||||
}
|
|
||||||
|
|
||||||
value := valueOf.Elem()
|
|
||||||
|
|
||||||
marshalledValue, err := toGoType(0, arg.Type, output)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return set(value, reflect.ValueOf(marshalledValue), arg)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Unpack performs the operation Go format -> Hexdata
|
// Computes the full size of an array;
|
||||||
|
// i.e. counting nested arrays, which count towards size for unpacking.
|
||||||
|
func getArraySize(arr *Type) int {
|
||||||
|
size := arr.Size
|
||||||
|
// Arrays can be nested, with each element being the same size
|
||||||
|
arr = arr.Elem
|
||||||
|
for arr.T == ArrayTy {
|
||||||
|
// Keep multiplying by elem.Size while the elem is an array.
|
||||||
|
size *= arr.Size
|
||||||
|
arr = arr.Elem
|
||||||
|
}
|
||||||
|
// Now we have the full array size, including its children.
|
||||||
|
return size
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnpackValues can be used to unpack ABI-encoded hexdata according to the ABI-specification,
|
||||||
|
// without supplying a struct to unpack into. Instead, this method returns a list containing the
|
||||||
|
// values. An atomic argument will be a list with one element.
|
||||||
|
func (arguments Arguments) UnpackValues(data []byte) ([]interface{}, error) {
|
||||||
|
retval := make([]interface{}, 0, arguments.LengthNonIndexed())
|
||||||
|
virtualArgs := 0
|
||||||
|
for index, arg := range arguments.NonIndexed() {
|
||||||
|
marshalledValue, err := toGoType((index+virtualArgs)*32, arg.Type, data)
|
||||||
|
if arg.Type.T == ArrayTy {
|
||||||
|
// If we have a static array, like [3]uint256, these are coded as
|
||||||
|
// just like uint256,uint256,uint256.
|
||||||
|
// This means that we need to add two 'virtual' arguments when
|
||||||
|
// we count the index from now on.
|
||||||
|
//
|
||||||
|
// Array values nested multiple levels deep are also encoded inline:
|
||||||
|
// [2][3]uint256: uint256,uint256,uint256,uint256,uint256,uint256
|
||||||
|
//
|
||||||
|
// Calculate the full array size to get the correct offset for the next argument.
|
||||||
|
// Decrement it by 1, as the normal index increment is still applied.
|
||||||
|
virtualArgs += getArraySize(&arg.Type) - 1
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
retval = append(retval, marshalledValue)
|
||||||
|
}
|
||||||
|
return retval, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// PackValues performs the operation Go format -> Hexdata
|
||||||
|
// It is the semantic opposite of UnpackValues
|
||||||
|
func (arguments Arguments) PackValues(args []interface{}) ([]byte, error) {
|
||||||
|
return arguments.Pack(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pack performs the operation Go format -> Hexdata
|
||||||
func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
|
func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
|
||||||
// Make sure arguments match up and pack them
|
// Make sure arguments match up and pack them
|
||||||
abiArgs := arguments
|
abiArgs := arguments
|
||||||
if len(args) != len(abiArgs) {
|
if len(args) != len(abiArgs) {
|
||||||
return nil, fmt.Errorf("argument count mismatch: %d for %d", len(args), len(abiArgs))
|
return nil, fmt.Errorf("argument count mismatch: %d for %d", len(args), len(abiArgs))
|
||||||
}
|
}
|
||||||
|
|
||||||
// variable input is the output appended at the end of packed
|
// variable input is the output appended at the end of packed
|
||||||
// output. This is used for strings and bytes types input.
|
// output. This is used for strings and bytes types input.
|
||||||
var variableInput []byte
|
var variableInput []byte
|
||||||
@ -207,7 +239,6 @@ func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
|
|||||||
inputOffset += 32
|
inputOffset += 32
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var ret []byte
|
var ret []byte
|
||||||
for i, a := range args {
|
for i, a := range args {
|
||||||
input := abiArgs[i]
|
input := abiArgs[i]
|
||||||
@ -216,7 +247,6 @@ func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// check for a slice type (string, bytes, slice)
|
// check for a slice type (string, bytes, slice)
|
||||||
if input.Type.requiresLengthPrefix() {
|
if input.Type.requiresLengthPrefix() {
|
||||||
// calculate the offset
|
// calculate the offset
|
||||||
|
@ -428,10 +428,23 @@ func (fb *filterBackend) HeaderByNumber(ctx context.Context, block rpc.BlockNumb
|
|||||||
}
|
}
|
||||||
return fb.bc.GetHeaderByNumber(uint64(block.Int64())), nil
|
return fb.bc.GetHeaderByNumber(uint64(block.Int64())), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fb *filterBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
|
func (fb *filterBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
|
||||||
return core.GetBlockReceipts(fb.db, hash, core.GetBlockNumber(fb.db, hash)), nil
|
return core.GetBlockReceipts(fb.db, hash, core.GetBlockNumber(fb.db, hash)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) {
|
||||||
|
receipts := core.GetBlockReceipts(fb.db, hash, core.GetBlockNumber(fb.db, hash))
|
||||||
|
if receipts == nil {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
logs := make([][]*types.Log, len(receipts))
|
||||||
|
for i, receipt := range receipts {
|
||||||
|
logs[i] = receipt.Logs
|
||||||
|
}
|
||||||
|
return logs, nil
|
||||||
|
}
|
||||||
|
|
||||||
func (fb *filterBackend) SubscribeTxPreEvent(ch chan<- core.TxPreEvent) event.Subscription {
|
func (fb *filterBackend) SubscribeTxPreEvent(ch chan<- core.TxPreEvent) event.Subscription {
|
||||||
return event.NewSubscription(func(quit <-chan struct{}) error {
|
return event.NewSubscription(func(quit <-chan struct{}) error {
|
||||||
<-quit
|
<-quit
|
||||||
|
@ -164,118 +164,147 @@ var bindType = map[Lang]func(kind abi.Type) string{
|
|||||||
LangJava: bindTypeJava,
|
LangJava: bindTypeJava,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Helper function for the binding generators.
|
||||||
|
// It reads the unmatched characters after the inner type-match,
|
||||||
|
// (since the inner type is a prefix of the total type declaration),
|
||||||
|
// looks for valid arrays (possibly a dynamic one) wrapping the inner type,
|
||||||
|
// and returns the sizes of these arrays.
|
||||||
|
//
|
||||||
|
// Returned array sizes are in the same order as solidity signatures; inner array size first.
|
||||||
|
// Array sizes may also be "", indicating a dynamic array.
|
||||||
|
func wrapArray(stringKind string, innerLen int, innerMapping string) (string, []string) {
|
||||||
|
remainder := stringKind[innerLen:]
|
||||||
|
//find all the sizes
|
||||||
|
matches := regexp.MustCompile(`\[(\d*)\]`).FindAllStringSubmatch(remainder, -1)
|
||||||
|
parts := make([]string, 0, len(matches))
|
||||||
|
for _, match := range matches {
|
||||||
|
//get group 1 from the regex match
|
||||||
|
parts = append(parts, match[1])
|
||||||
|
}
|
||||||
|
return innerMapping, parts
|
||||||
|
}
|
||||||
|
|
||||||
|
// Translates the array sizes to a Go-lang declaration of a (nested) array of the inner type.
|
||||||
|
// Simply returns the inner type if arraySizes is empty.
|
||||||
|
func arrayBindingGo(inner string, arraySizes []string) string {
|
||||||
|
out := ""
|
||||||
|
//prepend all array sizes, from outer (end arraySizes) to inner (start arraySizes)
|
||||||
|
for i := len(arraySizes) - 1; i >= 0; i-- {
|
||||||
|
out += "[" + arraySizes[i] + "]"
|
||||||
|
}
|
||||||
|
out += inner
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
// bindTypeGo converts a Solidity type to a Go one. Since there is no clear mapping
|
// bindTypeGo converts a Solidity type to a Go one. Since there is no clear mapping
|
||||||
// from all Solidity types to Go ones (e.g. uint17), those that cannot be exactly
|
// from all Solidity types to Go ones (e.g. uint17), those that cannot be exactly
|
||||||
// mapped will use an upscaled type (e.g. *big.Int).
|
// mapped will use an upscaled type (e.g. *big.Int).
|
||||||
func bindTypeGo(kind abi.Type) string {
|
func bindTypeGo(kind abi.Type) string {
|
||||||
stringKind := kind.String()
|
stringKind := kind.String()
|
||||||
|
innerLen, innerMapping := bindUnnestedTypeGo(stringKind)
|
||||||
|
return arrayBindingGo(wrapArray(stringKind, innerLen, innerMapping))
|
||||||
|
}
|
||||||
|
|
||||||
|
// The inner function of bindTypeGo, this finds the inner type of stringKind.
|
||||||
|
// (Or just the type itself if it is not an array or slice)
|
||||||
|
// The length of the matched part is returned, with the the translated type.
|
||||||
|
func bindUnnestedTypeGo(stringKind string) (int, string) {
|
||||||
|
|
||||||
switch {
|
switch {
|
||||||
case strings.HasPrefix(stringKind, "address"):
|
case strings.HasPrefix(stringKind, "address"):
|
||||||
parts := regexp.MustCompile(`address(\[[0-9]*\])?`).FindStringSubmatch(stringKind)
|
return len("address"), "common.Address"
|
||||||
if len(parts) != 2 {
|
|
||||||
return stringKind
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("%scommon.Address", parts[1])
|
|
||||||
|
|
||||||
case strings.HasPrefix(stringKind, "bytes"):
|
case strings.HasPrefix(stringKind, "bytes"):
|
||||||
parts := regexp.MustCompile(`bytes([0-9]*)(\[[0-9]*\])?`).FindStringSubmatch(stringKind)
|
parts := regexp.MustCompile(`bytes([0-9]*)`).FindStringSubmatch(stringKind)
|
||||||
if len(parts) != 3 {
|
return len(parts[0]), fmt.Sprintf("[%s]byte", parts[1])
|
||||||
return stringKind
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("%s[%s]byte", parts[2], parts[1])
|
|
||||||
|
|
||||||
case strings.HasPrefix(stringKind, "int") || strings.HasPrefix(stringKind, "uint"):
|
case strings.HasPrefix(stringKind, "int") || strings.HasPrefix(stringKind, "uint"):
|
||||||
parts := regexp.MustCompile(`(u)?int([0-9]*)(\[[0-9]*\])?`).FindStringSubmatch(stringKind)
|
parts := regexp.MustCompile(`(u)?int([0-9]*)`).FindStringSubmatch(stringKind)
|
||||||
if len(parts) != 4 {
|
|
||||||
return stringKind
|
|
||||||
}
|
|
||||||
switch parts[2] {
|
switch parts[2] {
|
||||||
case "8", "16", "32", "64":
|
case "8", "16", "32", "64":
|
||||||
return fmt.Sprintf("%s%sint%s", parts[3], parts[1], parts[2])
|
return len(parts[0]), fmt.Sprintf("%sint%s", parts[1], parts[2])
|
||||||
}
|
}
|
||||||
return fmt.Sprintf("%s*big.Int", parts[3])
|
return len(parts[0]), "*big.Int"
|
||||||
|
|
||||||
case strings.HasPrefix(stringKind, "bool") || strings.HasPrefix(stringKind, "string"):
|
case strings.HasPrefix(stringKind, "bool"):
|
||||||
parts := regexp.MustCompile(`([a-z]+)(\[[0-9]*\])?`).FindStringSubmatch(stringKind)
|
return len("bool"), "bool"
|
||||||
if len(parts) != 3 {
|
|
||||||
return stringKind
|
case strings.HasPrefix(stringKind, "string"):
|
||||||
}
|
return len("string"), "string"
|
||||||
return fmt.Sprintf("%s%s", parts[2], parts[1])
|
|
||||||
|
|
||||||
default:
|
default:
|
||||||
return stringKind
|
return len(stringKind), stringKind
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Translates the array sizes to a Java declaration of a (nested) array of the inner type.
|
||||||
|
// Simply returns the inner type if arraySizes is empty.
|
||||||
|
func arrayBindingJava(inner string, arraySizes []string) string {
|
||||||
|
// Java array type declarations do not include the length.
|
||||||
|
return inner + strings.Repeat("[]", len(arraySizes))
|
||||||
|
}
|
||||||
|
|
||||||
// bindTypeJava converts a Solidity type to a Java one. Since there is no clear mapping
|
// bindTypeJava converts a Solidity type to a Java one. Since there is no clear mapping
|
||||||
// from all Solidity types to Java ones (e.g. uint17), those that cannot be exactly
|
// from all Solidity types to Java ones (e.g. uint17), those that cannot be exactly
|
||||||
// mapped will use an upscaled type (e.g. BigDecimal).
|
// mapped will use an upscaled type (e.g. BigDecimal).
|
||||||
func bindTypeJava(kind abi.Type) string {
|
func bindTypeJava(kind abi.Type) string {
|
||||||
stringKind := kind.String()
|
stringKind := kind.String()
|
||||||
|
innerLen, innerMapping := bindUnnestedTypeJava(stringKind)
|
||||||
|
return arrayBindingJava(wrapArray(stringKind, innerLen, innerMapping))
|
||||||
|
}
|
||||||
|
|
||||||
|
// The inner function of bindTypeJava, this finds the inner type of stringKind.
|
||||||
|
// (Or just the type itself if it is not an array or slice)
|
||||||
|
// The length of the matched part is returned, with the the translated type.
|
||||||
|
func bindUnnestedTypeJava(stringKind string) (int, string) {
|
||||||
|
|
||||||
switch {
|
switch {
|
||||||
case strings.HasPrefix(stringKind, "address"):
|
case strings.HasPrefix(stringKind, "address"):
|
||||||
parts := regexp.MustCompile(`address(\[[0-9]*\])?`).FindStringSubmatch(stringKind)
|
parts := regexp.MustCompile(`address(\[[0-9]*\])?`).FindStringSubmatch(stringKind)
|
||||||
if len(parts) != 2 {
|
if len(parts) != 2 {
|
||||||
return stringKind
|
return len(stringKind), stringKind
|
||||||
}
|
}
|
||||||
if parts[1] == "" {
|
if parts[1] == "" {
|
||||||
return fmt.Sprintf("Address")
|
return len("address"), "Address"
|
||||||
}
|
}
|
||||||
return fmt.Sprintf("Addresses")
|
return len(parts[0]), "Addresses"
|
||||||
|
|
||||||
case strings.HasPrefix(stringKind, "bytes"):
|
case strings.HasPrefix(stringKind, "bytes"):
|
||||||
parts := regexp.MustCompile(`bytes([0-9]*)(\[[0-9]*\])?`).FindStringSubmatch(stringKind)
|
parts := regexp.MustCompile(`bytes([0-9]*)`).FindStringSubmatch(stringKind)
|
||||||
if len(parts) != 3 {
|
if len(parts) != 2 {
|
||||||
return stringKind
|
return len(stringKind), stringKind
|
||||||
}
|
}
|
||||||
if parts[2] != "" {
|
return len(parts[0]), "byte[]"
|
||||||
return "byte[][]"
|
|
||||||
}
|
|
||||||
return "byte[]"
|
|
||||||
|
|
||||||
case strings.HasPrefix(stringKind, "int") || strings.HasPrefix(stringKind, "uint"):
|
case strings.HasPrefix(stringKind, "int") || strings.HasPrefix(stringKind, "uint"):
|
||||||
parts := regexp.MustCompile(`(u)?int([0-9]*)(\[[0-9]*\])?`).FindStringSubmatch(stringKind)
|
//Note that uint and int (without digits) are also matched,
|
||||||
if len(parts) != 4 {
|
// these are size 256, and will translate to BigInt (the default).
|
||||||
return stringKind
|
parts := regexp.MustCompile(`(u)?int([0-9]*)`).FindStringSubmatch(stringKind)
|
||||||
|
if len(parts) != 3 {
|
||||||
|
return len(stringKind), stringKind
|
||||||
}
|
}
|
||||||
switch parts[2] {
|
|
||||||
case "8", "16", "32", "64":
|
namedSize := map[string]string{
|
||||||
if parts[1] == "" {
|
"8": "byte",
|
||||||
if parts[3] == "" {
|
"16": "short",
|
||||||
return fmt.Sprintf("int%s", parts[2])
|
"32": "int",
|
||||||
}
|
"64": "long",
|
||||||
return fmt.Sprintf("int%s[]", parts[2])
|
}[parts[2]]
|
||||||
}
|
|
||||||
|
//default to BigInt
|
||||||
|
if namedSize == "" {
|
||||||
|
namedSize = "BigInt"
|
||||||
}
|
}
|
||||||
if parts[3] == "" {
|
return len(parts[0]), namedSize
|
||||||
return fmt.Sprintf("BigInt")
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("BigInts")
|
|
||||||
|
|
||||||
case strings.HasPrefix(stringKind, "bool"):
|
case strings.HasPrefix(stringKind, "bool"):
|
||||||
parts := regexp.MustCompile(`bool(\[[0-9]*\])?`).FindStringSubmatch(stringKind)
|
return len("bool"), "boolean"
|
||||||
if len(parts) != 2 {
|
|
||||||
return stringKind
|
|
||||||
}
|
|
||||||
if parts[1] == "" {
|
|
||||||
return fmt.Sprintf("bool")
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("bool[]")
|
|
||||||
|
|
||||||
case strings.HasPrefix(stringKind, "string"):
|
case strings.HasPrefix(stringKind, "string"):
|
||||||
parts := regexp.MustCompile(`string(\[[0-9]*\])?`).FindStringSubmatch(stringKind)
|
return len("string"), "String"
|
||||||
if len(parts) != 2 {
|
|
||||||
return stringKind
|
|
||||||
}
|
|
||||||
if parts[1] == "" {
|
|
||||||
return fmt.Sprintf("String")
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("String[]")
|
|
||||||
|
|
||||||
default:
|
default:
|
||||||
return stringKind
|
return len(stringKind), stringKind
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -325,11 +354,13 @@ func namedTypeJava(javaKind string, solKind abi.Type) string {
|
|||||||
return "String"
|
return "String"
|
||||||
case "string[]":
|
case "string[]":
|
||||||
return "Strings"
|
return "Strings"
|
||||||
case "bool":
|
case "boolean":
|
||||||
return "Bool"
|
return "Bool"
|
||||||
case "bool[]":
|
case "boolean[]":
|
||||||
return "Bools"
|
return "Bools"
|
||||||
case "BigInt":
|
case "BigInt[]":
|
||||||
|
return "BigInts"
|
||||||
|
default:
|
||||||
parts := regexp.MustCompile(`(u)?int([0-9]*)(\[[0-9]*\])?`).FindStringSubmatch(solKind.String())
|
parts := regexp.MustCompile(`(u)?int([0-9]*)(\[[0-9]*\])?`).FindStringSubmatch(solKind.String())
|
||||||
if len(parts) != 4 {
|
if len(parts) != 4 {
|
||||||
return javaKind
|
return javaKind
|
||||||
@ -344,8 +375,6 @@ func namedTypeJava(javaKind string, solKind abi.Type) string {
|
|||||||
default:
|
default:
|
||||||
return javaKind
|
return javaKind
|
||||||
}
|
}
|
||||||
default:
|
|
||||||
return javaKind
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -737,6 +737,72 @@ var bindTests = []struct {
|
|||||||
}
|
}
|
||||||
`,
|
`,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
`DeeplyNestedArray`,
|
||||||
|
`
|
||||||
|
contract DeeplyNestedArray {
|
||||||
|
uint64[3][4][5] public deepUint64Array;
|
||||||
|
function storeDeepUintArray(uint64[3][4][5] arr) public {
|
||||||
|
deepUint64Array = arr;
|
||||||
|
}
|
||||||
|
function retrieveDeepArray() public view returns (uint64[3][4][5]) {
|
||||||
|
return deepUint64Array;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`,
|
||||||
|
`6060604052341561000f57600080fd5b6106438061001e6000396000f300606060405260043610610057576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff168063344248551461005c5780638ed4573a1461011457806398ed1856146101ab575b600080fd5b341561006757600080fd5b610112600480806107800190600580602002604051908101604052809291906000905b828210156101055783826101800201600480602002604051908101604052809291906000905b828210156100f25783826060020160038060200260405190810160405280929190826003602002808284378201915050505050815260200190600101906100b0565b505050508152602001906001019061008a565b5050505091905050610208565b005b341561011f57600080fd5b61012761021d565b604051808260056000925b8184101561019b578284602002015160046000925b8184101561018d5782846020020151600360200280838360005b8381101561017c578082015181840152602081019050610161565b505050509050019260010192610147565b925050509260010192610132565b9250505091505060405180910390f35b34156101b657600080fd5b6101de6004808035906020019091908035906020019091908035906020019091905050610309565b604051808267ffffffffffffffff1667ffffffffffffffff16815260200191505060405180910390f35b80600090600561021992919061035f565b5050565b6102256103b0565b6000600580602002604051908101604052809291906000905b8282101561030057838260040201600480602002604051908101604052809291906000905b828210156102ed578382016003806020026040519081016040528092919082600380156102d9576020028201916000905b82829054906101000a900467ffffffffffffffff1667ffffffffffffffff16815260200190600801906020826007010492830192600103820291508084116102945790505b505050505081526020019060010190610263565b505050508152602001906001019061023e565b50505050905090565b60008360058110151561031857fe5b600402018260048110151561032957fe5b018160038110151561033757fe5b6004918282040191900660080292509250509054906101000a900467ffffffffffffffff1681565b826005600402810192821561039f579160200282015b8281111561039e5782518290600461038e9291906103df565b5091602001919060040190610375565b5b5090506103ac919061042d565b5090565b610780604051908101604052806005905b6103c9610459565
b8152602001906001900390816103c15790505090565b826004810192821561041c579160200282015b8281111561041b5782518290600361040b929190610488565b50916020019190600101906103f2565b5b5090506104299190610536565b5090565b61045691905b8082111561045257600081816104499190610562565b50600401610433565b5090565b90565b610180604051908101604052806004905b6104726105a7565b81526020019060019003908161046a5790505090565b82600380016004900481019282156105255791602002820160005b838211156104ef57835183826101000a81548167ffffffffffffffff021916908367ffffffffffffffff16021790555092602001926008016020816007010492830192600103026104a3565b80156105235782816101000a81549067ffffffffffffffff02191690556008016020816007010492830192600103026104ef565b505b50905061053291906105d9565b5090565b61055f91905b8082111561055b57600081816105529190610610565b5060010161053c565b5090565b90565b50600081816105719190610610565b50600101600081816105839190610610565b50600101600081816105959190610610565b5060010160006105a59190610610565b565b6060604051908101604052806003905b600067ffffffffffffffff168152602001906001900390816105b75790505090565b61060d91905b8082111561060957600081816101000a81549067ffffffffffffffff0219169055506001016105df565b5090565b90565b50600090555600a165627a7a7230582087e5a43f6965ab6ef7a4ff056ab80ed78fd8c15cff57715a1bf34ec76a93661c0029`,
|
||||||
|
`[{"constant":false,"inputs":[{"name":"arr","type":"uint64[3][4][5]"}],"name":"storeDeepUintArray","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"retrieveDeepArray","outputs":[{"name":"","type":"uint64[3][4][5]"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"","type":"uint256"},{"name":"","type":"uint256"},{"name":"","type":"uint256"}],"name":"deepUint64Array","outputs":[{"name":"","type":"uint64"}],"payable":false,"stateMutability":"view","type":"function"}]`,
|
||||||
|
`
|
||||||
|
// Generate a new random account and a funded simulator
|
||||||
|
key, _ := crypto.GenerateKey()
|
||||||
|
auth := bind.NewKeyedTransactor(key)
|
||||||
|
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}})
|
||||||
|
|
||||||
|
//deploy the test contract
|
||||||
|
_, _, testContract, err := DeployDeeplyNestedArray(auth, sim)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to deploy test contract: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Finish deploy.
|
||||||
|
sim.Commit()
|
||||||
|
|
||||||
|
//Create coordinate-filled array, for testing purposes.
|
||||||
|
testArr := [5][4][3]uint64{}
|
||||||
|
for i := 0; i < 5; i++ {
|
||||||
|
testArr[i] = [4][3]uint64{}
|
||||||
|
for j := 0; j < 4; j++ {
|
||||||
|
testArr[i][j] = [3]uint64{}
|
||||||
|
for k := 0; k < 3; k++ {
|
||||||
|
//pack the coordinates, each array value will be unique, and can be validated easily.
|
||||||
|
testArr[i][j][k] = uint64(i) << 16 | uint64(j) << 8 | uint64(k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := testContract.StoreDeepUintArray(&bind.TransactOpts{
|
||||||
|
From: auth.From,
|
||||||
|
Signer: auth.Signer,
|
||||||
|
}, testArr); err != nil {
|
||||||
|
t.Fatalf("Failed to store nested array in test contract: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
sim.Commit()
|
||||||
|
|
||||||
|
retrievedArr, err := testContract.RetrieveDeepArray(&bind.CallOpts{
|
||||||
|
From: auth.From,
|
||||||
|
Pending: false,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to retrieve nested array from test contract: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
//quick check to see if contents were copied
|
||||||
|
// (See accounts/abi/unpack_test.go for more extensive testing)
|
||||||
|
if retrievedArr[4][3][2] != testArr[4][3][2] {
|
||||||
|
t.Fatalf("Retrieved value does not match expected value! got: %d, expected: %d. %v", retrievedArr[4][3][2], testArr[4][3][2], err)
|
||||||
|
}`,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
// Tests that packages generated by the binder can be successfully compiled and
|
// Tests that packages generated by the binder can be successfully compiled and
|
||||||
|
@ -299,6 +299,11 @@ func TestPack(t *testing.T) {
|
|||||||
[32]byte{1},
|
[32]byte{1},
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"uint32[2][3][4]",
|
||||||
|
[4][3][2]uint32{{{1, 2}, {3, 4}, {5, 6}}, {{7, 8}, {9, 10}, {11, 12}}, {{13, 14}, {15, 16}, {17, 18}}, {{19, 20}, {21, 22}, {23, 24}}},
|
||||||
|
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000050000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000000f000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000001300000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000000000000000000000000000015000000000000000000000000000000000000000000000000000000000000001600000000000000000000000000000000000000000000000000000000000000170000000000000000000000000000000000000000000000000000000000000018"),
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"address[]",
|
"address[]",
|
||||||
[]common.Address{{1}, {2}},
|
[]common.Address{{1}, {2}},
|
||||||
|
@ -93,15 +93,28 @@ func readFixedBytes(t Type, word []byte) (interface{}, error) {
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func getFullElemSize(elem *Type) int {
|
||||||
|
//all other should be counted as 32 (slices have pointers to respective elements)
|
||||||
|
size := 32
|
||||||
|
//arrays wrap it, each element being the same size
|
||||||
|
for elem.T == ArrayTy {
|
||||||
|
size *= elem.Size
|
||||||
|
elem = elem.Elem
|
||||||
|
}
|
||||||
|
return size
|
||||||
|
}
|
||||||
|
|
||||||
// iteratively unpack elements
|
// iteratively unpack elements
|
||||||
func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error) {
|
func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error) {
|
||||||
|
if size < 0 {
|
||||||
|
return nil, fmt.Errorf("cannot marshal input to array, size is negative (%d)", size)
|
||||||
|
}
|
||||||
if start+32*size > len(output) {
|
if start+32*size > len(output) {
|
||||||
return nil, fmt.Errorf("abi: cannot marshal in to go array: offset %d would go over slice boundary (len=%d)", len(output), start+32*size)
|
return nil, fmt.Errorf("abi: cannot marshal in to go array: offset %d would go over slice boundary (len=%d)", len(output), start+32*size)
|
||||||
}
|
}
|
||||||
|
|
||||||
// this value will become our slice or our array, depending on the type
|
// this value will become our slice or our array, depending on the type
|
||||||
var refSlice reflect.Value
|
var refSlice reflect.Value
|
||||||
slice := output[start : start+size*32]
|
|
||||||
|
|
||||||
if t.T == SliceTy {
|
if t.T == SliceTy {
|
||||||
// declare our slice
|
// declare our slice
|
||||||
@ -113,15 +126,20 @@ func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error)
|
|||||||
return nil, fmt.Errorf("abi: invalid type in array/slice unpacking stage")
|
return nil, fmt.Errorf("abi: invalid type in array/slice unpacking stage")
|
||||||
}
|
}
|
||||||
|
|
||||||
for i, j := start, 0; j*32 < len(slice); i, j = i+32, j+1 {
|
// Arrays have packed elements, resulting in longer unpack steps.
|
||||||
// this corrects the arrangement so that we get all the underlying array values
|
// Slices have just 32 bytes per element (pointing to the contents).
|
||||||
if t.Elem.T == ArrayTy && j != 0 {
|
elemSize := 32
|
||||||
i = start + t.Elem.Size*32*j
|
if t.T == ArrayTy {
|
||||||
}
|
elemSize = getFullElemSize(t.Elem)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, j := start, 0; j < size; i, j = i+elemSize, j+1 {
|
||||||
|
|
||||||
inter, err := toGoType(i, *t.Elem, output)
|
inter, err := toGoType(i, *t.Elem, output)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// append the item to our reflect slice
|
// append the item to our reflect slice
|
||||||
refSlice.Index(j).Set(reflect.ValueOf(inter))
|
refSlice.Index(j).Set(reflect.ValueOf(inter))
|
||||||
}
|
}
|
||||||
@ -181,16 +199,32 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
|
|||||||
|
|
||||||
// interprets a 32 byte slice as an offset and then determines which indice to look to decode the type.
|
// interprets a 32 byte slice as an offset and then determines which indice to look to decode the type.
|
||||||
func lengthPrefixPointsTo(index int, output []byte) (start int, length int, err error) {
|
func lengthPrefixPointsTo(index int, output []byte) (start int, length int, err error) {
|
||||||
offset := int(binary.BigEndian.Uint64(output[index+24 : index+32]))
|
bigOffsetEnd := big.NewInt(0).SetBytes(output[index : index+32])
|
||||||
if offset+32 > len(output) {
|
bigOffsetEnd.Add(bigOffsetEnd, common.Big32)
|
||||||
return 0, 0, fmt.Errorf("abi: cannot marshal in to go slice: offset %d would go over slice boundary (len=%d)", len(output), offset+32)
|
outputLength := big.NewInt(int64(len(output)))
|
||||||
}
|
|
||||||
length = int(binary.BigEndian.Uint64(output[offset+24 : offset+32]))
|
|
||||||
if offset+32+length > len(output) {
|
|
||||||
return 0, 0, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %d require %d", len(output), offset+32+length)
|
|
||||||
}
|
|
||||||
start = offset + 32
|
|
||||||
|
|
||||||
//fmt.Printf("LENGTH PREFIX INFO: \nsize: %v\noffset: %v\nstart: %v\n", length, offset, start)
|
if bigOffsetEnd.Cmp(outputLength) > 0 {
|
||||||
|
return 0, 0, fmt.Errorf("abi: cannot marshal in to go slice: offset %v would go over slice boundary (len=%v)", bigOffsetEnd, outputLength)
|
||||||
|
}
|
||||||
|
|
||||||
|
if bigOffsetEnd.BitLen() > 63 {
|
||||||
|
return 0, 0, fmt.Errorf("abi offset larger than int64: %v", bigOffsetEnd)
|
||||||
|
}
|
||||||
|
|
||||||
|
offsetEnd := int(bigOffsetEnd.Uint64())
|
||||||
|
lengthBig := big.NewInt(0).SetBytes(output[offsetEnd-32 : offsetEnd])
|
||||||
|
|
||||||
|
totalSize := big.NewInt(0)
|
||||||
|
totalSize.Add(totalSize, bigOffsetEnd)
|
||||||
|
totalSize.Add(totalSize, lengthBig)
|
||||||
|
if totalSize.BitLen() > 63 {
|
||||||
|
return 0, 0, fmt.Errorf("abi length larger than int64: %v", totalSize)
|
||||||
|
}
|
||||||
|
|
||||||
|
if totalSize.Cmp(outputLength) > 0 {
|
||||||
|
return 0, 0, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %v require %v", outputLength, totalSize)
|
||||||
|
}
|
||||||
|
start = int(bigOffsetEnd.Uint64())
|
||||||
|
length = int(lengthBig.Uint64())
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -130,7 +130,7 @@ var unpackTests = []unpackTest{
|
|||||||
{
|
{
|
||||||
def: `[{"type": "bytes32"}]`,
|
def: `[{"type": "bytes32"}]`,
|
||||||
enc: "0100000000000000000000000000000000000000000000000000000000000000",
|
enc: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
want: common.HexToHash("0100000000000000000000000000000000000000000000000000000000000000"),
|
want: [32]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
def: `[{"type": "function"}]`,
|
def: `[{"type": "function"}]`,
|
||||||
@ -189,6 +189,11 @@ var unpackTests = []unpackTest{
|
|||||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||||
want: [2]uint32{1, 2},
|
want: [2]uint32{1, 2},
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
def: `[{"type": "uint32[2][3][4]"}]`,
|
||||||
|
enc: "000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000050000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000000f000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000001300000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000000000000000000000000000015000000000000000000000000000000000000000000000000000000000000001600000000000000000000000000000000000000000000000000000000000000170000000000000000000000000000000000000000000000000000000000000018",
|
||||||
|
want: [4][3][2]uint32{{{1, 2}, {3, 4}, {5, 6}}, {{7, 8}, {9, 10}, {11, 12}}, {{13, 14}, {15, 16}, {17, 18}}, {{19, 20}, {21, 22}, {23, 24}}},
|
||||||
|
},
|
||||||
{
|
{
|
||||||
def: `[{"type": "uint64[]"}]`,
|
def: `[{"type": "uint64[]"}]`,
|
||||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||||
@ -435,6 +440,46 @@ func TestMultiReturnWithArray(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestMultiReturnWithDeeplyNestedArray(t *testing.T) {
|
||||||
|
// Similar to TestMultiReturnWithArray, but with a special case in mind:
|
||||||
|
// values of nested static arrays count towards the size as well, and any element following
|
||||||
|
// after such nested array argument should be read with the correct offset,
|
||||||
|
// so that it does not read content from the previous array argument.
|
||||||
|
const definition = `[{"name" : "multi", "outputs": [{"type": "uint64[3][2][4]"}, {"type": "uint64"}]}]`
|
||||||
|
abi, err := JSON(strings.NewReader(definition))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
buff := new(bytes.Buffer)
|
||||||
|
// construct the test array, each 3 char element is joined with 61 '0' chars,
|
||||||
|
// to from the ((3 + 61) * 0.5) = 32 byte elements in the array.
|
||||||
|
buff.Write(common.Hex2Bytes(strings.Join([]string{
|
||||||
|
"", //empty, to apply the 61-char separator to the first element as well.
|
||||||
|
"111", "112", "113", "121", "122", "123",
|
||||||
|
"211", "212", "213", "221", "222", "223",
|
||||||
|
"311", "312", "313", "321", "322", "323",
|
||||||
|
"411", "412", "413", "421", "422", "423",
|
||||||
|
}, "0000000000000000000000000000000000000000000000000000000000000")))
|
||||||
|
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000009876"))
|
||||||
|
|
||||||
|
ret1, ret1Exp := new([4][2][3]uint64), [4][2][3]uint64{
|
||||||
|
{{0x111, 0x112, 0x113}, {0x121, 0x122, 0x123}},
|
||||||
|
{{0x211, 0x212, 0x213}, {0x221, 0x222, 0x223}},
|
||||||
|
{{0x311, 0x312, 0x313}, {0x321, 0x322, 0x323}},
|
||||||
|
{{0x411, 0x412, 0x413}, {0x421, 0x422, 0x423}},
|
||||||
|
}
|
||||||
|
ret2, ret2Exp := new(uint64), uint64(0x9876)
|
||||||
|
if err := abi.Unpack(&[]interface{}{ret1, ret2}, "multi", buff.Bytes()); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(*ret1, ret1Exp) {
|
||||||
|
t.Error("array result", *ret1, "!= Expected", ret1Exp)
|
||||||
|
}
|
||||||
|
if *ret2 != ret2Exp {
|
||||||
|
t.Error("int result", *ret2, "!= Expected", ret2Exp)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestUnmarshal(t *testing.T) {
|
func TestUnmarshal(t *testing.T) {
|
||||||
const definition = `[
|
const definition = `[
|
||||||
{ "name" : "int", "constant" : false, "outputs": [ { "type": "uint256" } ] },
|
{ "name" : "int", "constant" : false, "outputs": [ { "type": "uint256" } ] },
|
||||||
@ -683,3 +728,73 @@ func TestUnmarshal(t *testing.T) {
|
|||||||
t.Fatal("expected error:", err)
|
t.Fatal("expected error:", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestOOMMaliciousInput(t *testing.T) {
|
||||||
|
oomTests := []unpackTest{
|
||||||
|
{
|
||||||
|
def: `[{"type": "uint8[]"}]`,
|
||||||
|
enc: "0000000000000000000000000000000000000000000000000000000000000020" + // offset
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000003" + // num elems
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" + // elem 1
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002", // elem 2
|
||||||
|
},
|
||||||
|
{ // Length larger than 64 bits
|
||||||
|
def: `[{"type": "uint8[]"}]`,
|
||||||
|
enc: "0000000000000000000000000000000000000000000000000000000000000020" + // offset
|
||||||
|
"00ffffffffffffffffffffffffffffffffffffffffffffff0000000000000002" + // num elems
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" + // elem 1
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002", // elem 2
|
||||||
|
},
|
||||||
|
{ // Offset very large (over 64 bits)
|
||||||
|
def: `[{"type": "uint8[]"}]`,
|
||||||
|
enc: "00ffffffffffffffffffffffffffffffffffffffffffffff0000000000000020" + // offset
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" + // num elems
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" + // elem 1
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002", // elem 2
|
||||||
|
},
|
||||||
|
{ // Offset very large (below 64 bits)
|
||||||
|
def: `[{"type": "uint8[]"}]`,
|
||||||
|
enc: "0000000000000000000000000000000000000000000000007ffffffffff00020" + // offset
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" + // num elems
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" + // elem 1
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002", // elem 2
|
||||||
|
},
|
||||||
|
{ // Offset negative (as 64 bit)
|
||||||
|
def: `[{"type": "uint8[]"}]`,
|
||||||
|
enc: "000000000000000000000000000000000000000000000000f000000000000020" + // offset
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" + // num elems
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" + // elem 1
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002", // elem 2
|
||||||
|
},
|
||||||
|
|
||||||
|
{ // Negative length
|
||||||
|
def: `[{"type": "uint8[]"}]`,
|
||||||
|
enc: "0000000000000000000000000000000000000000000000000000000000000020" + // offset
|
||||||
|
"000000000000000000000000000000000000000000000000f000000000000002" + // num elems
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" + // elem 1
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002", // elem 2
|
||||||
|
},
|
||||||
|
{ // Very large length
|
||||||
|
def: `[{"type": "uint8[]"}]`,
|
||||||
|
enc: "0000000000000000000000000000000000000000000000000000000000000020" + // offset
|
||||||
|
"0000000000000000000000000000000000000000000000007fffffffff000002" + // num elems
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" + // elem 1
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002", // elem 2
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for i, test := range oomTests {
|
||||||
|
def := fmt.Sprintf(`[{ "name" : "method", "outputs": %s}]`, test.def)
|
||||||
|
abi, err := JSON(strings.NewReader(def))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("invalid ABI definition %s: %v", def, err)
|
||||||
|
}
|
||||||
|
encb, err := hex.DecodeString(test.enc)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("invalid hex: %s" + test.enc)
|
||||||
|
}
|
||||||
|
_, err = abi.Methods["method"].Outputs.UnpackValues(encb)
|
||||||
|
if err == nil {
|
||||||
|
t.Fatalf("Expected error on malicious input, test %d", i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
@ -182,13 +182,13 @@ func doInstall(cmdline []string) {
|
|||||||
// Check Go version. People regularly open issues about compilation
|
// Check Go version. People regularly open issues about compilation
|
||||||
// failure with outdated Go. This should save them the trouble.
|
// failure with outdated Go. This should save them the trouble.
|
||||||
if !strings.Contains(runtime.Version(), "devel") {
|
if !strings.Contains(runtime.Version(), "devel") {
|
||||||
// Figure out the minor version number since we can't textually compare (1.10 < 1.7)
|
// Figure out the minor version number since we can't textually compare (1.10 < 1.8)
|
||||||
var minor int
|
var minor int
|
||||||
fmt.Sscanf(strings.TrimPrefix(runtime.Version(), "go1."), "%d", &minor)
|
fmt.Sscanf(strings.TrimPrefix(runtime.Version(), "go1."), "%d", &minor)
|
||||||
|
|
||||||
if minor < 7 {
|
if minor < 8 {
|
||||||
log.Println("You have Go version", runtime.Version())
|
log.Println("You have Go version", runtime.Version())
|
||||||
log.Println("go-ethereum requires at least Go version 1.7 and cannot")
|
log.Println("go-ethereum requires at least Go version 1.8 and cannot")
|
||||||
log.Println("be compiled with an earlier version. Please upgrade your Go installation.")
|
log.Println("be compiled with an earlier version. Please upgrade your Go installation.")
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
@ -86,10 +86,6 @@ var (
|
|||||||
Name: "create",
|
Name: "create",
|
||||||
Usage: "indicates the action should be create rather than call",
|
Usage: "indicates the action should be create rather than call",
|
||||||
}
|
}
|
||||||
DisableGasMeteringFlag = cli.BoolFlag{
|
|
||||||
Name: "nogasmetering",
|
|
||||||
Usage: "disable gas metering",
|
|
||||||
}
|
|
||||||
GenesisFlag = cli.StringFlag{
|
GenesisFlag = cli.StringFlag{
|
||||||
Name: "prestate",
|
Name: "prestate",
|
||||||
Usage: "JSON file with prestate (genesis) config",
|
Usage: "JSON file with prestate (genesis) config",
|
||||||
@ -128,7 +124,6 @@ func init() {
|
|||||||
ValueFlag,
|
ValueFlag,
|
||||||
DumpFlag,
|
DumpFlag,
|
||||||
InputFlag,
|
InputFlag,
|
||||||
DisableGasMeteringFlag,
|
|
||||||
MemProfileFlag,
|
MemProfileFlag,
|
||||||
CPUProfileFlag,
|
CPUProfileFlag,
|
||||||
StatDumpFlag,
|
StatDumpFlag,
|
||||||
|
@ -161,9 +161,8 @@ func runCmd(ctx *cli.Context) error {
|
|||||||
GasPrice: utils.GlobalBig(ctx, PriceFlag.Name),
|
GasPrice: utils.GlobalBig(ctx, PriceFlag.Name),
|
||||||
Value: utils.GlobalBig(ctx, ValueFlag.Name),
|
Value: utils.GlobalBig(ctx, ValueFlag.Name),
|
||||||
EVMConfig: vm.Config{
|
EVMConfig: vm.Config{
|
||||||
Tracer: tracer,
|
Tracer: tracer,
|
||||||
Debug: ctx.GlobalBool(DebugFlag.Name) || ctx.GlobalBool(MachineFlag.Name),
|
Debug: ctx.GlobalBool(DebugFlag.Name) || ctx.GlobalBool(MachineFlag.Name),
|
||||||
DisableGasMetering: ctx.GlobalBool(DisableGasMeteringFlag.Name),
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -533,9 +533,11 @@ func (f *faucet) loop() {
|
|||||||
}
|
}
|
||||||
defer sub.Unsubscribe()
|
defer sub.Unsubscribe()
|
||||||
|
|
||||||
for {
|
// Start a goroutine to update the state from head notifications in the background
|
||||||
select {
|
update := make(chan *types.Header)
|
||||||
case head := <-heads:
|
|
||||||
|
go func() {
|
||||||
|
for head := range update {
|
||||||
// New chain head arrived, query the current stats and stream to clients
|
// New chain head arrived, query the current stats and stream to clients
|
||||||
var (
|
var (
|
||||||
balance *big.Int
|
balance *big.Int
|
||||||
@ -588,6 +590,17 @@ func (f *faucet) loop() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
f.lock.RUnlock()
|
f.lock.RUnlock()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
// Wait for various events and assing to the appropriate background threads
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case head := <-heads:
|
||||||
|
// New head arrived, send if for state update if there's none running
|
||||||
|
select {
|
||||||
|
case update <- head:
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
|
||||||
case <-f.update:
|
case <-f.update:
|
||||||
// Pending requests updated, stream to clients
|
// Pending requests updated, stream to clients
|
||||||
@ -686,8 +699,6 @@ func authTwitter(url string) (string, string, common.Address, error) {
|
|||||||
if len(parts) < 4 || parts[len(parts)-2] != "status" {
|
if len(parts) < 4 || parts[len(parts)-2] != "status" {
|
||||||
return "", "", common.Address{}, errors.New("Invalid Twitter status URL")
|
return "", "", common.Address{}, errors.New("Invalid Twitter status URL")
|
||||||
}
|
}
|
||||||
username := parts[len(parts)-3]
|
|
||||||
|
|
||||||
// Twitter's API isn't really friendly with direct links. Still, we don't
|
// Twitter's API isn't really friendly with direct links. Still, we don't
|
||||||
// want to do ask read permissions from users, so just load the public posts and
|
// want to do ask read permissions from users, so just load the public posts and
|
||||||
// scrape it for the Ethereum address and profile URL.
|
// scrape it for the Ethereum address and profile URL.
|
||||||
@ -697,6 +708,13 @@ func authTwitter(url string) (string, string, common.Address, error) {
|
|||||||
}
|
}
|
||||||
defer res.Body.Close()
|
defer res.Body.Close()
|
||||||
|
|
||||||
|
// Resolve the username from the final redirect, no intermediate junk
|
||||||
|
parts = strings.Split(res.Request.URL.String(), "/")
|
||||||
|
if len(parts) < 4 || parts[len(parts)-2] != "status" {
|
||||||
|
return "", "", common.Address{}, errors.New("Invalid Twitter status URL")
|
||||||
|
}
|
||||||
|
username := parts[len(parts)-3]
|
||||||
|
|
||||||
body, err := ioutil.ReadAll(res.Body)
|
body, err := ioutil.ReadAll(res.Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", "", common.Address{}, err
|
return "", "", common.Address{}, err
|
||||||
|
@ -22,6 +22,7 @@ import (
|
|||||||
"os/signal"
|
"os/signal"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||||
"github.com/ethereum/go-ethereum/console"
|
"github.com/ethereum/go-ethereum/console"
|
||||||
@ -42,7 +43,7 @@ var (
|
|||||||
Description: `
|
Description: `
|
||||||
The Geth console is an interactive shell for the JavaScript runtime environment
|
The Geth console is an interactive shell for the JavaScript runtime environment
|
||||||
which exposes a node admin interface as well as the Ðapp JavaScript API.
|
which exposes a node admin interface as well as the Ðapp JavaScript API.
|
||||||
See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console.`,
|
See https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console.`,
|
||||||
}
|
}
|
||||||
|
|
||||||
attachCommand = cli.Command{
|
attachCommand = cli.Command{
|
||||||
@ -55,7 +56,7 @@ See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console.`,
|
|||||||
Description: `
|
Description: `
|
||||||
The Geth console is an interactive shell for the JavaScript runtime environment
|
The Geth console is an interactive shell for the JavaScript runtime environment
|
||||||
which exposes a node admin interface as well as the Ðapp JavaScript API.
|
which exposes a node admin interface as well as the Ðapp JavaScript API.
|
||||||
See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console.
|
See https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console.
|
||||||
This command allows to open a console on a running geth node.`,
|
This command allows to open a console on a running geth node.`,
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -68,7 +69,7 @@ This command allows to open a console on a running geth node.`,
|
|||||||
Category: "CONSOLE COMMANDS",
|
Category: "CONSOLE COMMANDS",
|
||||||
Description: `
|
Description: `
|
||||||
The JavaScript VM exposes a node admin interface as well as the Ðapp
|
The JavaScript VM exposes a node admin interface as well as the Ðapp
|
||||||
JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console`,
|
JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console`,
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -207,7 +208,7 @@ func ephemeralConsole(ctx *cli.Context) error {
|
|||||||
}
|
}
|
||||||
// Wait for pending callbacks, but stop for Ctrl-C.
|
// Wait for pending callbacks, but stop for Ctrl-C.
|
||||||
abort := make(chan os.Signal, 1)
|
abort := make(chan os.Signal, 1)
|
||||||
signal.Notify(abort, os.Interrupt)
|
signal.Notify(abort, syscall.SIGINT, syscall.SIGTERM)
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
<-abort
|
<-abort
|
||||||
|
@ -168,19 +168,18 @@ type parityChainSpec struct {
|
|||||||
Engine struct {
|
Engine struct {
|
||||||
Ethash struct {
|
Ethash struct {
|
||||||
Params struct {
|
Params struct {
|
||||||
MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
|
MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
|
||||||
DifficultyBoundDivisor *hexutil.Big `json:"difficultyBoundDivisor"`
|
DifficultyBoundDivisor *hexutil.Big `json:"difficultyBoundDivisor"`
|
||||||
GasLimitBoundDivisor hexutil.Uint64 `json:"gasLimitBoundDivisor"`
|
DurationLimit *hexutil.Big `json:"durationLimit"`
|
||||||
DurationLimit *hexutil.Big `json:"durationLimit"`
|
BlockReward *hexutil.Big `json:"blockReward"`
|
||||||
BlockReward *hexutil.Big `json:"blockReward"`
|
HomesteadTransition uint64 `json:"homesteadTransition"`
|
||||||
HomesteadTransition uint64 `json:"homesteadTransition"`
|
EIP150Transition uint64 `json:"eip150Transition"`
|
||||||
EIP150Transition uint64 `json:"eip150Transition"`
|
EIP160Transition uint64 `json:"eip160Transition"`
|
||||||
EIP160Transition uint64 `json:"eip160Transition"`
|
EIP161abcTransition uint64 `json:"eip161abcTransition"`
|
||||||
EIP161abcTransition uint64 `json:"eip161abcTransition"`
|
EIP161dTransition uint64 `json:"eip161dTransition"`
|
||||||
EIP161dTransition uint64 `json:"eip161dTransition"`
|
EIP649Reward *hexutil.Big `json:"eip649Reward"`
|
||||||
EIP649Reward *hexutil.Big `json:"eip649Reward"`
|
EIP100bTransition uint64 `json:"eip100bTransition"`
|
||||||
EIP100bTransition uint64 `json:"eip100bTransition"`
|
EIP649Transition uint64 `json:"eip649Transition"`
|
||||||
EIP649Transition uint64 `json:"eip649Transition"`
|
|
||||||
} `json:"params"`
|
} `json:"params"`
|
||||||
} `json:"Ethash"`
|
} `json:"Ethash"`
|
||||||
} `json:"engine"`
|
} `json:"engine"`
|
||||||
@ -188,6 +187,7 @@ type parityChainSpec struct {
|
|||||||
Params struct {
|
Params struct {
|
||||||
MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
|
MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
|
||||||
MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
|
MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
|
||||||
|
GasLimitBoundDivisor hexutil.Uint64 `json:"gasLimitBoundDivisor"`
|
||||||
NetworkID hexutil.Uint64 `json:"networkID"`
|
NetworkID hexutil.Uint64 `json:"networkID"`
|
||||||
MaxCodeSize uint64 `json:"maxCodeSize"`
|
MaxCodeSize uint64 `json:"maxCodeSize"`
|
||||||
EIP155Transition uint64 `json:"eip155Transition"`
|
EIP155Transition uint64 `json:"eip155Transition"`
|
||||||
@ -270,7 +270,6 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin
|
|||||||
}
|
}
|
||||||
spec.Engine.Ethash.Params.MinimumDifficulty = (*hexutil.Big)(params.MinimumDifficulty)
|
spec.Engine.Ethash.Params.MinimumDifficulty = (*hexutil.Big)(params.MinimumDifficulty)
|
||||||
spec.Engine.Ethash.Params.DifficultyBoundDivisor = (*hexutil.Big)(params.DifficultyBoundDivisor)
|
spec.Engine.Ethash.Params.DifficultyBoundDivisor = (*hexutil.Big)(params.DifficultyBoundDivisor)
|
||||||
spec.Engine.Ethash.Params.GasLimitBoundDivisor = (hexutil.Uint64)(params.GasLimitBoundDivisor)
|
|
||||||
spec.Engine.Ethash.Params.DurationLimit = (*hexutil.Big)(params.DurationLimit)
|
spec.Engine.Ethash.Params.DurationLimit = (*hexutil.Big)(params.DurationLimit)
|
||||||
spec.Engine.Ethash.Params.BlockReward = (*hexutil.Big)(ethash.FrontierBlockReward)
|
spec.Engine.Ethash.Params.BlockReward = (*hexutil.Big)(ethash.FrontierBlockReward)
|
||||||
spec.Engine.Ethash.Params.HomesteadTransition = genesis.Config.HomesteadBlock.Uint64()
|
spec.Engine.Ethash.Params.HomesteadTransition = genesis.Config.HomesteadBlock.Uint64()
|
||||||
@ -284,6 +283,7 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin
|
|||||||
|
|
||||||
spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
|
spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
|
||||||
spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit)
|
spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit)
|
||||||
|
spec.Params.GasLimitBoundDivisor = (hexutil.Uint64)(params.GasLimitBoundDivisor)
|
||||||
spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainId.Uint64())
|
spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainId.Uint64())
|
||||||
spec.Params.MaxCodeSize = params.MaxCodeSize
|
spec.Params.MaxCodeSize = params.MaxCodeSize
|
||||||
spec.Params.EIP155Transition = genesis.Config.EIP155Block.Uint64()
|
spec.Params.EIP155Transition = genesis.Config.EIP155Block.Uint64()
|
||||||
|
@ -631,6 +631,7 @@ func deployDashboard(client *sshClient, network string, conf *config, config *da
|
|||||||
"Tangerine": conf.Genesis.Config.EIP150Block,
|
"Tangerine": conf.Genesis.Config.EIP150Block,
|
||||||
"Spurious": conf.Genesis.Config.EIP155Block,
|
"Spurious": conf.Genesis.Config.EIP155Block,
|
||||||
"Byzantium": conf.Genesis.Config.ByzantiumBlock,
|
"Byzantium": conf.Genesis.Config.ByzantiumBlock,
|
||||||
|
"Constantinople": conf.Genesis.Config.ConstantinopleBlock,
|
||||||
})
|
})
|
||||||
files[filepath.Join(workdir, "index.html")] = indexfile.Bytes()
|
files[filepath.Join(workdir, "index.html")] = indexfile.Bytes()
|
||||||
|
|
||||||
|
@ -59,15 +59,16 @@ func (w *wizard) run() {
|
|||||||
fmt.Println()
|
fmt.Println()
|
||||||
|
|
||||||
// Make sure we have a good network name to work with fmt.Println()
|
// Make sure we have a good network name to work with fmt.Println()
|
||||||
|
// Docker accepts hyphens in image names, but doesn't like it for container names
|
||||||
if w.network == "" {
|
if w.network == "" {
|
||||||
fmt.Println("Please specify a network name to administer (no spaces, please)")
|
fmt.Println("Please specify a network name to administer (no spaces or hyphens, please)")
|
||||||
for {
|
for {
|
||||||
w.network = w.readString()
|
w.network = w.readString()
|
||||||
if !strings.Contains(w.network, " ") {
|
if !strings.Contains(w.network, " ") && !strings.Contains(w.network, "-") {
|
||||||
fmt.Printf("\nSweet, you can set this via --network=%s next time!\n\n", w.network)
|
fmt.Printf("\nSweet, you can set this via --network=%s next time!\n\n", w.network)
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
log.Error("I also like to live dangerously, still no spaces")
|
log.Error("I also like to live dangerously, still no spaces or hyphens")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
log.Info("Administering Ethereum network", "name", w.network)
|
log.Info("Administering Ethereum network", "name", w.network)
|
||||||
|
@ -23,6 +23,7 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"reflect"
|
"reflect"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"strings"
|
||||||
"unicode"
|
"unicode"
|
||||||
|
|
||||||
cli "gopkg.in/urfave/cli.v1"
|
cli "gopkg.in/urfave/cli.v1"
|
||||||
@ -97,10 +98,15 @@ func buildConfig(ctx *cli.Context) (config *bzzapi.Config, err error) {
|
|||||||
config = bzzapi.NewDefaultConfig()
|
config = bzzapi.NewDefaultConfig()
|
||||||
//first load settings from config file (if provided)
|
//first load settings from config file (if provided)
|
||||||
config, err = configFileOverride(config, ctx)
|
config, err = configFileOverride(config, ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
//override settings provided by environment variables
|
//override settings provided by environment variables
|
||||||
config = envVarsOverride(config)
|
config = envVarsOverride(config)
|
||||||
//override settings provided by command line
|
//override settings provided by command line
|
||||||
config = cmdLineOverride(config, ctx)
|
config = cmdLineOverride(config, ctx)
|
||||||
|
//validate configuration parameters
|
||||||
|
err = validateConfig(config)
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@ -194,12 +200,16 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
|
|||||||
utils.Fatalf(SWARM_ERR_SWAP_SET_NO_API)
|
utils.Fatalf(SWARM_ERR_SWAP_SET_NO_API)
|
||||||
}
|
}
|
||||||
|
|
||||||
//EnsApi can be set to "", so can't check for empty string, as it is allowed!
|
|
||||||
if ctx.GlobalIsSet(EnsAPIFlag.Name) {
|
if ctx.GlobalIsSet(EnsAPIFlag.Name) {
|
||||||
currentConfig.EnsApi = ctx.GlobalString(EnsAPIFlag.Name)
|
ensAPIs := ctx.GlobalStringSlice(EnsAPIFlag.Name)
|
||||||
|
// preserve backward compatibility to disable ENS with --ens-api=""
|
||||||
|
if len(ensAPIs) == 1 && ensAPIs[0] == "" {
|
||||||
|
ensAPIs = nil
|
||||||
|
}
|
||||||
|
currentConfig.EnsAPIs = ensAPIs
|
||||||
}
|
}
|
||||||
|
|
||||||
if ensaddr := ctx.GlobalString(EnsAddrFlag.Name); ensaddr != "" {
|
if ensaddr := ctx.GlobalString(DeprecatedEnsAddrFlag.Name); ensaddr != "" {
|
||||||
currentConfig.EnsRoot = common.HexToAddress(ensaddr)
|
currentConfig.EnsRoot = common.HexToAddress(ensaddr)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -266,9 +276,8 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) {
|
|||||||
utils.Fatalf(SWARM_ERR_SWAP_SET_NO_API)
|
utils.Fatalf(SWARM_ERR_SWAP_SET_NO_API)
|
||||||
}
|
}
|
||||||
|
|
||||||
//EnsApi can be set to "", so can't check for empty string, as it is allowed
|
if ensapi := os.Getenv(SWARM_ENV_ENS_API); ensapi != "" {
|
||||||
if ensapi, exists := os.LookupEnv(SWARM_ENV_ENS_API); exists {
|
currentConfig.EnsAPIs = strings.Split(ensapi, ",")
|
||||||
currentConfig.EnsApi = ensapi
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if ensaddr := os.Getenv(SWARM_ENV_ENS_ADDR); ensaddr != "" {
|
if ensaddr := os.Getenv(SWARM_ENV_ENS_ADDR); ensaddr != "" {
|
||||||
@ -309,6 +318,43 @@ func checkDeprecated(ctx *cli.Context) {
|
|||||||
if ctx.GlobalString(DeprecatedEthAPIFlag.Name) != "" {
|
if ctx.GlobalString(DeprecatedEthAPIFlag.Name) != "" {
|
||||||
utils.Fatalf("--ethapi is no longer a valid command line flag, please use --ens-api and/or --swap-api.")
|
utils.Fatalf("--ethapi is no longer a valid command line flag, please use --ens-api and/or --swap-api.")
|
||||||
}
|
}
|
||||||
|
// warn if --ens-api flag is set
|
||||||
|
if ctx.GlobalString(DeprecatedEnsAddrFlag.Name) != "" {
|
||||||
|
log.Warn("--ens-addr is no longer a valid command line flag, please use --ens-api to specify contract address.")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
//validate configuration parameters
|
||||||
|
func validateConfig(cfg *bzzapi.Config) (err error) {
|
||||||
|
for _, ensAPI := range cfg.EnsAPIs {
|
||||||
|
if ensAPI != "" {
|
||||||
|
if err := validateEnsAPIs(ensAPI); err != nil {
|
||||||
|
return fmt.Errorf("invalid format [tld:][contract-addr@]url for ENS API endpoint configuration %q: %v", ensAPI, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
//validate EnsAPIs configuration parameter
|
||||||
|
func validateEnsAPIs(s string) (err error) {
|
||||||
|
// missing contract address
|
||||||
|
if strings.HasPrefix(s, "@") {
|
||||||
|
return errors.New("missing contract address")
|
||||||
|
}
|
||||||
|
// missing url
|
||||||
|
if strings.HasSuffix(s, "@") {
|
||||||
|
return errors.New("missing url")
|
||||||
|
}
|
||||||
|
// missing tld
|
||||||
|
if strings.HasPrefix(s, ":") {
|
||||||
|
return errors.New("missing tld")
|
||||||
|
}
|
||||||
|
// missing url
|
||||||
|
if strings.HasSuffix(s, ":") {
|
||||||
|
return errors.New("missing url")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
//print a Config as string
|
//print a Config as string
|
||||||
|
@ -457,3 +457,98 @@ func TestCmdLineOverridesFile(t *testing.T) {
|
|||||||
|
|
||||||
node.Shutdown()
|
node.Shutdown()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestValidateConfig(t *testing.T) {
|
||||||
|
for _, c := range []struct {
|
||||||
|
cfg *api.Config
|
||||||
|
err string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
cfg: &api.Config{EnsAPIs: []string{
|
||||||
|
"/data/testnet/geth.ipc",
|
||||||
|
}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
cfg: &api.Config{EnsAPIs: []string{
|
||||||
|
"http://127.0.0.1:1234",
|
||||||
|
}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
cfg: &api.Config{EnsAPIs: []string{
|
||||||
|
"ws://127.0.0.1:1234",
|
||||||
|
}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
cfg: &api.Config{EnsAPIs: []string{
|
||||||
|
"test:/data/testnet/geth.ipc",
|
||||||
|
}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
cfg: &api.Config{EnsAPIs: []string{
|
||||||
|
"test:ws://127.0.0.1:1234",
|
||||||
|
}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
cfg: &api.Config{EnsAPIs: []string{
|
||||||
|
"314159265dD8dbb310642f98f50C066173C1259b@/data/testnet/geth.ipc",
|
||||||
|
}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
cfg: &api.Config{EnsAPIs: []string{
|
||||||
|
"314159265dD8dbb310642f98f50C066173C1259b@http://127.0.0.1:1234",
|
||||||
|
}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
cfg: &api.Config{EnsAPIs: []string{
|
||||||
|
"314159265dD8dbb310642f98f50C066173C1259b@ws://127.0.0.1:1234",
|
||||||
|
}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
cfg: &api.Config{EnsAPIs: []string{
|
||||||
|
"test:314159265dD8dbb310642f98f50C066173C1259b@/data/testnet/geth.ipc",
|
||||||
|
}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
cfg: &api.Config{EnsAPIs: []string{
|
||||||
|
"eth:314159265dD8dbb310642f98f50C066173C1259b@http://127.0.0.1:1234",
|
||||||
|
}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
cfg: &api.Config{EnsAPIs: []string{
|
||||||
|
"eth:314159265dD8dbb310642f98f50C066173C1259b@ws://127.0.0.1:12344",
|
||||||
|
}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
cfg: &api.Config{EnsAPIs: []string{
|
||||||
|
"eth:",
|
||||||
|
}},
|
||||||
|
err: "invalid format [tld:][contract-addr@]url for ENS API endpoint configuration \"eth:\": missing url",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
cfg: &api.Config{EnsAPIs: []string{
|
||||||
|
"314159265dD8dbb310642f98f50C066173C1259b@",
|
||||||
|
}},
|
||||||
|
err: "invalid format [tld:][contract-addr@]url for ENS API endpoint configuration \"314159265dD8dbb310642f98f50C066173C1259b@\": missing url",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
cfg: &api.Config{EnsAPIs: []string{
|
||||||
|
":314159265dD8dbb310642f98f50C066173C1259",
|
||||||
|
}},
|
||||||
|
err: "invalid format [tld:][contract-addr@]url for ENS API endpoint configuration \":314159265dD8dbb310642f98f50C066173C1259\": missing tld",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
cfg: &api.Config{EnsAPIs: []string{
|
||||||
|
"@/data/testnet/geth.ipc",
|
||||||
|
}},
|
||||||
|
err: "invalid format [tld:][contract-addr@]url for ENS API endpoint configuration \"@/data/testnet/geth.ipc\": missing contract address",
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
err := validateConfig(c.cfg)
|
||||||
|
if c.err != "" && err.Error() != c.err {
|
||||||
|
t.Errorf("expected error %q, got %q", c.err, err)
|
||||||
|
}
|
||||||
|
if c.err == "" && err != nil {
|
||||||
|
t.Errorf("unexpected error %q", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
@ -17,11 +17,9 @@
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"crypto/ecdsa"
|
"crypto/ecdsa"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"math/big"
|
|
||||||
"os"
|
"os"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
"runtime"
|
"runtime"
|
||||||
@ -29,14 +27,12 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"syscall"
|
"syscall"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/accounts"
|
"github.com/ethereum/go-ethereum/accounts"
|
||||||
"github.com/ethereum/go-ethereum/accounts/keystore"
|
"github.com/ethereum/go-ethereum/accounts/keystore"
|
||||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/console"
|
"github.com/ethereum/go-ethereum/console"
|
||||||
"github.com/ethereum/go-ethereum/contracts/ens"
|
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/ethclient"
|
"github.com/ethereum/go-ethereum/ethclient"
|
||||||
"github.com/ethereum/go-ethereum/internal/debug"
|
"github.com/ethereum/go-ethereum/internal/debug"
|
||||||
@ -45,9 +41,9 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/p2p"
|
"github.com/ethereum/go-ethereum/p2p"
|
||||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/rpc"
|
|
||||||
"github.com/ethereum/go-ethereum/swarm"
|
"github.com/ethereum/go-ethereum/swarm"
|
||||||
bzzapi "github.com/ethereum/go-ethereum/swarm/api"
|
bzzapi "github.com/ethereum/go-ethereum/swarm/api"
|
||||||
|
swarmmetrics "github.com/ethereum/go-ethereum/swarm/metrics"
|
||||||
|
|
||||||
"gopkg.in/urfave/cli.v1"
|
"gopkg.in/urfave/cli.v1"
|
||||||
)
|
)
|
||||||
@ -110,16 +106,11 @@ var (
|
|||||||
Usage: "Swarm Syncing enabled (default true)",
|
Usage: "Swarm Syncing enabled (default true)",
|
||||||
EnvVar: SWARM_ENV_SYNC_ENABLE,
|
EnvVar: SWARM_ENV_SYNC_ENABLE,
|
||||||
}
|
}
|
||||||
EnsAPIFlag = cli.StringFlag{
|
EnsAPIFlag = cli.StringSliceFlag{
|
||||||
Name: "ens-api",
|
Name: "ens-api",
|
||||||
Usage: "URL of the Ethereum API provider to use for ENS record lookups",
|
Usage: "ENS API endpoint for a TLD and with contract address, can be repeated, format [tld:][contract-addr@]url",
|
||||||
EnvVar: SWARM_ENV_ENS_API,
|
EnvVar: SWARM_ENV_ENS_API,
|
||||||
}
|
}
|
||||||
EnsAddrFlag = cli.StringFlag{
|
|
||||||
Name: "ens-addr",
|
|
||||||
Usage: "ENS contract address (default is detected as testnet or mainnet using --ens-api)",
|
|
||||||
EnvVar: SWARM_ENV_ENS_ADDR,
|
|
||||||
}
|
|
||||||
SwarmApiFlag = cli.StringFlag{
|
SwarmApiFlag = cli.StringFlag{
|
||||||
Name: "bzzapi",
|
Name: "bzzapi",
|
||||||
Usage: "Swarm HTTP endpoint",
|
Usage: "Swarm HTTP endpoint",
|
||||||
@ -156,6 +147,10 @@ var (
|
|||||||
Name: "ethapi",
|
Name: "ethapi",
|
||||||
Usage: "DEPRECATED: please use --ens-api and --swap-api",
|
Usage: "DEPRECATED: please use --ens-api and --swap-api",
|
||||||
}
|
}
|
||||||
|
DeprecatedEnsAddrFlag = cli.StringFlag{
|
||||||
|
Name: "ens-addr",
|
||||||
|
Usage: "DEPRECATED: ENS contract address, please use --ens-api with contract address according to its format",
|
||||||
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
//declare a few constant error messages, useful for later error check comparisons in test
|
//declare a few constant error messages, useful for later error check comparisons in test
|
||||||
@ -343,7 +338,6 @@ DEPRECATED: use 'swarm db clean'.
|
|||||||
// bzzd-specific flags
|
// bzzd-specific flags
|
||||||
CorsStringFlag,
|
CorsStringFlag,
|
||||||
EnsAPIFlag,
|
EnsAPIFlag,
|
||||||
EnsAddrFlag,
|
|
||||||
SwarmTomlConfigPathFlag,
|
SwarmTomlConfigPathFlag,
|
||||||
SwarmConfigPathFlag,
|
SwarmConfigPathFlag,
|
||||||
SwarmSwapEnabledFlag,
|
SwarmSwapEnabledFlag,
|
||||||
@ -363,11 +357,17 @@ DEPRECATED: use 'swarm db clean'.
|
|||||||
SwarmUploadMimeType,
|
SwarmUploadMimeType,
|
||||||
//deprecated flags
|
//deprecated flags
|
||||||
DeprecatedEthAPIFlag,
|
DeprecatedEthAPIFlag,
|
||||||
|
DeprecatedEnsAddrFlag,
|
||||||
}
|
}
|
||||||
app.Flags = append(app.Flags, debug.Flags...)
|
app.Flags = append(app.Flags, debug.Flags...)
|
||||||
|
app.Flags = append(app.Flags, swarmmetrics.Flags...)
|
||||||
app.Before = func(ctx *cli.Context) error {
|
app.Before = func(ctx *cli.Context) error {
|
||||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
runtime.GOMAXPROCS(runtime.NumCPU())
|
||||||
return debug.Setup(ctx)
|
if err := debug.Setup(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
swarmmetrics.Setup(ctx)
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
app.After = func(ctx *cli.Context) error {
|
app.After = func(ctx *cli.Context) error {
|
||||||
debug.Exit()
|
debug.Exit()
|
||||||
@ -448,38 +448,6 @@ func bzzd(ctx *cli.Context) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// detectEnsAddr determines the ENS contract address by getting both the
|
|
||||||
// version and genesis hash using the client and matching them to either
|
|
||||||
// mainnet or testnet addresses
|
|
||||||
func detectEnsAddr(client *rpc.Client) (common.Address, error) {
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
var version string
|
|
||||||
if err := client.CallContext(ctx, &version, "net_version"); err != nil {
|
|
||||||
return common.Address{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
block, err := ethclient.NewClient(client).BlockByNumber(ctx, big.NewInt(0))
|
|
||||||
if err != nil {
|
|
||||||
return common.Address{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
switch {
|
|
||||||
|
|
||||||
case version == "1" && block.Hash() == params.MainnetGenesisHash:
|
|
||||||
log.Info("using Mainnet ENS contract address", "addr", ens.MainNetAddress)
|
|
||||||
return ens.MainNetAddress, nil
|
|
||||||
|
|
||||||
case version == "3" && block.Hash() == params.TestnetGenesisHash:
|
|
||||||
log.Info("using Testnet ENS contract address", "addr", ens.TestNetAddress)
|
|
||||||
return ens.TestNetAddress, nil
|
|
||||||
|
|
||||||
default:
|
|
||||||
return common.Address{}, fmt.Errorf("unknown version and genesis hash: %s %s", version, block.Hash())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func registerBzzService(bzzconfig *bzzapi.Config, ctx *cli.Context, stack *node.Node) {
|
func registerBzzService(bzzconfig *bzzapi.Config, ctx *cli.Context, stack *node.Node) {
|
||||||
|
|
||||||
//define the swarm service boot function
|
//define the swarm service boot function
|
||||||
@ -494,27 +462,7 @@ func registerBzzService(bzzconfig *bzzapi.Config, ctx *cli.Context, stack *node.
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var ensClient *ethclient.Client
|
return swarm.NewSwarm(ctx, swapClient, bzzconfig)
|
||||||
if bzzconfig.EnsApi != "" {
|
|
||||||
log.Info("connecting to ENS API", "url", bzzconfig.EnsApi)
|
|
||||||
client, err := rpc.Dial(bzzconfig.EnsApi)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("error connecting to ENS API %s: %s", bzzconfig.EnsApi, err)
|
|
||||||
}
|
|
||||||
ensClient = ethclient.NewClient(client)
|
|
||||||
|
|
||||||
//no ENS root address set yet
|
|
||||||
if bzzconfig.EnsRoot == (common.Address{}) {
|
|
||||||
ensAddr, err := detectEnsAddr(client)
|
|
||||||
if err == nil {
|
|
||||||
bzzconfig.EnsRoot = ensAddr
|
|
||||||
} else {
|
|
||||||
log.Warn(fmt.Sprintf("could not determine ENS contract address, using default %s", bzzconfig.EnsRoot), "err", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return swarm.NewSwarm(ctx, swapClient, ensClient, bzzconfig, bzzconfig.SwapEnabled, bzzconfig.SyncEnabled, bzzconfig.Cors)
|
|
||||||
}
|
}
|
||||||
//register within the ethereum node
|
//register within the ethereum node
|
||||||
if err := stack.Register(boot); err != nil {
|
if err := stack.Register(boot); err != nil {
|
||||||
|
@ -35,7 +35,7 @@ const bzzManifestJSON = "application/bzz-manifest+json"
|
|||||||
func add(ctx *cli.Context) {
|
func add(ctx *cli.Context) {
|
||||||
args := ctx.Args()
|
args := ctx.Args()
|
||||||
if len(args) < 3 {
|
if len(args) < 3 {
|
||||||
utils.Fatalf("Need atleast three arguments <MHASH> <path> <HASH> [<content-type>]")
|
utils.Fatalf("Need at least three arguments <MHASH> <path> <HASH> [<content-type>]")
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@ -69,7 +69,7 @@ func update(ctx *cli.Context) {
|
|||||||
|
|
||||||
args := ctx.Args()
|
args := ctx.Args()
|
||||||
if len(args) < 3 {
|
if len(args) < 3 {
|
||||||
utils.Fatalf("Need atleast three arguments <MHASH> <path> <HASH>")
|
utils.Fatalf("Need at least three arguments <MHASH> <path> <HASH>")
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@ -101,7 +101,7 @@ func update(ctx *cli.Context) {
|
|||||||
func remove(ctx *cli.Context) {
|
func remove(ctx *cli.Context) {
|
||||||
args := ctx.Args()
|
args := ctx.Args()
|
||||||
if len(args) < 2 {
|
if len(args) < 2 {
|
||||||
utils.Fatalf("Need atleast two arguments <MHASH> <path>")
|
utils.Fatalf("Need at least two arguments <MHASH> <path>")
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
@ -25,6 +25,7 @@ import (
|
|||||||
"os/signal"
|
"os/signal"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strings"
|
"strings"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/core"
|
"github.com/ethereum/go-ethereum/core"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
@ -64,7 +65,7 @@ func StartNode(stack *node.Node) {
|
|||||||
}
|
}
|
||||||
go func() {
|
go func() {
|
||||||
sigc := make(chan os.Signal, 1)
|
sigc := make(chan os.Signal, 1)
|
||||||
signal.Notify(sigc, os.Interrupt)
|
signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)
|
||||||
defer signal.Stop(sigc)
|
defer signal.Stop(sigc)
|
||||||
<-sigc
|
<-sigc
|
||||||
log.Info("Got interrupt, shutting down...")
|
log.Info("Got interrupt, shutting down...")
|
||||||
@ -85,7 +86,7 @@ func ImportChain(chain *core.BlockChain, fn string) error {
|
|||||||
// If a signal is received, the import will stop at the next batch.
|
// If a signal is received, the import will stop at the next batch.
|
||||||
interrupt := make(chan os.Signal, 1)
|
interrupt := make(chan os.Signal, 1)
|
||||||
stop := make(chan struct{})
|
stop := make(chan struct{})
|
||||||
signal.Notify(interrupt, os.Interrupt)
|
signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
|
||||||
defer signal.Stop(interrupt)
|
defer signal.Stop(interrupt)
|
||||||
defer close(interrupt)
|
defer close(interrupt)
|
||||||
go func() {
|
go func() {
|
||||||
|
@ -400,7 +400,7 @@ var (
|
|||||||
RPCVirtualHostsFlag = cli.StringFlag{
|
RPCVirtualHostsFlag = cli.StringFlag{
|
||||||
Name: "rpcvhosts",
|
Name: "rpcvhosts",
|
||||||
Usage: "Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.",
|
Usage: "Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.",
|
||||||
Value: "localhost",
|
Value: strings.Join(node.DefaultConfig.HTTPVirtualHosts, ","),
|
||||||
}
|
}
|
||||||
RPCApiFlag = cli.StringFlag{
|
RPCApiFlag = cli.StringFlag{
|
||||||
Name: "rpcapi",
|
Name: "rpcapi",
|
||||||
@ -695,8 +695,9 @@ func setHTTP(ctx *cli.Context, cfg *node.Config) {
|
|||||||
if ctx.GlobalIsSet(RPCApiFlag.Name) {
|
if ctx.GlobalIsSet(RPCApiFlag.Name) {
|
||||||
cfg.HTTPModules = splitAndTrim(ctx.GlobalString(RPCApiFlag.Name))
|
cfg.HTTPModules = splitAndTrim(ctx.GlobalString(RPCApiFlag.Name))
|
||||||
}
|
}
|
||||||
|
if ctx.GlobalIsSet(RPCVirtualHostsFlag.Name) {
|
||||||
cfg.HTTPVirtualHosts = splitAndTrim(ctx.GlobalString(RPCVirtualHostsFlag.Name))
|
cfg.HTTPVirtualHosts = splitAndTrim(ctx.GlobalString(RPCVirtualHostsFlag.Name))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// setWS creates the WebSocket RPC listener interface string from the set
|
// setWS creates the WebSocket RPC listener interface string from the set
|
||||||
|
@ -22,6 +22,7 @@ package main
|
|||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
"crypto/ecdsa"
|
"crypto/ecdsa"
|
||||||
|
crand "crypto/rand"
|
||||||
"crypto/sha512"
|
"crypto/sha512"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
@ -48,6 +49,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
const quitCommand = "~Q"
|
const quitCommand = "~Q"
|
||||||
|
const entropySize = 32
|
||||||
|
|
||||||
// singletons
|
// singletons
|
||||||
var (
|
var (
|
||||||
@ -55,6 +57,7 @@ var (
|
|||||||
shh *whisper.Whisper
|
shh *whisper.Whisper
|
||||||
done chan struct{}
|
done chan struct{}
|
||||||
mailServer mailserver.WMailServer
|
mailServer mailserver.WMailServer
|
||||||
|
entropy [entropySize]byte
|
||||||
|
|
||||||
input = bufio.NewReader(os.Stdin)
|
input = bufio.NewReader(os.Stdin)
|
||||||
)
|
)
|
||||||
@ -76,14 +79,15 @@ var (
|
|||||||
|
|
||||||
// cmd arguments
|
// cmd arguments
|
||||||
var (
|
var (
|
||||||
bootstrapMode = flag.Bool("standalone", false, "boostrap node: don't actively connect to peers, wait for incoming connections")
|
bootstrapMode = flag.Bool("standalone", false, "boostrap node: don't initiate connection to peers, just wait for incoming connections")
|
||||||
forwarderMode = flag.Bool("forwarder", false, "forwarder mode: only forward messages, neither send nor decrypt messages")
|
forwarderMode = flag.Bool("forwarder", false, "forwarder mode: only forward messages, neither encrypt nor decrypt messages")
|
||||||
mailServerMode = flag.Bool("mailserver", false, "mail server mode: delivers expired messages on demand")
|
mailServerMode = flag.Bool("mailserver", false, "mail server mode: delivers expired messages on demand")
|
||||||
requestMail = flag.Bool("mailclient", false, "request expired messages from the bootstrap server")
|
requestMail = flag.Bool("mailclient", false, "request expired messages from the bootstrap server")
|
||||||
asymmetricMode = flag.Bool("asym", false, "use asymmetric encryption")
|
asymmetricMode = flag.Bool("asym", false, "use asymmetric encryption")
|
||||||
generateKey = flag.Bool("generatekey", false, "generate and show the private key")
|
generateKey = flag.Bool("generatekey", false, "generate and show the private key")
|
||||||
fileExMode = flag.Bool("fileexchange", false, "file exchange mode")
|
fileExMode = flag.Bool("fileexchange", false, "file exchange mode")
|
||||||
testMode = flag.Bool("test", false, "use of predefined parameters for diagnostics")
|
fileReader = flag.Bool("filereader", false, "load and decrypt messages saved as files, display as plain text")
|
||||||
|
testMode = flag.Bool("test", false, "use of predefined parameters for diagnostics (password, etc.)")
|
||||||
echoMode = flag.Bool("echo", false, "echo mode: prints some arguments for diagnostics")
|
echoMode = flag.Bool("echo", false, "echo mode: prints some arguments for diagnostics")
|
||||||
|
|
||||||
argVerbosity = flag.Int("verbosity", int(log.LvlError), "log verbosity level")
|
argVerbosity = flag.Int("verbosity", int(log.LvlError), "log verbosity level")
|
||||||
@ -99,13 +103,14 @@ var (
|
|||||||
argIDFile = flag.String("idfile", "", "file name with node id (private key)")
|
argIDFile = flag.String("idfile", "", "file name with node id (private key)")
|
||||||
argEnode = flag.String("boot", "", "bootstrap node you want to connect to (e.g. enode://e454......08d50@52.176.211.200:16428)")
|
argEnode = flag.String("boot", "", "bootstrap node you want to connect to (e.g. enode://e454......08d50@52.176.211.200:16428)")
|
||||||
argTopic = flag.String("topic", "", "topic in hexadecimal format (e.g. 70a4beef)")
|
argTopic = flag.String("topic", "", "topic in hexadecimal format (e.g. 70a4beef)")
|
||||||
argSaveDir = flag.String("savedir", "", "directory where incoming messages will be saved as files")
|
argSaveDir = flag.String("savedir", "", "directory where all incoming messages will be saved as files")
|
||||||
)
|
)
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
processArgs()
|
processArgs()
|
||||||
initialize()
|
initialize()
|
||||||
run()
|
run()
|
||||||
|
shutdown()
|
||||||
}
|
}
|
||||||
|
|
||||||
func processArgs() {
|
func processArgs() {
|
||||||
@ -205,21 +210,6 @@ func initialize() {
|
|||||||
MinimumAcceptedPOW: *argPoW,
|
MinimumAcceptedPOW: *argPoW,
|
||||||
}
|
}
|
||||||
|
|
||||||
if *mailServerMode {
|
|
||||||
if len(msPassword) == 0 {
|
|
||||||
msPassword, err = console.Stdin.PromptPassword("Please enter the Mail Server password: ")
|
|
||||||
if err != nil {
|
|
||||||
utils.Fatalf("Failed to read Mail Server password: %s", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
shh = whisper.New(cfg)
|
|
||||||
shh.RegisterServer(&mailServer)
|
|
||||||
mailServer.Init(shh, *argDBPath, msPassword, *argServerPoW)
|
|
||||||
} else {
|
|
||||||
shh = whisper.New(cfg)
|
|
||||||
}
|
|
||||||
|
|
||||||
if *argPoW != whisper.DefaultMinimumPoW {
|
if *argPoW != whisper.DefaultMinimumPoW {
|
||||||
err := shh.SetMinimumPoW(*argPoW)
|
err := shh.SetMinimumPoW(*argPoW)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -261,6 +251,26 @@ func initialize() {
|
|||||||
maxPeers = 800
|
maxPeers = 800
|
||||||
}
|
}
|
||||||
|
|
||||||
|
_, err = crand.Read(entropy[:])
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatalf("crypto/rand failed: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if *mailServerMode {
|
||||||
|
if len(msPassword) == 0 {
|
||||||
|
msPassword, err = console.Stdin.PromptPassword("Please enter the Mail Server password: ")
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatalf("Failed to read Mail Server password: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
shh = whisper.New(cfg)
|
||||||
|
shh.RegisterServer(&mailServer)
|
||||||
|
mailServer.Init(shh, *argDBPath, msPassword, *argServerPoW)
|
||||||
|
} else {
|
||||||
|
shh = whisper.New(cfg)
|
||||||
|
}
|
||||||
|
|
||||||
server = &p2p.Server{
|
server = &p2p.Server{
|
||||||
Config: p2p.Config{
|
Config: p2p.Config{
|
||||||
PrivateKey: nodeid,
|
PrivateKey: nodeid,
|
||||||
@ -276,10 +286,11 @@ func initialize() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func startServer() {
|
func startServer() error {
|
||||||
err := server.Start()
|
err := server.Start()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatalf("Failed to start Whisper peer: %s.", err)
|
fmt.Printf("Failed to start Whisper peer: %s.", err)
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("my public key: %s \n", common.ToHex(crypto.FromECDSAPub(&asymKey.PublicKey)))
|
fmt.Printf("my public key: %s \n", common.ToHex(crypto.FromECDSAPub(&asymKey.PublicKey)))
|
||||||
@ -298,6 +309,7 @@ func startServer() {
|
|||||||
if !*forwarderMode {
|
if !*forwarderMode {
|
||||||
fmt.Printf("Please type the message. To quit type: '%s'\n", quitCommand)
|
fmt.Printf("Please type the message. To quit type: '%s'\n", quitCommand)
|
||||||
}
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func isKeyValid(k *ecdsa.PublicKey) bool {
|
func isKeyValid(k *ecdsa.PublicKey) bool {
|
||||||
@ -411,8 +423,10 @@ func waitForConnection(timeout bool) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func run() {
|
func run() {
|
||||||
defer mailServer.Close()
|
err := startServer()
|
||||||
startServer()
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
defer server.Stop()
|
defer server.Stop()
|
||||||
shh.Start(nil)
|
shh.Start(nil)
|
||||||
defer shh.Stop()
|
defer shh.Stop()
|
||||||
@ -425,21 +439,26 @@ func run() {
|
|||||||
requestExpiredMessagesLoop()
|
requestExpiredMessagesLoop()
|
||||||
} else if *fileExMode {
|
} else if *fileExMode {
|
||||||
sendFilesLoop()
|
sendFilesLoop()
|
||||||
|
} else if *fileReader {
|
||||||
|
fileReaderLoop()
|
||||||
} else {
|
} else {
|
||||||
sendLoop()
|
sendLoop()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func shutdown() {
|
||||||
|
close(done)
|
||||||
|
mailServer.Close()
|
||||||
|
}
|
||||||
|
|
||||||
func sendLoop() {
|
func sendLoop() {
|
||||||
for {
|
for {
|
||||||
s := scanLine("")
|
s := scanLine("")
|
||||||
if s == quitCommand {
|
if s == quitCommand {
|
||||||
fmt.Println("Quit command received")
|
fmt.Println("Quit command received")
|
||||||
close(done)
|
return
|
||||||
break
|
|
||||||
}
|
}
|
||||||
sendMsg([]byte(s))
|
sendMsg([]byte(s))
|
||||||
|
|
||||||
if *asymmetricMode {
|
if *asymmetricMode {
|
||||||
// print your own message for convenience,
|
// print your own message for convenience,
|
||||||
// because in asymmetric mode it is impossible to decrypt it
|
// because in asymmetric mode it is impossible to decrypt it
|
||||||
@ -455,13 +474,11 @@ func sendFilesLoop() {
|
|||||||
s := scanLine("")
|
s := scanLine("")
|
||||||
if s == quitCommand {
|
if s == quitCommand {
|
||||||
fmt.Println("Quit command received")
|
fmt.Println("Quit command received")
|
||||||
close(done)
|
return
|
||||||
break
|
|
||||||
}
|
}
|
||||||
b, err := ioutil.ReadFile(s)
|
b, err := ioutil.ReadFile(s)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Printf(">>> Error: %s \n", err)
|
fmt.Printf(">>> Error: %s \n", err)
|
||||||
continue
|
|
||||||
} else {
|
} else {
|
||||||
h := sendMsg(b)
|
h := sendMsg(b)
|
||||||
if (h == common.Hash{}) {
|
if (h == common.Hash{}) {
|
||||||
@ -475,6 +492,38 @@ func sendFilesLoop() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func fileReaderLoop() {
|
||||||
|
watcher1 := shh.GetFilter(symFilterID)
|
||||||
|
watcher2 := shh.GetFilter(asymFilterID)
|
||||||
|
if watcher1 == nil && watcher2 == nil {
|
||||||
|
fmt.Println("Error: neither symmetric nor asymmetric filter is installed")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
s := scanLine("")
|
||||||
|
if s == quitCommand {
|
||||||
|
fmt.Println("Quit command received")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
raw, err := ioutil.ReadFile(s)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf(">>> Error: %s \n", err)
|
||||||
|
} else {
|
||||||
|
env := whisper.Envelope{Data: raw} // the topic is zero
|
||||||
|
msg := env.Open(watcher1) // force-open envelope regardless of the topic
|
||||||
|
if msg == nil {
|
||||||
|
msg = env.Open(watcher2)
|
||||||
|
}
|
||||||
|
if msg == nil {
|
||||||
|
fmt.Printf(">>> Error: failed to decrypt the message \n")
|
||||||
|
} else {
|
||||||
|
printMessageInfo(msg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func scanLine(prompt string) string {
|
func scanLine(prompt string) string {
|
||||||
if len(prompt) > 0 {
|
if len(prompt) > 0 {
|
||||||
fmt.Print(prompt)
|
fmt.Print(prompt)
|
||||||
@ -548,20 +597,18 @@ func messageLoop() {
|
|||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-ticker.C:
|
case <-ticker.C:
|
||||||
messages := sf.Retrieve()
|
m1 := sf.Retrieve()
|
||||||
|
m2 := af.Retrieve()
|
||||||
|
messages := append(m1, m2...)
|
||||||
for _, msg := range messages {
|
for _, msg := range messages {
|
||||||
if *fileExMode || len(msg.Payload) > 2048 {
|
// All messages are saved upon specifying argSaveDir.
|
||||||
|
// fileExMode only specifies how messages are displayed on the console after they are saved.
|
||||||
|
// if fileExMode == true, only the hashes are displayed, since messages might be too big.
|
||||||
|
if len(*argSaveDir) > 0 {
|
||||||
writeMessageToFile(*argSaveDir, msg)
|
writeMessageToFile(*argSaveDir, msg)
|
||||||
} else {
|
|
||||||
printMessageInfo(msg)
|
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
messages = af.Retrieve()
|
if !*fileExMode && len(msg.Payload) <= 2048 {
|
||||||
for _, msg := range messages {
|
|
||||||
if *fileExMode || len(msg.Payload) > 2048 {
|
|
||||||
writeMessageToFile(*argSaveDir, msg)
|
|
||||||
} else {
|
|
||||||
printMessageInfo(msg)
|
printMessageInfo(msg)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -596,27 +643,30 @@ func writeMessageToFile(dir string, msg *whisper.ReceivedMessage) {
|
|||||||
address = crypto.PubkeyToAddress(*msg.Src)
|
address = crypto.PubkeyToAddress(*msg.Src)
|
||||||
}
|
}
|
||||||
|
|
||||||
if whisper.IsPubKeyEqual(msg.Src, &asymKey.PublicKey) {
|
// this is a sample code; uncomment if you don't want to save your own messages.
|
||||||
// message from myself: don't save, only report
|
//if whisper.IsPubKeyEqual(msg.Src, &asymKey.PublicKey) {
|
||||||
fmt.Printf("\n%s <%x>: message received: '%s'\n", timestamp, address, name)
|
// fmt.Printf("\n%s <%x>: message from myself received, not saved: '%s'\n", timestamp, address, name)
|
||||||
} else if len(dir) > 0 {
|
// return
|
||||||
|
//}
|
||||||
|
|
||||||
|
if len(dir) > 0 {
|
||||||
fullpath := filepath.Join(dir, name)
|
fullpath := filepath.Join(dir, name)
|
||||||
err := ioutil.WriteFile(fullpath, msg.Payload, 0644)
|
err := ioutil.WriteFile(fullpath, msg.Raw, 0644)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Printf("\n%s {%x}: message received but not saved: %s\n", timestamp, address, err)
|
fmt.Printf("\n%s {%x}: message received but not saved: %s\n", timestamp, address, err)
|
||||||
} else {
|
} else {
|
||||||
fmt.Printf("\n%s {%x}: message received and saved as '%s' (%d bytes)\n", timestamp, address, name, len(msg.Payload))
|
fmt.Printf("\n%s {%x}: message received and saved as '%s' (%d bytes)\n", timestamp, address, name, len(msg.Raw))
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
fmt.Printf("\n%s {%x}: big message received (%d bytes), but not saved: %s\n", timestamp, address, len(msg.Payload), name)
|
fmt.Printf("\n%s {%x}: message received (%d bytes), but not saved: %s\n", timestamp, address, len(msg.Raw), name)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func requestExpiredMessagesLoop() {
|
func requestExpiredMessagesLoop() {
|
||||||
var key, peerID []byte
|
var key, peerID, bloom []byte
|
||||||
var timeLow, timeUpp uint32
|
var timeLow, timeUpp uint32
|
||||||
var t string
|
var t string
|
||||||
var xt, empty whisper.TopicType
|
var xt whisper.TopicType
|
||||||
|
|
||||||
keyID, err := shh.AddSymKeyFromPassword(msPassword)
|
keyID, err := shh.AddSymKeyFromPassword(msPassword)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -639,18 +689,19 @@ func requestExpiredMessagesLoop() {
|
|||||||
utils.Fatalf("Failed to parse the topic: %s", err)
|
utils.Fatalf("Failed to parse the topic: %s", err)
|
||||||
}
|
}
|
||||||
xt = whisper.BytesToTopic(x)
|
xt = whisper.BytesToTopic(x)
|
||||||
|
bloom = whisper.TopicToBloom(xt)
|
||||||
|
obfuscateBloom(bloom)
|
||||||
|
} else {
|
||||||
|
bloom = whisper.MakeFullNodeBloom()
|
||||||
}
|
}
|
||||||
if timeUpp == 0 {
|
if timeUpp == 0 {
|
||||||
timeUpp = 0xFFFFFFFF
|
timeUpp = 0xFFFFFFFF
|
||||||
}
|
}
|
||||||
|
|
||||||
data := make([]byte, 8+whisper.TopicLength)
|
data := make([]byte, 8, 8+whisper.BloomFilterSize)
|
||||||
binary.BigEndian.PutUint32(data, timeLow)
|
binary.BigEndian.PutUint32(data, timeLow)
|
||||||
binary.BigEndian.PutUint32(data[4:], timeUpp)
|
binary.BigEndian.PutUint32(data[4:], timeUpp)
|
||||||
copy(data[8:], xt[:])
|
data = append(data, bloom...)
|
||||||
if xt == empty {
|
|
||||||
data = data[:8]
|
|
||||||
}
|
|
||||||
|
|
||||||
var params whisper.MessageParams
|
var params whisper.MessageParams
|
||||||
params.PoW = *argServerPoW
|
params.PoW = *argServerPoW
|
||||||
@ -684,3 +735,20 @@ func extractIDFromEnode(s string) []byte {
|
|||||||
}
|
}
|
||||||
return n.ID[:]
|
return n.ID[:]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// obfuscateBloom adds 16 random bits to the the bloom
|
||||||
|
// filter, in order to obfuscate the containing topics.
|
||||||
|
// it does so deterministically within every session.
|
||||||
|
// despite additional bits, it will match on average
|
||||||
|
// 32000 times less messages than full node's bloom filter.
|
||||||
|
func obfuscateBloom(bloom []byte) {
|
||||||
|
const half = entropySize / 2
|
||||||
|
for i := 0; i < half; i++ {
|
||||||
|
x := int(entropy[i])
|
||||||
|
if entropy[half+i] < 128 {
|
||||||
|
x += 256
|
||||||
|
}
|
||||||
|
|
||||||
|
bloom[x/8] = 1 << uint(x%8) // set the bit number X
|
||||||
|
}
|
||||||
|
}
|
||||||
|
@ -25,6 +25,6 @@ var (
|
|||||||
Big3 = big.NewInt(3)
|
Big3 = big.NewInt(3)
|
||||||
Big0 = big.NewInt(0)
|
Big0 = big.NewInt(0)
|
||||||
Big32 = big.NewInt(32)
|
Big32 = big.NewInt(32)
|
||||||
Big256 = big.NewInt(0xff)
|
Big256 = big.NewInt(256)
|
||||||
Big257 = big.NewInt(257)
|
Big257 = big.NewInt(257)
|
||||||
)
|
)
|
||||||
|
@ -19,6 +19,7 @@ package ethash
|
|||||||
import (
|
import (
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"hash"
|
"hash"
|
||||||
|
"math/big"
|
||||||
"reflect"
|
"reflect"
|
||||||
"runtime"
|
"runtime"
|
||||||
"sync"
|
"sync"
|
||||||
@ -47,6 +48,48 @@ const (
|
|||||||
loopAccesses = 64 // Number of accesses in hashimoto loop
|
loopAccesses = 64 // Number of accesses in hashimoto loop
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// cacheSize returns the size of the ethash verification cache that belongs to a certain
|
||||||
|
// block number.
|
||||||
|
func cacheSize(block uint64) uint64 {
|
||||||
|
epoch := int(block / epochLength)
|
||||||
|
if epoch < maxEpoch {
|
||||||
|
return cacheSizes[epoch]
|
||||||
|
}
|
||||||
|
return calcCacheSize(epoch)
|
||||||
|
}
|
||||||
|
|
||||||
|
// calcCacheSize calculates the cache size for epoch. The cache size grows linearly,
|
||||||
|
// however, we always take the highest prime below the linearly growing threshold in order
|
||||||
|
// to reduce the risk of accidental regularities leading to cyclic behavior.
|
||||||
|
func calcCacheSize(epoch int) uint64 {
|
||||||
|
size := cacheInitBytes + cacheGrowthBytes*uint64(epoch) - hashBytes
|
||||||
|
for !new(big.Int).SetUint64(size / hashBytes).ProbablyPrime(1) { // Always accurate for n < 2^64
|
||||||
|
size -= 2 * hashBytes
|
||||||
|
}
|
||||||
|
return size
|
||||||
|
}
|
||||||
|
|
||||||
|
// datasetSize returns the size of the ethash mining dataset that belongs to a certain
|
||||||
|
// block number.
|
||||||
|
func datasetSize(block uint64) uint64 {
|
||||||
|
epoch := int(block / epochLength)
|
||||||
|
if epoch < maxEpoch {
|
||||||
|
return datasetSizes[epoch]
|
||||||
|
}
|
||||||
|
return calcDatasetSize(epoch)
|
||||||
|
}
|
||||||
|
|
||||||
|
// calcDatasetSize calculates the dataset size for epoch. The dataset size grows linearly,
|
||||||
|
// however, we always take the highest prime below the linearly growing threshold in order
|
||||||
|
// to reduce the risk of accidental regularities leading to cyclic behavior.
|
||||||
|
func calcDatasetSize(epoch int) uint64 {
|
||||||
|
size := datasetInitBytes + datasetGrowthBytes*uint64(epoch) - mixBytes
|
||||||
|
for !new(big.Int).SetUint64(size / mixBytes).ProbablyPrime(1) { // Always accurate for n < 2^64
|
||||||
|
size -= 2 * mixBytes
|
||||||
|
}
|
||||||
|
return size
|
||||||
|
}
|
||||||
|
|
||||||
// hasher is a repetitive hasher allowing the same hash data structures to be
|
// hasher is a repetitive hasher allowing the same hash data structures to be
|
||||||
// reused between hash runs instead of requiring new ones to be created.
|
// reused between hash runs instead of requiring new ones to be created.
|
||||||
type hasher func(dest []byte, data []byte)
|
type hasher func(dest []byte, data []byte)
|
||||||
|
@ -1,47 +0,0 @@
|
|||||||
// Copyright 2017 The go-ethereum Authors
|
|
||||||
// This file is part of the go-ethereum library.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU Lesser General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU Lesser General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU Lesser General Public License
|
|
||||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
// +build !go1.8
|
|
||||||
|
|
||||||
package ethash
|
|
||||||
|
|
||||||
// cacheSize calculates and returns the size of the ethash verification cache that
|
|
||||||
// belongs to a certain block number. The cache size grows linearly, however, we
|
|
||||||
// always take the highest prime below the linearly growing threshold in order to
|
|
||||||
// reduce the risk of accidental regularities leading to cyclic behavior.
|
|
||||||
func cacheSize(block uint64) uint64 {
|
|
||||||
// If we have a pre-generated value, use that
|
|
||||||
epoch := int(block / epochLength)
|
|
||||||
if epoch < maxEpoch {
|
|
||||||
return cacheSizes[epoch]
|
|
||||||
}
|
|
||||||
// We don't have a way to verify primes fast before Go 1.8
|
|
||||||
panic("fast prime testing unsupported in Go < 1.8")
|
|
||||||
}
|
|
||||||
|
|
||||||
// datasetSize calculates and returns the size of the ethash mining dataset that
|
|
||||||
// belongs to a certain block number. The dataset size grows linearly, however, we
|
|
||||||
// always take the highest prime below the linearly growing threshold in order to
|
|
||||||
// reduce the risk of accidental regularities leading to cyclic behavior.
|
|
||||||
func datasetSize(block uint64) uint64 {
|
|
||||||
// If we have a pre-generated value, use that
|
|
||||||
epoch := int(block / epochLength)
|
|
||||||
if epoch < maxEpoch {
|
|
||||||
return datasetSizes[epoch]
|
|
||||||
}
|
|
||||||
// We don't have a way to verify primes fast before Go 1.8
|
|
||||||
panic("fast prime testing unsupported in Go < 1.8")
|
|
||||||
}
|
|
@ -1,63 +0,0 @@
|
|||||||
// Copyright 2017 The go-ethereum Authors
|
|
||||||
// This file is part of the go-ethereum library.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU Lesser General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU Lesser General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU Lesser General Public License
|
|
||||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
// +build go1.8
|
|
||||||
|
|
||||||
package ethash
|
|
||||||
|
|
||||||
import "math/big"
|
|
||||||
|
|
||||||
// cacheSize returns the size of the ethash verification cache that belongs to a certain
|
|
||||||
// block number.
|
|
||||||
func cacheSize(block uint64) uint64 {
|
|
||||||
epoch := int(block / epochLength)
|
|
||||||
if epoch < maxEpoch {
|
|
||||||
return cacheSizes[epoch]
|
|
||||||
}
|
|
||||||
return calcCacheSize(epoch)
|
|
||||||
}
|
|
||||||
|
|
||||||
// calcCacheSize calculates the cache size for epoch. The cache size grows linearly,
|
|
||||||
// however, we always take the highest prime below the linearly growing threshold in order
|
|
||||||
// to reduce the risk of accidental regularities leading to cyclic behavior.
|
|
||||||
func calcCacheSize(epoch int) uint64 {
|
|
||||||
size := cacheInitBytes + cacheGrowthBytes*uint64(epoch) - hashBytes
|
|
||||||
for !new(big.Int).SetUint64(size / hashBytes).ProbablyPrime(1) { // Always accurate for n < 2^64
|
|
||||||
size -= 2 * hashBytes
|
|
||||||
}
|
|
||||||
return size
|
|
||||||
}
|
|
||||||
|
|
||||||
// datasetSize returns the size of the ethash mining dataset that belongs to a certain
|
|
||||||
// block number.
|
|
||||||
func datasetSize(block uint64) uint64 {
|
|
||||||
epoch := int(block / epochLength)
|
|
||||||
if epoch < maxEpoch {
|
|
||||||
return datasetSizes[epoch]
|
|
||||||
}
|
|
||||||
return calcDatasetSize(epoch)
|
|
||||||
}
|
|
||||||
|
|
||||||
// calcDatasetSize calculates the dataset size for epoch. The dataset size grows linearly,
|
|
||||||
// however, we always take the highest prime below the linearly growing threshold in order
|
|
||||||
// to reduce the risk of accidental regularities leading to cyclic behavior.
|
|
||||||
func calcDatasetSize(epoch int) uint64 {
|
|
||||||
size := datasetInitBytes + datasetGrowthBytes*uint64(epoch) - mixBytes
|
|
||||||
for !new(big.Int).SetUint64(size / mixBytes).ProbablyPrime(1) { // Always accurate for n < 2^64
|
|
||||||
size -= 2 * mixBytes
|
|
||||||
}
|
|
||||||
return size
|
|
||||||
}
|
|
@ -1,37 +0,0 @@
|
|||||||
// Copyright 2017 The go-ethereum Authors
|
|
||||||
// This file is part of the go-ethereum library.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU Lesser General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU Lesser General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU Lesser General Public License
|
|
||||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
// +build go1.8
|
|
||||||
|
|
||||||
package ethash
|
|
||||||
|
|
||||||
import "testing"
|
|
||||||
|
|
||||||
// Tests whether the dataset size calculator works correctly by cross checking the
|
|
||||||
// hard coded lookup table with the value generated by it.
|
|
||||||
func TestSizeCalculations(t *testing.T) {
|
|
||||||
// Verify all the cache and dataset sizes from the lookup table.
|
|
||||||
for epoch, want := range cacheSizes {
|
|
||||||
if size := calcCacheSize(epoch); size != want {
|
|
||||||
t.Errorf("cache %d: cache size mismatch: have %d, want %d", epoch, size, want)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for epoch, want := range datasetSizes {
|
|
||||||
if size := calcDatasetSize(epoch); size != want {
|
|
||||||
t.Errorf("dataset %d: dataset size mismatch: have %d, want %d", epoch, size, want)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
@ -30,6 +30,22 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Tests whether the dataset size calculator works correctly by cross checking the
|
||||||
|
// hard coded lookup table with the value generated by it.
|
||||||
|
func TestSizeCalculations(t *testing.T) {
|
||||||
|
// Verify all the cache and dataset sizes from the lookup table.
|
||||||
|
for epoch, want := range cacheSizes {
|
||||||
|
if size := calcCacheSize(epoch); size != want {
|
||||||
|
t.Errorf("cache %d: cache size mismatch: have %d, want %d", epoch, size, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for epoch, want := range datasetSizes {
|
||||||
|
if size := calcDatasetSize(epoch); size != want {
|
||||||
|
t.Errorf("dataset %d: dataset size mismatch: have %d, want %d", epoch, size, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Tests that verification caches can be correctly generated.
|
// Tests that verification caches can be correctly generated.
|
||||||
func TestCacheGeneration(t *testing.T) {
|
func TestCacheGeneration(t *testing.T) {
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
|
@ -53,7 +53,6 @@ var (
|
|||||||
errDuplicateUncle = errors.New("duplicate uncle")
|
errDuplicateUncle = errors.New("duplicate uncle")
|
||||||
errUncleIsAncestor = errors.New("uncle is ancestor")
|
errUncleIsAncestor = errors.New("uncle is ancestor")
|
||||||
errDanglingUncle = errors.New("uncle's parent is not ancestor")
|
errDanglingUncle = errors.New("uncle's parent is not ancestor")
|
||||||
errNonceOutOfRange = errors.New("nonce out of range")
|
|
||||||
errInvalidDifficulty = errors.New("non-positive difficulty")
|
errInvalidDifficulty = errors.New("non-positive difficulty")
|
||||||
errInvalidMixDigest = errors.New("invalid mix digest")
|
errInvalidMixDigest = errors.New("invalid mix digest")
|
||||||
errInvalidPoW = errors.New("invalid proof-of-work")
|
errInvalidPoW = errors.New("invalid proof-of-work")
|
||||||
@ -356,7 +355,7 @@ func calcDifficultyByzantium(time uint64, parent *types.Header) *big.Int {
|
|||||||
if x.Cmp(params.MinimumDifficulty) < 0 {
|
if x.Cmp(params.MinimumDifficulty) < 0 {
|
||||||
x.Set(params.MinimumDifficulty)
|
x.Set(params.MinimumDifficulty)
|
||||||
}
|
}
|
||||||
// calculate a fake block numer for the ice-age delay:
|
// calculate a fake block number for the ice-age delay:
|
||||||
// https://github.com/ethereum/EIPs/pull/669
|
// https://github.com/ethereum/EIPs/pull/669
|
||||||
// fake_block_number = min(0, block.number - 3_000_000
|
// fake_block_number = min(0, block.number - 3_000_000
|
||||||
fakeBlockNumber := new(big.Int)
|
fakeBlockNumber := new(big.Int)
|
||||||
@ -474,18 +473,13 @@ func (ethash *Ethash) VerifySeal(chain consensus.ChainReader, header *types.Head
|
|||||||
if ethash.shared != nil {
|
if ethash.shared != nil {
|
||||||
return ethash.shared.VerifySeal(chain, header)
|
return ethash.shared.VerifySeal(chain, header)
|
||||||
}
|
}
|
||||||
// Sanity check that the block number is below the lookup table size (60M blocks)
|
|
||||||
number := header.Number.Uint64()
|
|
||||||
if number/epochLength >= maxEpoch {
|
|
||||||
// Go < 1.7 cannot calculate new cache/dataset sizes (no fast prime check)
|
|
||||||
return errNonceOutOfRange
|
|
||||||
}
|
|
||||||
// Ensure that we have a valid difficulty for the block
|
// Ensure that we have a valid difficulty for the block
|
||||||
if header.Difficulty.Sign() <= 0 {
|
if header.Difficulty.Sign() <= 0 {
|
||||||
return errInvalidDifficulty
|
return errInvalidDifficulty
|
||||||
}
|
}
|
||||||
|
|
||||||
// Recompute the digest and PoW value and verify against the header
|
// Recompute the digest and PoW value and verify against the header
|
||||||
|
number := header.Number.Uint64()
|
||||||
|
|
||||||
cache := ethash.cache(number)
|
cache := ethash.cache(number)
|
||||||
size := datasetSize(number)
|
size := datasetSize(number)
|
||||||
if ethash.config.PowMode == ModeTest {
|
if ethash.config.PowMode == ModeTest {
|
||||||
|
@ -35,9 +35,9 @@ import (
|
|||||||
mmap "github.com/edsrzf/mmap-go"
|
mmap "github.com/edsrzf/mmap-go"
|
||||||
"github.com/ethereum/go-ethereum/consensus"
|
"github.com/ethereum/go-ethereum/consensus"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
"github.com/ethereum/go-ethereum/rpc"
|
"github.com/ethereum/go-ethereum/rpc"
|
||||||
"github.com/hashicorp/golang-lru/simplelru"
|
"github.com/hashicorp/golang-lru/simplelru"
|
||||||
metrics "github.com/rcrowley/go-metrics"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var ErrInvalidDumpMagic = errors.New("invalid dump magic")
|
var ErrInvalidDumpMagic = errors.New("invalid dump magic")
|
||||||
|
@ -26,6 +26,7 @@ import (
|
|||||||
"regexp"
|
"regexp"
|
||||||
"sort"
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/internal/jsre"
|
"github.com/ethereum/go-ethereum/internal/jsre"
|
||||||
"github.com/ethereum/go-ethereum/internal/web3ext"
|
"github.com/ethereum/go-ethereum/internal/web3ext"
|
||||||
@ -332,7 +333,7 @@ func (c *Console) Interactive() {
|
|||||||
}()
|
}()
|
||||||
// Monitor Ctrl-C too in case the input is empty and we need to bail
|
// Monitor Ctrl-C too in case the input is empty and we need to bail
|
||||||
abort := make(chan os.Signal, 1)
|
abort := make(chan os.Signal, 1)
|
||||||
signal.Notify(abort, os.Interrupt)
|
signal.Notify(abort, syscall.SIGINT, syscall.SIGTERM)
|
||||||
|
|
||||||
// Start sending prompts to the user and reading back inputs
|
// Start sending prompts to the user and reading back inputs
|
||||||
for {
|
for {
|
||||||
|
@ -281,8 +281,8 @@ func TestDeposit(t *testing.T) {
|
|||||||
t.Fatalf("expected balance %v, got %v", exp, chbook.Balance())
|
t.Fatalf("expected balance %v, got %v", exp, chbook.Balance())
|
||||||
}
|
}
|
||||||
|
|
||||||
// autodeposit every 30ms if new cheque issued
|
// autodeposit every 200ms if new cheque issued
|
||||||
interval := 30 * time.Millisecond
|
interval := 200 * time.Millisecond
|
||||||
chbook.AutoDeposit(interval, common.Big1, balance)
|
chbook.AutoDeposit(interval, common.Big1, balance)
|
||||||
_, err = chbook.Issue(addr1, amount)
|
_, err = chbook.Issue(addr1, amount)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -206,7 +206,7 @@ func lexLine(l *lexer) stateFn {
|
|||||||
return lexComment
|
return lexComment
|
||||||
case isSpace(r):
|
case isSpace(r):
|
||||||
l.ignore()
|
l.ignore()
|
||||||
case isAlphaNumeric(r) || r == '_':
|
case isLetter(r) || r == '_':
|
||||||
return lexElement
|
return lexElement
|
||||||
case isNumber(r):
|
case isNumber(r):
|
||||||
return lexNumber
|
return lexNumber
|
||||||
@ -278,7 +278,7 @@ func lexElement(l *lexer) stateFn {
|
|||||||
return lexLine
|
return lexLine
|
||||||
}
|
}
|
||||||
|
|
||||||
func isAlphaNumeric(t rune) bool {
|
func isLetter(t rune) bool {
|
||||||
return unicode.IsLetter(t)
|
return unicode.IsLetter(t)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -46,7 +46,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
blockInsertTimer = metrics.NewTimer("chain/inserts")
|
blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
|
||||||
|
|
||||||
ErrNoGenesis = errors.New("Genesis not found in chain")
|
ErrNoGenesis = errors.New("Genesis not found in chain")
|
||||||
)
|
)
|
||||||
@ -107,8 +107,8 @@ type BlockChain struct {
|
|||||||
procmu sync.RWMutex // block processor lock
|
procmu sync.RWMutex // block processor lock
|
||||||
|
|
||||||
checkpoint int // checkpoint counts towards the new checkpoint
|
checkpoint int // checkpoint counts towards the new checkpoint
|
||||||
currentBlock *types.Block // Current head of the block chain
|
currentBlock atomic.Value // Current head of the block chain
|
||||||
currentFastBlock *types.Block // Current head of the fast-sync chain (may be above the block chain!)
|
currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)
|
||||||
|
|
||||||
stateCache state.Database // State database to reuse between imports (contains state cache)
|
stateCache state.Database // State database to reuse between imports (contains state cache)
|
||||||
bodyCache *lru.Cache // Cache for the most recent block bodies
|
bodyCache *lru.Cache // Cache for the most recent block bodies
|
||||||
@ -224,10 +224,10 @@ func (bc *BlockChain) loadLastState() error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Everything seems to be fine, set as the head block
|
// Everything seems to be fine, set as the head block
|
||||||
bc.currentBlock = currentBlock
|
bc.currentBlock.Store(currentBlock)
|
||||||
|
|
||||||
// Restore the last known head header
|
// Restore the last known head header
|
||||||
currentHeader := bc.currentBlock.Header()
|
currentHeader := currentBlock.Header()
|
||||||
if head := GetHeadHeaderHash(bc.db); head != (common.Hash{}) {
|
if head := GetHeadHeaderHash(bc.db); head != (common.Hash{}) {
|
||||||
if header := bc.GetHeaderByHash(head); header != nil {
|
if header := bc.GetHeaderByHash(head); header != nil {
|
||||||
currentHeader = header
|
currentHeader = header
|
||||||
@ -236,21 +236,23 @@ func (bc *BlockChain) loadLastState() error {
|
|||||||
bc.hc.SetCurrentHeader(currentHeader)
|
bc.hc.SetCurrentHeader(currentHeader)
|
||||||
|
|
||||||
// Restore the last known head fast block
|
// Restore the last known head fast block
|
||||||
bc.currentFastBlock = bc.currentBlock
|
bc.currentFastBlock.Store(currentBlock)
|
||||||
if head := GetHeadFastBlockHash(bc.db); head != (common.Hash{}) {
|
if head := GetHeadFastBlockHash(bc.db); head != (common.Hash{}) {
|
||||||
if block := bc.GetBlockByHash(head); block != nil {
|
if block := bc.GetBlockByHash(head); block != nil {
|
||||||
bc.currentFastBlock = block
|
bc.currentFastBlock.Store(block)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Issue a status log for the user
|
// Issue a status log for the user
|
||||||
|
currentFastBlock := bc.CurrentFastBlock()
|
||||||
|
|
||||||
headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
|
headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
|
||||||
blockTd := bc.GetTd(bc.currentBlock.Hash(), bc.currentBlock.NumberU64())
|
blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
|
||||||
fastTd := bc.GetTd(bc.currentFastBlock.Hash(), bc.currentFastBlock.NumberU64())
|
fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())
|
||||||
|
|
||||||
log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd)
|
log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd)
|
||||||
log.Info("Loaded most recent local full block", "number", bc.currentBlock.Number(), "hash", bc.currentBlock.Hash(), "td", blockTd)
|
log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd)
|
||||||
log.Info("Loaded most recent local fast block", "number", bc.currentFastBlock.Number(), "hash", bc.currentFastBlock.Hash(), "td", fastTd)
|
log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -279,30 +281,32 @@ func (bc *BlockChain) SetHead(head uint64) error {
|
|||||||
bc.futureBlocks.Purge()
|
bc.futureBlocks.Purge()
|
||||||
|
|
||||||
// Rewind the block chain, ensuring we don't end up with a stateless head block
|
// Rewind the block chain, ensuring we don't end up with a stateless head block
|
||||||
if bc.currentBlock != nil && currentHeader.Number.Uint64() < bc.currentBlock.NumberU64() {
|
if currentBlock := bc.CurrentBlock(); currentBlock != nil && currentHeader.Number.Uint64() < currentBlock.NumberU64() {
|
||||||
bc.currentBlock = bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64())
|
bc.currentBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
|
||||||
}
|
}
|
||||||
if bc.currentBlock != nil {
|
if currentBlock := bc.CurrentBlock(); currentBlock != nil {
|
||||||
if _, err := state.New(bc.currentBlock.Root(), bc.stateCache); err != nil {
|
if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
|
||||||
// Rewound state missing, rolled back to before pivot, reset to genesis
|
// Rewound state missing, rolled back to before pivot, reset to genesis
|
||||||
bc.currentBlock = nil
|
bc.currentBlock.Store(bc.genesisBlock)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Rewind the fast block in a simpleton way to the target head
|
// Rewind the fast block in a simpleton way to the target head
|
||||||
if bc.currentFastBlock != nil && currentHeader.Number.Uint64() < bc.currentFastBlock.NumberU64() {
|
if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && currentHeader.Number.Uint64() < currentFastBlock.NumberU64() {
|
||||||
bc.currentFastBlock = bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64())
|
bc.currentFastBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
|
||||||
}
|
}
|
||||||
// If either blocks reached nil, reset to the genesis state
|
// If either blocks reached nil, reset to the genesis state
|
||||||
if bc.currentBlock == nil {
|
if currentBlock := bc.CurrentBlock(); currentBlock == nil {
|
||||||
bc.currentBlock = bc.genesisBlock
|
bc.currentBlock.Store(bc.genesisBlock)
|
||||||
}
|
}
|
||||||
if bc.currentFastBlock == nil {
|
if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock == nil {
|
||||||
bc.currentFastBlock = bc.genesisBlock
|
bc.currentFastBlock.Store(bc.genesisBlock)
|
||||||
}
|
}
|
||||||
if err := WriteHeadBlockHash(bc.db, bc.currentBlock.Hash()); err != nil {
|
currentBlock := bc.CurrentBlock()
|
||||||
|
currentFastBlock := bc.CurrentFastBlock()
|
||||||
|
if err := WriteHeadBlockHash(bc.db, currentBlock.Hash()); err != nil {
|
||||||
log.Crit("Failed to reset head full block", "err", err)
|
log.Crit("Failed to reset head full block", "err", err)
|
||||||
}
|
}
|
||||||
if err := WriteHeadFastBlockHash(bc.db, bc.currentFastBlock.Hash()); err != nil {
|
if err := WriteHeadFastBlockHash(bc.db, currentFastBlock.Hash()); err != nil {
|
||||||
log.Crit("Failed to reset head fast block", "err", err)
|
log.Crit("Failed to reset head fast block", "err", err)
|
||||||
}
|
}
|
||||||
return bc.loadLastState()
|
return bc.loadLastState()
|
||||||
@ -321,7 +325,7 @@ func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
|
|||||||
}
|
}
|
||||||
// If all checks out, manually set the head block
|
// If all checks out, manually set the head block
|
||||||
bc.mu.Lock()
|
bc.mu.Lock()
|
||||||
bc.currentBlock = block
|
bc.currentBlock.Store(block)
|
||||||
bc.mu.Unlock()
|
bc.mu.Unlock()
|
||||||
|
|
||||||
log.Info("Committed new head block", "number", block.Number(), "hash", hash)
|
log.Info("Committed new head block", "number", block.Number(), "hash", hash)
|
||||||
@ -330,28 +334,19 @@ func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
|
|||||||
|
|
||||||
// GasLimit returns the gas limit of the current HEAD block.
|
// GasLimit returns the gas limit of the current HEAD block.
|
||||||
func (bc *BlockChain) GasLimit() uint64 {
|
func (bc *BlockChain) GasLimit() uint64 {
|
||||||
bc.mu.RLock()
|
return bc.CurrentBlock().GasLimit()
|
||||||
defer bc.mu.RUnlock()
|
|
||||||
|
|
||||||
return bc.currentBlock.GasLimit()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// CurrentBlock retrieves the current head block of the canonical chain. The
|
// CurrentBlock retrieves the current head block of the canonical chain. The
|
||||||
// block is retrieved from the blockchain's internal cache.
|
// block is retrieved from the blockchain's internal cache.
|
||||||
func (bc *BlockChain) CurrentBlock() *types.Block {
|
func (bc *BlockChain) CurrentBlock() *types.Block {
|
||||||
bc.mu.RLock()
|
return bc.currentBlock.Load().(*types.Block)
|
||||||
defer bc.mu.RUnlock()
|
|
||||||
|
|
||||||
return bc.currentBlock
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// CurrentFastBlock retrieves the current fast-sync head block of the canonical
|
// CurrentFastBlock retrieves the current fast-sync head block of the canonical
|
||||||
// chain. The block is retrieved from the blockchain's internal cache.
|
// chain. The block is retrieved from the blockchain's internal cache.
|
||||||
func (bc *BlockChain) CurrentFastBlock() *types.Block {
|
func (bc *BlockChain) CurrentFastBlock() *types.Block {
|
||||||
bc.mu.RLock()
|
return bc.currentFastBlock.Load().(*types.Block)
|
||||||
defer bc.mu.RUnlock()
|
|
||||||
|
|
||||||
return bc.currentFastBlock
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetProcessor sets the processor required for making state modifications.
|
// SetProcessor sets the processor required for making state modifications.
|
||||||
@ -416,10 +411,10 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
|
|||||||
}
|
}
|
||||||
bc.genesisBlock = genesis
|
bc.genesisBlock = genesis
|
||||||
bc.insert(bc.genesisBlock)
|
bc.insert(bc.genesisBlock)
|
||||||
bc.currentBlock = bc.genesisBlock
|
bc.currentBlock.Store(bc.genesisBlock)
|
||||||
bc.hc.SetGenesis(bc.genesisBlock.Header())
|
bc.hc.SetGenesis(bc.genesisBlock.Header())
|
||||||
bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
|
bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
|
||||||
bc.currentFastBlock = bc.genesisBlock
|
bc.currentFastBlock.Store(bc.genesisBlock)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -444,7 +439,7 @@ func (bc *BlockChain) repair(head **types.Block) error {
|
|||||||
|
|
||||||
// Export writes the active chain to the given writer.
|
// Export writes the active chain to the given writer.
|
||||||
func (bc *BlockChain) Export(w io.Writer) error {
|
func (bc *BlockChain) Export(w io.Writer) error {
|
||||||
return bc.ExportN(w, uint64(0), bc.currentBlock.NumberU64())
|
return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
|
||||||
}
|
}
|
||||||
|
|
||||||
// ExportN writes a subset of the active chain to the given writer.
|
// ExportN writes a subset of the active chain to the given writer.
|
||||||
@ -488,7 +483,7 @@ func (bc *BlockChain) insert(block *types.Block) {
|
|||||||
if err := WriteHeadBlockHash(bc.db, block.Hash()); err != nil {
|
if err := WriteHeadBlockHash(bc.db, block.Hash()); err != nil {
|
||||||
log.Crit("Failed to insert head block hash", "err", err)
|
log.Crit("Failed to insert head block hash", "err", err)
|
||||||
}
|
}
|
||||||
bc.currentBlock = block
|
bc.currentBlock.Store(block)
|
||||||
|
|
||||||
// If the block is better than our head or is on a different chain, force update heads
|
// If the block is better than our head or is on a different chain, force update heads
|
||||||
if updateHeads {
|
if updateHeads {
|
||||||
@ -497,7 +492,7 @@ func (bc *BlockChain) insert(block *types.Block) {
|
|||||||
if err := WriteHeadFastBlockHash(bc.db, block.Hash()); err != nil {
|
if err := WriteHeadFastBlockHash(bc.db, block.Hash()); err != nil {
|
||||||
log.Crit("Failed to insert head fast block hash", "err", err)
|
log.Crit("Failed to insert head fast block hash", "err", err)
|
||||||
}
|
}
|
||||||
bc.currentFastBlock = block
|
bc.currentFastBlock.Store(block)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -648,22 +643,21 @@ func (bc *BlockChain) Stop() {
|
|||||||
bc.wg.Wait()
|
bc.wg.Wait()
|
||||||
|
|
||||||
// Ensure the state of a recent block is also stored to disk before exiting.
|
// Ensure the state of a recent block is also stored to disk before exiting.
|
||||||
// It is fine if this state does not exist (fast start/stop cycle), but it is
|
// We're writing three different states to catch different restart scenarios:
|
||||||
// advisable to leave an N block gap from the head so 1) a restart loads up
|
// - HEAD: So we don't need to reprocess any blocks in the general case
|
||||||
// the last N blocks as sync assistance to remote nodes; 2) a restart during
|
// - HEAD-1: So we don't do large reorgs if our HEAD becomes an uncle
|
||||||
// a (small) reorg doesn't require deep reprocesses; 3) chain "repair" from
|
// - HEAD-127: So we have a hard limit on the number of blocks reexecuted
|
||||||
// missing states are constantly tested.
|
|
||||||
//
|
|
||||||
// This may be tuned a bit on mainnet if its too annoying to reprocess the last
|
|
||||||
// N blocks.
|
|
||||||
if !bc.cacheConfig.Disabled {
|
if !bc.cacheConfig.Disabled {
|
||||||
triedb := bc.stateCache.TrieDB()
|
triedb := bc.stateCache.TrieDB()
|
||||||
if number := bc.CurrentBlock().NumberU64(); number >= triesInMemory {
|
|
||||||
recent := bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - triesInMemory + 1)
|
|
||||||
|
|
||||||
log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
|
for _, offset := range []uint64{0, 1, triesInMemory - 1} {
|
||||||
if err := triedb.Commit(recent.Root(), true); err != nil {
|
if number := bc.CurrentBlock().NumberU64(); number > offset {
|
||||||
log.Error("Failed to commit recent state trie", "err", err)
|
recent := bc.GetBlockByNumber(number - offset)
|
||||||
|
|
||||||
|
log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
|
||||||
|
if err := triedb.Commit(recent.Root(), true); err != nil {
|
||||||
|
log.Error("Failed to commit recent state trie", "err", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for !bc.triegc.Empty() {
|
for !bc.triegc.Empty() {
|
||||||
@ -715,13 +709,15 @@ func (bc *BlockChain) Rollback(chain []common.Hash) {
|
|||||||
if currentHeader.Hash() == hash {
|
if currentHeader.Hash() == hash {
|
||||||
bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1))
|
bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1))
|
||||||
}
|
}
|
||||||
if bc.currentFastBlock.Hash() == hash {
|
if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
|
||||||
bc.currentFastBlock = bc.GetBlock(bc.currentFastBlock.ParentHash(), bc.currentFastBlock.NumberU64()-1)
|
newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
|
||||||
WriteHeadFastBlockHash(bc.db, bc.currentFastBlock.Hash())
|
bc.currentFastBlock.Store(newFastBlock)
|
||||||
|
WriteHeadFastBlockHash(bc.db, newFastBlock.Hash())
|
||||||
}
|
}
|
||||||
if bc.currentBlock.Hash() == hash {
|
if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
|
||||||
bc.currentBlock = bc.GetBlock(bc.currentBlock.ParentHash(), bc.currentBlock.NumberU64()-1)
|
newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
|
||||||
WriteHeadBlockHash(bc.db, bc.currentBlock.Hash())
|
bc.currentBlock.Store(newBlock)
|
||||||
|
WriteHeadBlockHash(bc.db, newBlock.Hash())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -830,11 +826,12 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
|
|||||||
bc.mu.Lock()
|
bc.mu.Lock()
|
||||||
head := blockChain[len(blockChain)-1]
|
head := blockChain[len(blockChain)-1]
|
||||||
if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case
|
if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case
|
||||||
if bc.GetTd(bc.currentFastBlock.Hash(), bc.currentFastBlock.NumberU64()).Cmp(td) < 0 {
|
currentFastBlock := bc.CurrentFastBlock()
|
||||||
|
if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
|
||||||
if err := WriteHeadFastBlockHash(bc.db, head.Hash()); err != nil {
|
if err := WriteHeadFastBlockHash(bc.db, head.Hash()); err != nil {
|
||||||
log.Crit("Failed to update head fast block hash", "err", err)
|
log.Crit("Failed to update head fast block hash", "err", err)
|
||||||
}
|
}
|
||||||
bc.currentFastBlock = head
|
bc.currentFastBlock.Store(head)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
bc.mu.Unlock()
|
bc.mu.Unlock()
|
||||||
@ -881,7 +878,8 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
|
|||||||
bc.mu.Lock()
|
bc.mu.Lock()
|
||||||
defer bc.mu.Unlock()
|
defer bc.mu.Unlock()
|
||||||
|
|
||||||
localTd := bc.GetTd(bc.currentBlock.Hash(), bc.currentBlock.NumberU64())
|
currentBlock := bc.CurrentBlock()
|
||||||
|
localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
|
||||||
externTd := new(big.Int).Add(block.Difficulty(), ptd)
|
externTd := new(big.Int).Add(block.Difficulty(), ptd)
|
||||||
|
|
||||||
// Irrelevant of the canonical status, write the block itself to the database
|
// Irrelevant of the canonical status, write the block itself to the database
|
||||||
@ -956,14 +954,15 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
|
|||||||
// Second clause in the if statement reduces the vulnerability to selfish mining.
|
// Second clause in the if statement reduces the vulnerability to selfish mining.
|
||||||
// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
|
// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
|
||||||
reorg := externTd.Cmp(localTd) > 0
|
reorg := externTd.Cmp(localTd) > 0
|
||||||
|
currentBlock = bc.CurrentBlock()
|
||||||
if !reorg && externTd.Cmp(localTd) == 0 {
|
if !reorg && externTd.Cmp(localTd) == 0 {
|
||||||
// Split same-difficulty blocks by number, then at random
|
// Split same-difficulty blocks by number, then at random
|
||||||
reorg = block.NumberU64() < bc.currentBlock.NumberU64() || (block.NumberU64() == bc.currentBlock.NumberU64() && mrand.Float64() < 0.5)
|
reorg = block.NumberU64() < currentBlock.NumberU64() || (block.NumberU64() == currentBlock.NumberU64() && mrand.Float64() < 0.5)
|
||||||
}
|
}
|
||||||
if reorg {
|
if reorg {
|
||||||
// Reorganise the chain if the parent is not the head block
|
// Reorganise the chain if the parent is not the head block
|
||||||
if block.ParentHash() != bc.currentBlock.Hash() {
|
if block.ParentHash() != currentBlock.Hash() {
|
||||||
if err := bc.reorg(bc.currentBlock, block); err != nil {
|
if err := bc.reorg(currentBlock, block); err != nil {
|
||||||
return NonStatTy, err
|
return NonStatTy, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -1092,7 +1091,8 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
|
|||||||
case err == consensus.ErrPrunedAncestor:
|
case err == consensus.ErrPrunedAncestor:
|
||||||
// Block competing with the canonical chain, store in the db, but don't process
|
// Block competing with the canonical chain, store in the db, but don't process
|
||||||
// until the competitor TD goes above the canonical TD
|
// until the competitor TD goes above the canonical TD
|
||||||
localTd := bc.GetTd(bc.currentBlock.Hash(), bc.currentBlock.NumberU64())
|
currentBlock := bc.CurrentBlock()
|
||||||
|
localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
|
||||||
externTd := new(big.Int).Add(bc.GetTd(block.ParentHash(), block.NumberU64()-1), block.Difficulty())
|
externTd := new(big.Int).Add(bc.GetTd(block.ParentHash(), block.NumberU64()-1), block.Difficulty())
|
||||||
if localTd.Cmp(externTd) > 0 {
|
if localTd.Cmp(externTd) > 0 {
|
||||||
if err = bc.WriteBlockWithoutState(block, externTd); err != nil {
|
if err = bc.WriteBlockWithoutState(block, externTd); err != nil {
|
||||||
@ -1481,9 +1481,6 @@ func (bc *BlockChain) writeHeader(header *types.Header) error {
|
|||||||
// CurrentHeader retrieves the current head header of the canonical chain. The
|
// CurrentHeader retrieves the current head header of the canonical chain. The
|
||||||
// header is retrieved from the HeaderChain's internal cache.
|
// header is retrieved from the HeaderChain's internal cache.
|
||||||
func (bc *BlockChain) CurrentHeader() *types.Header {
|
func (bc *BlockChain) CurrentHeader() *types.Header {
|
||||||
bc.mu.RLock()
|
|
||||||
defer bc.mu.RUnlock()
|
|
||||||
|
|
||||||
return bc.hc.CurrentHeader()
|
return bc.hc.CurrentHeader()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -34,26 +34,6 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
)
|
)
|
||||||
|
|
||||||
// newTestBlockChain creates a blockchain without validation.
|
|
||||||
func newTestBlockChain(fake bool) *BlockChain {
|
|
||||||
db, _ := ethdb.NewMemDatabase()
|
|
||||||
gspec := &Genesis{
|
|
||||||
Config: params.TestChainConfig,
|
|
||||||
Difficulty: big.NewInt(1),
|
|
||||||
}
|
|
||||||
gspec.MustCommit(db)
|
|
||||||
engine := ethash.NewFullFaker()
|
|
||||||
if !fake {
|
|
||||||
engine = ethash.NewTester()
|
|
||||||
}
|
|
||||||
blockchain, err := NewBlockChain(db, nil, gspec.Config, engine, vm.Config{})
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
blockchain.SetValidator(bproc{})
|
|
||||||
return blockchain
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test fork of length N starting from block i
|
// Test fork of length N starting from block i
|
||||||
func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, comparator func(td1, td2 *big.Int)) {
|
func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, comparator func(td1, td2 *big.Int)) {
|
||||||
// Copy old chain up to #i into a new db
|
// Copy old chain up to #i into a new db
|
||||||
@ -183,13 +163,18 @@ func insertChain(done chan bool, blockchain *BlockChain, chain types.Blocks, t *
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestLastBlock(t *testing.T) {
|
func TestLastBlock(t *testing.T) {
|
||||||
bchain := newTestBlockChain(false)
|
_, blockchain, err := newCanonical(ethash.NewFaker(), 0, true)
|
||||||
defer bchain.Stop()
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create pristine chain: %v", err)
|
||||||
|
}
|
||||||
|
defer blockchain.Stop()
|
||||||
|
|
||||||
block := makeBlockChain(bchain.CurrentBlock(), 1, ethash.NewFaker(), bchain.db, 0)[0]
|
blocks := makeBlockChain(blockchain.CurrentBlock(), 1, ethash.NewFullFaker(), blockchain.db, 0)
|
||||||
bchain.insert(block)
|
if _, err := blockchain.InsertChain(blocks); err != nil {
|
||||||
if block.Hash() != GetHeadBlockHash(bchain.db) {
|
t.Fatalf("Failed to insert block: %v", err)
|
||||||
t.Errorf("Write/Get HeadBlockHash failed")
|
}
|
||||||
|
if blocks[len(blocks)-1].Hash() != GetHeadBlockHash(blockchain.db) {
|
||||||
|
t.Fatalf("Write/Get HeadBlockHash failed")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -337,55 +322,13 @@ func testBrokenChain(t *testing.T, full bool) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
type bproc struct{}
|
|
||||||
|
|
||||||
func (bproc) ValidateBody(*types.Block) error { return nil }
|
|
||||||
func (bproc) ValidateState(block, parent *types.Block, state *state.StateDB, receipts types.Receipts, usedGas uint64) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
func (bproc) Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (types.Receipts, []*types.Log, uint64, error) {
|
|
||||||
return nil, nil, 0, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func makeHeaderChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Header {
|
|
||||||
blocks := makeBlockChainWithDiff(genesis, d, seed)
|
|
||||||
headers := make([]*types.Header, len(blocks))
|
|
||||||
for i, block := range blocks {
|
|
||||||
headers[i] = block.Header()
|
|
||||||
}
|
|
||||||
return headers
|
|
||||||
}
|
|
||||||
|
|
||||||
func makeBlockChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Block {
|
|
||||||
var chain []*types.Block
|
|
||||||
for i, difficulty := range d {
|
|
||||||
header := &types.Header{
|
|
||||||
Coinbase: common.Address{seed},
|
|
||||||
Number: big.NewInt(int64(i + 1)),
|
|
||||||
Difficulty: big.NewInt(int64(difficulty)),
|
|
||||||
UncleHash: types.EmptyUncleHash,
|
|
||||||
TxHash: types.EmptyRootHash,
|
|
||||||
ReceiptHash: types.EmptyRootHash,
|
|
||||||
Time: big.NewInt(int64(i) + 1),
|
|
||||||
}
|
|
||||||
if i == 0 {
|
|
||||||
header.ParentHash = genesis.Hash()
|
|
||||||
} else {
|
|
||||||
header.ParentHash = chain[i-1].Hash()
|
|
||||||
}
|
|
||||||
block := types.NewBlockWithHeader(header)
|
|
||||||
chain = append(chain, block)
|
|
||||||
}
|
|
||||||
return chain
|
|
||||||
}
|
|
||||||
|
|
||||||
// Tests that reorganising a long difficult chain after a short easy one
|
// Tests that reorganising a long difficult chain after a short easy one
|
||||||
// overwrites the canonical numbers and links in the database.
|
// overwrites the canonical numbers and links in the database.
|
||||||
func TestReorgLongHeaders(t *testing.T) { testReorgLong(t, false) }
|
func TestReorgLongHeaders(t *testing.T) { testReorgLong(t, false) }
|
||||||
func TestReorgLongBlocks(t *testing.T) { testReorgLong(t, true) }
|
func TestReorgLongBlocks(t *testing.T) { testReorgLong(t, true) }
|
||||||
|
|
||||||
func testReorgLong(t *testing.T, full bool) {
|
func testReorgLong(t *testing.T, full bool) {
|
||||||
testReorg(t, []int{1, 2, 4}, []int{1, 2, 3, 4}, 10, full)
|
testReorg(t, []int64{0, 0, -9}, []int64{0, 0, 0, -9}, 393280, full)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Tests that reorganising a short difficult chain after a long easy one
|
// Tests that reorganising a short difficult chain after a long easy one
|
||||||
@ -394,45 +337,82 @@ func TestReorgShortHeaders(t *testing.T) { testReorgShort(t, false) }
|
|||||||
func TestReorgShortBlocks(t *testing.T) { testReorgShort(t, true) }
|
func TestReorgShortBlocks(t *testing.T) { testReorgShort(t, true) }
|
||||||
|
|
||||||
func testReorgShort(t *testing.T, full bool) {
|
func testReorgShort(t *testing.T, full bool) {
|
||||||
testReorg(t, []int{1, 2, 3, 4}, []int{1, 10}, 11, full)
|
// Create a long easy chain vs. a short heavy one. Due to difficulty adjustment
|
||||||
|
// we need a fairly long chain of blocks with different difficulties for a short
|
||||||
|
// one to become heavyer than a long one. The 96 is an empirical value.
|
||||||
|
easy := make([]int64, 96)
|
||||||
|
for i := 0; i < len(easy); i++ {
|
||||||
|
easy[i] = 60
|
||||||
|
}
|
||||||
|
diff := make([]int64, len(easy)-1)
|
||||||
|
for i := 0; i < len(diff); i++ {
|
||||||
|
diff[i] = -9
|
||||||
|
}
|
||||||
|
testReorg(t, easy, diff, 12615120, full)
|
||||||
}
|
}
|
||||||
|
|
||||||
func testReorg(t *testing.T, first, second []int, td int64, full bool) {
|
func testReorg(t *testing.T, first, second []int64, td int64, full bool) {
|
||||||
bc := newTestBlockChain(true)
|
// Create a pristine chain and database
|
||||||
defer bc.Stop()
|
db, blockchain, err := newCanonical(ethash.NewFaker(), 0, full)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create pristine chain: %v", err)
|
||||||
|
}
|
||||||
|
defer blockchain.Stop()
|
||||||
|
|
||||||
// Insert an easy and a difficult chain afterwards
|
// Insert an easy and a difficult chain afterwards
|
||||||
|
easyBlocks, _ := GenerateChain(params.TestChainConfig, blockchain.CurrentBlock(), ethash.NewFaker(), db, len(first), func(i int, b *BlockGen) {
|
||||||
|
b.OffsetTime(first[i])
|
||||||
|
})
|
||||||
|
diffBlocks, _ := GenerateChain(params.TestChainConfig, blockchain.CurrentBlock(), ethash.NewFaker(), db, len(second), func(i int, b *BlockGen) {
|
||||||
|
b.OffsetTime(second[i])
|
||||||
|
})
|
||||||
if full {
|
if full {
|
||||||
bc.InsertChain(makeBlockChainWithDiff(bc.genesisBlock, first, 11))
|
if _, err := blockchain.InsertChain(easyBlocks); err != nil {
|
||||||
bc.InsertChain(makeBlockChainWithDiff(bc.genesisBlock, second, 22))
|
t.Fatalf("failed to insert easy chain: %v", err)
|
||||||
|
}
|
||||||
|
if _, err := blockchain.InsertChain(diffBlocks); err != nil {
|
||||||
|
t.Fatalf("failed to insert difficult chain: %v", err)
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
bc.InsertHeaderChain(makeHeaderChainWithDiff(bc.genesisBlock, first, 11), 1)
|
easyHeaders := make([]*types.Header, len(easyBlocks))
|
||||||
bc.InsertHeaderChain(makeHeaderChainWithDiff(bc.genesisBlock, second, 22), 1)
|
for i, block := range easyBlocks {
|
||||||
|
easyHeaders[i] = block.Header()
|
||||||
|
}
|
||||||
|
diffHeaders := make([]*types.Header, len(diffBlocks))
|
||||||
|
for i, block := range diffBlocks {
|
||||||
|
diffHeaders[i] = block.Header()
|
||||||
|
}
|
||||||
|
if _, err := blockchain.InsertHeaderChain(easyHeaders, 1); err != nil {
|
||||||
|
t.Fatalf("failed to insert easy chain: %v", err)
|
||||||
|
}
|
||||||
|
if _, err := blockchain.InsertHeaderChain(diffHeaders, 1); err != nil {
|
||||||
|
t.Fatalf("failed to insert difficult chain: %v", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
// Check that the chain is valid number and link wise
|
// Check that the chain is valid number and link wise
|
||||||
if full {
|
if full {
|
||||||
prev := bc.CurrentBlock()
|
prev := blockchain.CurrentBlock()
|
||||||
for block := bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 1); block.NumberU64() != 0; prev, block = block, bc.GetBlockByNumber(block.NumberU64()-1) {
|
for block := blockchain.GetBlockByNumber(blockchain.CurrentBlock().NumberU64() - 1); block.NumberU64() != 0; prev, block = block, blockchain.GetBlockByNumber(block.NumberU64()-1) {
|
||||||
if prev.ParentHash() != block.Hash() {
|
if prev.ParentHash() != block.Hash() {
|
||||||
t.Errorf("parent block hash mismatch: have %x, want %x", prev.ParentHash(), block.Hash())
|
t.Errorf("parent block hash mismatch: have %x, want %x", prev.ParentHash(), block.Hash())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
prev := bc.CurrentHeader()
|
prev := blockchain.CurrentHeader()
|
||||||
for header := bc.GetHeaderByNumber(bc.CurrentHeader().Number.Uint64() - 1); header.Number.Uint64() != 0; prev, header = header, bc.GetHeaderByNumber(header.Number.Uint64()-1) {
|
for header := blockchain.GetHeaderByNumber(blockchain.CurrentHeader().Number.Uint64() - 1); header.Number.Uint64() != 0; prev, header = header, blockchain.GetHeaderByNumber(header.Number.Uint64()-1) {
|
||||||
if prev.ParentHash != header.Hash() {
|
if prev.ParentHash != header.Hash() {
|
||||||
t.Errorf("parent header hash mismatch: have %x, want %x", prev.ParentHash, header.Hash())
|
t.Errorf("parent header hash mismatch: have %x, want %x", prev.ParentHash, header.Hash())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Make sure the chain total difficulty is the correct one
|
// Make sure the chain total difficulty is the correct one
|
||||||
want := new(big.Int).Add(bc.genesisBlock.Difficulty(), big.NewInt(td))
|
want := new(big.Int).Add(blockchain.genesisBlock.Difficulty(), big.NewInt(td))
|
||||||
if full {
|
if full {
|
||||||
if have := bc.GetTdByHash(bc.CurrentBlock().Hash()); have.Cmp(want) != 0 {
|
if have := blockchain.GetTdByHash(blockchain.CurrentBlock().Hash()); have.Cmp(want) != 0 {
|
||||||
t.Errorf("total difficulty mismatch: have %v, want %v", have, want)
|
t.Errorf("total difficulty mismatch: have %v, want %v", have, want)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
if have := bc.GetTdByHash(bc.CurrentHeader().Hash()); have.Cmp(want) != 0 {
|
if have := blockchain.GetTdByHash(blockchain.CurrentHeader().Hash()); have.Cmp(want) != 0 {
|
||||||
t.Errorf("total difficulty mismatch: have %v, want %v", have, want)
|
t.Errorf("total difficulty mismatch: have %v, want %v", have, want)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -443,19 +423,28 @@ func TestBadHeaderHashes(t *testing.T) { testBadHashes(t, false) }
|
|||||||
func TestBadBlockHashes(t *testing.T) { testBadHashes(t, true) }
|
func TestBadBlockHashes(t *testing.T) { testBadHashes(t, true) }
|
||||||
|
|
||||||
func testBadHashes(t *testing.T, full bool) {
|
func testBadHashes(t *testing.T, full bool) {
|
||||||
bc := newTestBlockChain(true)
|
// Create a pristine chain and database
|
||||||
defer bc.Stop()
|
db, blockchain, err := newCanonical(ethash.NewFaker(), 0, full)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create pristine chain: %v", err)
|
||||||
|
}
|
||||||
|
defer blockchain.Stop()
|
||||||
|
|
||||||
// Create a chain, ban a hash and try to import
|
// Create a chain, ban a hash and try to import
|
||||||
var err error
|
|
||||||
if full {
|
if full {
|
||||||
blocks := makeBlockChainWithDiff(bc.genesisBlock, []int{1, 2, 4}, 10)
|
blocks := makeBlockChain(blockchain.CurrentBlock(), 3, ethash.NewFaker(), db, 10)
|
||||||
|
|
||||||
BadHashes[blocks[2].Header().Hash()] = true
|
BadHashes[blocks[2].Header().Hash()] = true
|
||||||
_, err = bc.InsertChain(blocks)
|
defer func() { delete(BadHashes, blocks[2].Header().Hash()) }()
|
||||||
|
|
||||||
|
_, err = blockchain.InsertChain(blocks)
|
||||||
} else {
|
} else {
|
||||||
headers := makeHeaderChainWithDiff(bc.genesisBlock, []int{1, 2, 4}, 10)
|
headers := makeHeaderChain(blockchain.CurrentHeader(), 3, ethash.NewFaker(), db, 10)
|
||||||
|
|
||||||
BadHashes[headers[2].Hash()] = true
|
BadHashes[headers[2].Hash()] = true
|
||||||
_, err = bc.InsertHeaderChain(headers, 1)
|
defer func() { delete(BadHashes, headers[2].Hash()) }()
|
||||||
|
|
||||||
|
_, err = blockchain.InsertHeaderChain(headers, 1)
|
||||||
}
|
}
|
||||||
if err != ErrBlacklistedHash {
|
if err != ErrBlacklistedHash {
|
||||||
t.Errorf("error mismatch: have: %v, want: %v", err, ErrBlacklistedHash)
|
t.Errorf("error mismatch: have: %v, want: %v", err, ErrBlacklistedHash)
|
||||||
@ -468,40 +457,41 @@ func TestReorgBadHeaderHashes(t *testing.T) { testReorgBadHashes(t, false) }
|
|||||||
func TestReorgBadBlockHashes(t *testing.T) { testReorgBadHashes(t, true) }
|
func TestReorgBadBlockHashes(t *testing.T) { testReorgBadHashes(t, true) }
|
||||||
|
|
||||||
func testReorgBadHashes(t *testing.T, full bool) {
|
func testReorgBadHashes(t *testing.T, full bool) {
|
||||||
bc := newTestBlockChain(true)
|
// Create a pristine chain and database
|
||||||
defer bc.Stop()
|
db, blockchain, err := newCanonical(ethash.NewFaker(), 0, full)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create pristine chain: %v", err)
|
||||||
|
}
|
||||||
// Create a chain, import and ban afterwards
|
// Create a chain, import and ban afterwards
|
||||||
headers := makeHeaderChainWithDiff(bc.genesisBlock, []int{1, 2, 3, 4}, 10)
|
headers := makeHeaderChain(blockchain.CurrentHeader(), 4, ethash.NewFaker(), db, 10)
|
||||||
blocks := makeBlockChainWithDiff(bc.genesisBlock, []int{1, 2, 3, 4}, 10)
|
blocks := makeBlockChain(blockchain.CurrentBlock(), 4, ethash.NewFaker(), db, 10)
|
||||||
|
|
||||||
if full {
|
if full {
|
||||||
if _, err := bc.InsertChain(blocks); err != nil {
|
if _, err = blockchain.InsertChain(blocks); err != nil {
|
||||||
t.Fatalf("failed to import blocks: %v", err)
|
t.Errorf("failed to import blocks: %v", err)
|
||||||
}
|
}
|
||||||
if bc.CurrentBlock().Hash() != blocks[3].Hash() {
|
if blockchain.CurrentBlock().Hash() != blocks[3].Hash() {
|
||||||
t.Errorf("last block hash mismatch: have: %x, want %x", bc.CurrentBlock().Hash(), blocks[3].Header().Hash())
|
t.Errorf("last block hash mismatch: have: %x, want %x", blockchain.CurrentBlock().Hash(), blocks[3].Header().Hash())
|
||||||
}
|
}
|
||||||
BadHashes[blocks[3].Header().Hash()] = true
|
BadHashes[blocks[3].Header().Hash()] = true
|
||||||
defer func() { delete(BadHashes, blocks[3].Header().Hash()) }()
|
defer func() { delete(BadHashes, blocks[3].Header().Hash()) }()
|
||||||
} else {
|
} else {
|
||||||
if _, err := bc.InsertHeaderChain(headers, 1); err != nil {
|
if _, err = blockchain.InsertHeaderChain(headers, 1); err != nil {
|
||||||
t.Fatalf("failed to import headers: %v", err)
|
t.Errorf("failed to import headers: %v", err)
|
||||||
}
|
}
|
||||||
if bc.CurrentHeader().Hash() != headers[3].Hash() {
|
if blockchain.CurrentHeader().Hash() != headers[3].Hash() {
|
||||||
t.Errorf("last header hash mismatch: have: %x, want %x", bc.CurrentHeader().Hash(), headers[3].Hash())
|
t.Errorf("last header hash mismatch: have: %x, want %x", blockchain.CurrentHeader().Hash(), headers[3].Hash())
|
||||||
}
|
}
|
||||||
BadHashes[headers[3].Hash()] = true
|
BadHashes[headers[3].Hash()] = true
|
||||||
defer func() { delete(BadHashes, headers[3].Hash()) }()
|
defer func() { delete(BadHashes, headers[3].Hash()) }()
|
||||||
}
|
}
|
||||||
|
blockchain.Stop()
|
||||||
|
|
||||||
// Create a new BlockChain and check that it rolled back the state.
|
// Create a new BlockChain and check that it rolled back the state.
|
||||||
ncm, err := NewBlockChain(bc.db, nil, bc.chainConfig, ethash.NewFaker(), vm.Config{})
|
ncm, err := NewBlockChain(blockchain.db, nil, blockchain.chainConfig, ethash.NewFaker(), vm.Config{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("failed to create new chain manager: %v", err)
|
t.Fatalf("failed to create new chain manager: %v", err)
|
||||||
}
|
}
|
||||||
defer ncm.Stop()
|
|
||||||
|
|
||||||
if full {
|
if full {
|
||||||
if ncm.CurrentBlock().Hash() != blocks[2].Header().Hash() {
|
if ncm.CurrentBlock().Hash() != blocks[2].Header().Hash() {
|
||||||
t.Errorf("last block hash mismatch: have: %x, want %x", ncm.CurrentBlock().Hash(), blocks[2].Header().Hash())
|
t.Errorf("last block hash mismatch: have: %x, want %x", ncm.CurrentBlock().Hash(), blocks[2].Header().Hash())
|
||||||
@ -514,6 +504,7 @@ func testReorgBadHashes(t *testing.T, full bool) {
|
|||||||
t.Errorf("last header hash mismatch: have: %x, want %x", ncm.CurrentHeader().Hash(), headers[2].Hash())
|
t.Errorf("last header hash mismatch: have: %x, want %x", ncm.CurrentHeader().Hash(), headers[2].Hash())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
ncm.Stop()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Tests chain insertions in the face of one entity containing an invalid nonce.
|
// Tests chain insertions in the face of one entity containing an invalid nonce.
|
||||||
@ -989,10 +980,13 @@ done:
|
|||||||
|
|
||||||
// Tests if the canonical block can be fetched from the database during chain insertion.
|
// Tests if the canonical block can be fetched from the database during chain insertion.
|
||||||
func TestCanonicalBlockRetrieval(t *testing.T) {
|
func TestCanonicalBlockRetrieval(t *testing.T) {
|
||||||
bc := newTestBlockChain(true)
|
_, blockchain, err := newCanonical(ethash.NewFaker(), 0, true)
|
||||||
defer bc.Stop()
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create pristine chain: %v", err)
|
||||||
|
}
|
||||||
|
defer blockchain.Stop()
|
||||||
|
|
||||||
chain, _ := GenerateChain(bc.chainConfig, bc.genesisBlock, ethash.NewFaker(), bc.db, 10, func(i int, gen *BlockGen) {})
|
chain, _ := GenerateChain(blockchain.chainConfig, blockchain.genesisBlock, ethash.NewFaker(), blockchain.db, 10, func(i int, gen *BlockGen) {})
|
||||||
|
|
||||||
var pend sync.WaitGroup
|
var pend sync.WaitGroup
|
||||||
pend.Add(len(chain))
|
pend.Add(len(chain))
|
||||||
@ -1003,14 +997,14 @@ func TestCanonicalBlockRetrieval(t *testing.T) {
|
|||||||
|
|
||||||
// try to retrieve a block by its canonical hash and see if the block data can be retrieved.
|
// try to retrieve a block by its canonical hash and see if the block data can be retrieved.
|
||||||
for {
|
for {
|
||||||
ch := GetCanonicalHash(bc.db, block.NumberU64())
|
ch := GetCanonicalHash(blockchain.db, block.NumberU64())
|
||||||
if ch == (common.Hash{}) {
|
if ch == (common.Hash{}) {
|
||||||
continue // busy wait for canonical hash to be written
|
continue // busy wait for canonical hash to be written
|
||||||
}
|
}
|
||||||
if ch != block.Hash() {
|
if ch != block.Hash() {
|
||||||
t.Fatalf("unknown canonical hash, want %s, got %s", block.Hash().Hex(), ch.Hex())
|
t.Fatalf("unknown canonical hash, want %s, got %s", block.Hash().Hex(), ch.Hex())
|
||||||
}
|
}
|
||||||
fb := GetBlock(bc.db, ch, block.NumberU64())
|
fb := GetBlock(blockchain.db, ch, block.NumberU64())
|
||||||
if fb == nil {
|
if fb == nil {
|
||||||
t.Fatalf("unable to retrieve block %d for canonical hash: %s", block.NumberU64(), ch.Hex())
|
t.Fatalf("unable to retrieve block %d for canonical hash: %s", block.NumberU64(), ch.Hex())
|
||||||
}
|
}
|
||||||
@ -1021,7 +1015,7 @@ func TestCanonicalBlockRetrieval(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}(chain[i])
|
}(chain[i])
|
||||||
|
|
||||||
if _, err := bc.InsertChain(types.Blocks{chain[i]}); err != nil {
|
if _, err := blockchain.InsertChain(types.Blocks{chain[i]}); err != nil {
|
||||||
t.Fatalf("failed to insert block %d: %v", i, err)
|
t.Fatalf("failed to insert block %d: %v", i, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -47,6 +47,7 @@ var (
|
|||||||
headHeaderKey = []byte("LastHeader")
|
headHeaderKey = []byte("LastHeader")
|
||||||
headBlockKey = []byte("LastBlock")
|
headBlockKey = []byte("LastBlock")
|
||||||
headFastKey = []byte("LastFast")
|
headFastKey = []byte("LastFast")
|
||||||
|
trieSyncKey = []byte("TrieSync")
|
||||||
|
|
||||||
// Data item prefixes (use single byte to avoid mixing data types, avoid `i`).
|
// Data item prefixes (use single byte to avoid mixing data types, avoid `i`).
|
||||||
headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header
|
headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header
|
||||||
@ -70,8 +71,8 @@ var (
|
|||||||
|
|
||||||
ErrChainConfigNotFound = errors.New("ChainConfig not found") // general config not found error
|
ErrChainConfigNotFound = errors.New("ChainConfig not found") // general config not found error
|
||||||
|
|
||||||
preimageCounter = metrics.NewCounter("db/preimage/total")
|
preimageCounter = metrics.NewRegisteredCounter("db/preimage/total", nil)
|
||||||
preimageHitCounter = metrics.NewCounter("db/preimage/hits")
|
preimageHitCounter = metrics.NewRegisteredCounter("db/preimage/hits", nil)
|
||||||
)
|
)
|
||||||
|
|
||||||
// TxLookupEntry is a positional metadata to help looking up the data content of
|
// TxLookupEntry is a positional metadata to help looking up the data content of
|
||||||
@ -146,6 +147,16 @@ func GetHeadFastBlockHash(db DatabaseReader) common.Hash {
|
|||||||
return common.BytesToHash(data)
|
return common.BytesToHash(data)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetTrieSyncProgress retrieves the number of tries nodes fast synced to allow
|
||||||
|
// reportinc correct numbers across restarts.
|
||||||
|
func GetTrieSyncProgress(db DatabaseReader) uint64 {
|
||||||
|
data, _ := db.Get(trieSyncKey)
|
||||||
|
if len(data) == 0 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return new(big.Int).SetBytes(data).Uint64()
|
||||||
|
}
|
||||||
|
|
||||||
// GetHeaderRLP retrieves a block header in its raw RLP database encoding, or nil
|
// GetHeaderRLP retrieves a block header in its raw RLP database encoding, or nil
|
||||||
// if the header's not found.
|
// if the header's not found.
|
||||||
func GetHeaderRLP(db DatabaseReader, hash common.Hash, number uint64) rlp.RawValue {
|
func GetHeaderRLP(db DatabaseReader, hash common.Hash, number uint64) rlp.RawValue {
|
||||||
@ -374,6 +385,15 @@ func WriteHeadFastBlockHash(db ethdb.Putter, hash common.Hash) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WriteTrieSyncProgress stores the fast sync trie process counter to support
|
||||||
|
// retrieving it across restarts.
|
||||||
|
func WriteTrieSyncProgress(db ethdb.Putter, count uint64) error {
|
||||||
|
if err := db.Put(trieSyncKey, new(big.Int).SetUint64(count).Bytes()); err != nil {
|
||||||
|
log.Crit("Failed to store fast sync trie progress", "err", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// WriteHeader serializes a block header into the database.
|
// WriteHeader serializes a block header into the database.
|
||||||
func WriteHeader(db ethdb.Putter, header *types.Header) error {
|
func WriteHeader(db ethdb.Putter, header *types.Header) error {
|
||||||
data, err := rlp.EncodeToBytes(header)
|
data, err := rlp.EncodeToBytes(header)
|
||||||
|
23
core/fees.go
23
core/fees.go
@ -1,23 +0,0 @@
|
|||||||
// Copyright 2014 The go-ethereum Authors
|
|
||||||
// This file is part of the go-ethereum library.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU Lesser General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU Lesser General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU Lesser General Public License
|
|
||||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
package core
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math/big"
|
|
||||||
)
|
|
||||||
|
|
||||||
var BlockReward = big.NewInt(5e+18)
|
|
@ -118,10 +118,12 @@ func TestSetupGenesis(t *testing.T) {
|
|||||||
// Commit the 'old' genesis block with Homestead transition at #2.
|
// Commit the 'old' genesis block with Homestead transition at #2.
|
||||||
// Advance to block #4, past the homestead transition block of customg.
|
// Advance to block #4, past the homestead transition block of customg.
|
||||||
genesis := oldcustomg.MustCommit(db)
|
genesis := oldcustomg.MustCommit(db)
|
||||||
|
|
||||||
bc, _ := NewBlockChain(db, nil, oldcustomg.Config, ethash.NewFullFaker(), vm.Config{})
|
bc, _ := NewBlockChain(db, nil, oldcustomg.Config, ethash.NewFullFaker(), vm.Config{})
|
||||||
defer bc.Stop()
|
defer bc.Stop()
|
||||||
bc.SetValidator(bproc{})
|
|
||||||
bc.InsertChain(makeBlockChainWithDiff(genesis, []int{2, 3, 4, 5}, 0))
|
blocks, _ := GenerateChain(oldcustomg.Config, genesis, ethash.NewFaker(), db, 4, nil)
|
||||||
|
bc.InsertChain(blocks)
|
||||||
bc.CurrentBlock()
|
bc.CurrentBlock()
|
||||||
// This should return a compatibility error.
|
// This should return a compatibility error.
|
||||||
return SetupGenesisBlock(db, &customg)
|
return SetupGenesisBlock(db, &customg)
|
||||||
|
@ -32,6 +32,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/hashicorp/golang-lru"
|
"github.com/hashicorp/golang-lru"
|
||||||
|
"sync/atomic"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@ -51,8 +52,8 @@ type HeaderChain struct {
|
|||||||
chainDb ethdb.Database
|
chainDb ethdb.Database
|
||||||
genesisHeader *types.Header
|
genesisHeader *types.Header
|
||||||
|
|
||||||
currentHeader *types.Header // Current head of the header chain (may be above the block chain!)
|
currentHeader atomic.Value // Current head of the header chain (may be above the block chain!)
|
||||||
currentHeaderHash common.Hash // Hash of the current head of the header chain (prevent recomputing all the time)
|
currentHeaderHash common.Hash // Hash of the current head of the header chain (prevent recomputing all the time)
|
||||||
|
|
||||||
headerCache *lru.Cache // Cache for the most recent block headers
|
headerCache *lru.Cache // Cache for the most recent block headers
|
||||||
tdCache *lru.Cache // Cache for the most recent block total difficulties
|
tdCache *lru.Cache // Cache for the most recent block total difficulties
|
||||||
@ -95,13 +96,13 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine c
|
|||||||
return nil, ErrNoGenesis
|
return nil, ErrNoGenesis
|
||||||
}
|
}
|
||||||
|
|
||||||
hc.currentHeader = hc.genesisHeader
|
hc.currentHeader.Store(hc.genesisHeader)
|
||||||
if head := GetHeadBlockHash(chainDb); head != (common.Hash{}) {
|
if head := GetHeadBlockHash(chainDb); head != (common.Hash{}) {
|
||||||
if chead := hc.GetHeaderByHash(head); chead != nil {
|
if chead := hc.GetHeaderByHash(head); chead != nil {
|
||||||
hc.currentHeader = chead
|
hc.currentHeader.Store(chead)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
hc.currentHeaderHash = hc.currentHeader.Hash()
|
hc.currentHeaderHash = hc.CurrentHeader().Hash()
|
||||||
|
|
||||||
return hc, nil
|
return hc, nil
|
||||||
}
|
}
|
||||||
@ -139,7 +140,7 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
|
|||||||
if ptd == nil {
|
if ptd == nil {
|
||||||
return NonStatTy, consensus.ErrUnknownAncestor
|
return NonStatTy, consensus.ErrUnknownAncestor
|
||||||
}
|
}
|
||||||
localTd := hc.GetTd(hc.currentHeaderHash, hc.currentHeader.Number.Uint64())
|
localTd := hc.GetTd(hc.currentHeaderHash, hc.CurrentHeader().Number.Uint64())
|
||||||
externTd := new(big.Int).Add(header.Difficulty, ptd)
|
externTd := new(big.Int).Add(header.Difficulty, ptd)
|
||||||
|
|
||||||
// Irrelevant of the canonical status, write the td and header to the database
|
// Irrelevant of the canonical status, write the td and header to the database
|
||||||
@ -181,7 +182,8 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
|
|||||||
if err := WriteHeadHeaderHash(hc.chainDb, hash); err != nil {
|
if err := WriteHeadHeaderHash(hc.chainDb, hash); err != nil {
|
||||||
log.Crit("Failed to insert head header hash", "err", err)
|
log.Crit("Failed to insert head header hash", "err", err)
|
||||||
}
|
}
|
||||||
hc.currentHeaderHash, hc.currentHeader = hash, types.CopyHeader(header)
|
hc.currentHeaderHash = hash
|
||||||
|
hc.currentHeader.Store(types.CopyHeader(header))
|
||||||
|
|
||||||
status = CanonStatTy
|
status = CanonStatTy
|
||||||
} else {
|
} else {
|
||||||
@ -383,7 +385,7 @@ func (hc *HeaderChain) GetHeaderByNumber(number uint64) *types.Header {
|
|||||||
// CurrentHeader retrieves the current head header of the canonical chain. The
|
// CurrentHeader retrieves the current head header of the canonical chain. The
|
||||||
// header is retrieved from the HeaderChain's internal cache.
|
// header is retrieved from the HeaderChain's internal cache.
|
||||||
func (hc *HeaderChain) CurrentHeader() *types.Header {
|
func (hc *HeaderChain) CurrentHeader() *types.Header {
|
||||||
return hc.currentHeader
|
return hc.currentHeader.Load().(*types.Header)
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetCurrentHeader sets the current head header of the canonical chain.
|
// SetCurrentHeader sets the current head header of the canonical chain.
|
||||||
@ -391,7 +393,7 @@ func (hc *HeaderChain) SetCurrentHeader(head *types.Header) {
|
|||||||
if err := WriteHeadHeaderHash(hc.chainDb, head.Hash()); err != nil {
|
if err := WriteHeadHeaderHash(hc.chainDb, head.Hash()); err != nil {
|
||||||
log.Crit("Failed to insert head header hash", "err", err)
|
log.Crit("Failed to insert head header hash", "err", err)
|
||||||
}
|
}
|
||||||
hc.currentHeader = head
|
hc.currentHeader.Store(head)
|
||||||
hc.currentHeaderHash = head.Hash()
|
hc.currentHeaderHash = head.Hash()
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -403,19 +405,20 @@ type DeleteCallback func(common.Hash, uint64)
|
|||||||
// will be deleted and the new one set.
|
// will be deleted and the new one set.
|
||||||
func (hc *HeaderChain) SetHead(head uint64, delFn DeleteCallback) {
|
func (hc *HeaderChain) SetHead(head uint64, delFn DeleteCallback) {
|
||||||
height := uint64(0)
|
height := uint64(0)
|
||||||
if hc.currentHeader != nil {
|
|
||||||
height = hc.currentHeader.Number.Uint64()
|
if hdr := hc.CurrentHeader(); hdr != nil {
|
||||||
|
height = hdr.Number.Uint64()
|
||||||
}
|
}
|
||||||
|
|
||||||
for hc.currentHeader != nil && hc.currentHeader.Number.Uint64() > head {
|
for hdr := hc.CurrentHeader(); hdr != nil && hdr.Number.Uint64() > head; hdr = hc.CurrentHeader() {
|
||||||
hash := hc.currentHeader.Hash()
|
hash := hdr.Hash()
|
||||||
num := hc.currentHeader.Number.Uint64()
|
num := hdr.Number.Uint64()
|
||||||
if delFn != nil {
|
if delFn != nil {
|
||||||
delFn(hash, num)
|
delFn(hash, num)
|
||||||
}
|
}
|
||||||
DeleteHeader(hc.chainDb, hash, num)
|
DeleteHeader(hc.chainDb, hash, num)
|
||||||
DeleteTd(hc.chainDb, hash, num)
|
DeleteTd(hc.chainDb, hash, num)
|
||||||
hc.currentHeader = hc.GetHeader(hc.currentHeader.ParentHash, hc.currentHeader.Number.Uint64()-1)
|
hc.currentHeader.Store(hc.GetHeader(hdr.ParentHash, hdr.Number.Uint64()-1))
|
||||||
}
|
}
|
||||||
// Roll back the canonical chain numbering
|
// Roll back the canonical chain numbering
|
||||||
for i := height; i > head; i-- {
|
for i := height; i > head; i-- {
|
||||||
@ -426,10 +429,10 @@ func (hc *HeaderChain) SetHead(head uint64, delFn DeleteCallback) {
|
|||||||
hc.tdCache.Purge()
|
hc.tdCache.Purge()
|
||||||
hc.numberCache.Purge()
|
hc.numberCache.Purge()
|
||||||
|
|
||||||
if hc.currentHeader == nil {
|
if hc.CurrentHeader() == nil {
|
||||||
hc.currentHeader = hc.genesisHeader
|
hc.currentHeader.Store(hc.genesisHeader)
|
||||||
}
|
}
|
||||||
hc.currentHeaderHash = hc.currentHeader.Hash()
|
hc.currentHeaderHash = hc.CurrentHeader().Hash()
|
||||||
|
|
||||||
if err := WriteHeadHeaderHash(hc.chainDb, hc.currentHeaderHash); err != nil {
|
if err := WriteHeadHeaderHash(hc.chainDb, hc.currentHeaderHash); err != nil {
|
||||||
log.Crit("Failed to reset head header hash", "err", err)
|
log.Crit("Failed to reset head header hash", "err", err)
|
||||||
|
@ -87,20 +87,20 @@ var (
|
|||||||
|
|
||||||
var (
|
var (
|
||||||
// Metrics for the pending pool
|
// Metrics for the pending pool
|
||||||
pendingDiscardCounter = metrics.NewCounter("txpool/pending/discard")
|
pendingDiscardCounter = metrics.NewRegisteredCounter("txpool/pending/discard", nil)
|
||||||
pendingReplaceCounter = metrics.NewCounter("txpool/pending/replace")
|
pendingReplaceCounter = metrics.NewRegisteredCounter("txpool/pending/replace", nil)
|
||||||
pendingRateLimitCounter = metrics.NewCounter("txpool/pending/ratelimit") // Dropped due to rate limiting
|
pendingRateLimitCounter = metrics.NewRegisteredCounter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting
|
||||||
pendingNofundsCounter = metrics.NewCounter("txpool/pending/nofunds") // Dropped due to out-of-funds
|
pendingNofundsCounter = metrics.NewRegisteredCounter("txpool/pending/nofunds", nil) // Dropped due to out-of-funds
|
||||||
|
|
||||||
// Metrics for the queued pool
|
// Metrics for the queued pool
|
||||||
queuedDiscardCounter = metrics.NewCounter("txpool/queued/discard")
|
queuedDiscardCounter = metrics.NewRegisteredCounter("txpool/queued/discard", nil)
|
||||||
queuedReplaceCounter = metrics.NewCounter("txpool/queued/replace")
|
queuedReplaceCounter = metrics.NewRegisteredCounter("txpool/queued/replace", nil)
|
||||||
queuedRateLimitCounter = metrics.NewCounter("txpool/queued/ratelimit") // Dropped due to rate limiting
|
queuedRateLimitCounter = metrics.NewRegisteredCounter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting
|
||||||
queuedNofundsCounter = metrics.NewCounter("txpool/queued/nofunds") // Dropped due to out-of-funds
|
queuedNofundsCounter = metrics.NewRegisteredCounter("txpool/queued/nofunds", nil) // Dropped due to out-of-funds
|
||||||
|
|
||||||
// General tx metrics
|
// General tx metrics
|
||||||
invalidTxCounter = metrics.NewCounter("txpool/invalid")
|
invalidTxCounter = metrics.NewRegisteredCounter("txpool/invalid", nil)
|
||||||
underpricedTxCounter = metrics.NewCounter("txpool/underpriced")
|
underpricedTxCounter = metrics.NewRegisteredCounter("txpool/underpriced", nil)
|
||||||
)
|
)
|
||||||
|
|
||||||
// TxStatus is the current status of a transaction as seen by the pool.
|
// TxStatus is the current status of a transaction as seen by the pool.
|
||||||
|
@ -251,26 +251,12 @@ func (c *bigModExp) Run(input []byte) ([]byte, error) {
|
|||||||
return common.LeftPadBytes(base.Exp(base, exp, mod).Bytes(), int(modLen)), nil
|
return common.LeftPadBytes(base.Exp(base, exp, mod).Bytes(), int(modLen)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
|
||||||
// errNotOnCurve is returned if a point being unmarshalled as a bn256 elliptic
|
|
||||||
// curve point is not on the curve.
|
|
||||||
errNotOnCurve = errors.New("point not on elliptic curve")
|
|
||||||
|
|
||||||
// errInvalidCurvePoint is returned if a point being unmarshalled as a bn256
|
|
||||||
// elliptic curve point is invalid.
|
|
||||||
errInvalidCurvePoint = errors.New("invalid elliptic curve point")
|
|
||||||
)
|
|
||||||
|
|
||||||
// newCurvePoint unmarshals a binary blob into a bn256 elliptic curve point,
|
// newCurvePoint unmarshals a binary blob into a bn256 elliptic curve point,
|
||||||
// returning it, or an error if the point is invalid.
|
// returning it, or an error if the point is invalid.
|
||||||
func newCurvePoint(blob []byte) (*bn256.G1, error) {
|
func newCurvePoint(blob []byte) (*bn256.G1, error) {
|
||||||
p, onCurve := new(bn256.G1).Unmarshal(blob)
|
p := new(bn256.G1)
|
||||||
if !onCurve {
|
if _, err := p.Unmarshal(blob); err != nil {
|
||||||
return nil, errNotOnCurve
|
return nil, err
|
||||||
}
|
|
||||||
gx, gy, _, _ := p.CurvePoints()
|
|
||||||
if gx.Cmp(bn256.P) >= 0 || gy.Cmp(bn256.P) >= 0 {
|
|
||||||
return nil, errInvalidCurvePoint
|
|
||||||
}
|
}
|
||||||
return p, nil
|
return p, nil
|
||||||
}
|
}
|
||||||
@ -278,14 +264,9 @@ func newCurvePoint(blob []byte) (*bn256.G1, error) {
|
|||||||
// newTwistPoint unmarshals a binary blob into a bn256 elliptic curve point,
|
// newTwistPoint unmarshals a binary blob into a bn256 elliptic curve point,
|
||||||
// returning it, or an error if the point is invalid.
|
// returning it, or an error if the point is invalid.
|
||||||
func newTwistPoint(blob []byte) (*bn256.G2, error) {
|
func newTwistPoint(blob []byte) (*bn256.G2, error) {
|
||||||
p, onCurve := new(bn256.G2).Unmarshal(blob)
|
p := new(bn256.G2)
|
||||||
if !onCurve {
|
if _, err := p.Unmarshal(blob); err != nil {
|
||||||
return nil, errNotOnCurve
|
return nil, err
|
||||||
}
|
|
||||||
x2, y2, _, _ := p.CurvePoints()
|
|
||||||
if x2.Real().Cmp(bn256.P) >= 0 || x2.Imag().Cmp(bn256.P) >= 0 ||
|
|
||||||
y2.Real().Cmp(bn256.P) >= 0 || y2.Imag().Cmp(bn256.P) >= 0 {
|
|
||||||
return nil, errInvalidCurvePoint
|
|
||||||
}
|
}
|
||||||
return p, nil
|
return p, nil
|
||||||
}
|
}
|
||||||
|
@ -302,6 +302,66 @@ func opMulmod(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *S
|
|||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// opSHL implements Shift Left
|
||||||
|
// The SHL instruction (shift left) pops 2 values from the stack, first arg1 and then arg2,
|
||||||
|
// and pushes on the stack arg2 shifted to the left by arg1 number of bits.
|
||||||
|
func opSHL(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
|
||||||
|
// Note, second operand is left in the stack; accumulate result into it, and no need to push it afterwards
|
||||||
|
shift, value := math.U256(stack.pop()), math.U256(stack.peek())
|
||||||
|
defer evm.interpreter.intPool.put(shift) // First operand back into the pool
|
||||||
|
|
||||||
|
if shift.Cmp(common.Big256) >= 0 {
|
||||||
|
value.SetUint64(0)
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
n := uint(shift.Uint64())
|
||||||
|
math.U256(value.Lsh(value, n))
|
||||||
|
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// opSHR implements Logical Shift Right
|
||||||
|
// The SHR instruction (logical shift right) pops 2 values from the stack, first arg1 and then arg2,
|
||||||
|
// and pushes on the stack arg2 shifted to the right by arg1 number of bits with zero fill.
|
||||||
|
func opSHR(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
|
||||||
|
// Note, second operand is left in the stack; accumulate result into it, and no need to push it afterwards
|
||||||
|
shift, value := math.U256(stack.pop()), math.U256(stack.peek())
|
||||||
|
defer evm.interpreter.intPool.put(shift) // First operand back into the pool
|
||||||
|
|
||||||
|
if shift.Cmp(common.Big256) >= 0 {
|
||||||
|
value.SetUint64(0)
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
n := uint(shift.Uint64())
|
||||||
|
math.U256(value.Rsh(value, n))
|
||||||
|
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// opSAR implements Arithmetic Shift Right
|
||||||
|
// The SAR instruction (arithmetic shift right) pops 2 values from the stack, first arg1 and then arg2,
|
||||||
|
// and pushes on the stack arg2 shifted to the right by arg1 number of bits with sign extension.
|
||||||
|
func opSAR(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
|
||||||
|
// Note, S256 returns (potentially) a new bigint, so we're popping, not peeking this one
|
||||||
|
shift, value := math.U256(stack.pop()), math.S256(stack.pop())
|
||||||
|
defer evm.interpreter.intPool.put(shift) // First operand back into the pool
|
||||||
|
|
||||||
|
if shift.Cmp(common.Big256) >= 0 {
|
||||||
|
if value.Sign() > 0 {
|
||||||
|
value.SetUint64(0)
|
||||||
|
} else {
|
||||||
|
value.SetInt64(-1)
|
||||||
|
}
|
||||||
|
stack.push(math.U256(value))
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
n := uint(shift.Uint64())
|
||||||
|
value.Rsh(value, n)
|
||||||
|
stack.push(math.U256(value))
|
||||||
|
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
func opSha3(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
|
func opSha3(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
|
||||||
offset, size := stack.pop(), stack.pop()
|
offset, size := stack.pop(), stack.pop()
|
||||||
data := memory.Get(offset.Int64(), size.Int64())
|
data := memory.Get(offset.Int64(), size.Int64())
|
||||||
|
@ -24,6 +24,48 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
type twoOperandTest struct {
|
||||||
|
x string
|
||||||
|
y string
|
||||||
|
expected string
|
||||||
|
}
|
||||||
|
|
||||||
|
func testTwoOperandOp(t *testing.T, tests []twoOperandTest, opFn func(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error)) {
|
||||||
|
var (
|
||||||
|
env = NewEVM(Context{}, nil, params.TestChainConfig, Config{EnableJit: false, ForceJit: false})
|
||||||
|
stack = newstack()
|
||||||
|
pc = uint64(0)
|
||||||
|
)
|
||||||
|
for i, test := range tests {
|
||||||
|
x := new(big.Int).SetBytes(common.Hex2Bytes(test.x))
|
||||||
|
shift := new(big.Int).SetBytes(common.Hex2Bytes(test.y))
|
||||||
|
expected := new(big.Int).SetBytes(common.Hex2Bytes(test.expected))
|
||||||
|
stack.push(x)
|
||||||
|
stack.push(shift)
|
||||||
|
opFn(&pc, env, nil, nil, stack)
|
||||||
|
actual := stack.pop()
|
||||||
|
if actual.Cmp(expected) != 0 {
|
||||||
|
t.Errorf("Testcase %d, expected %v, got %v", i, expected, actual)
|
||||||
|
}
|
||||||
|
// Check pool usage
|
||||||
|
// 1.pool is not allowed to contain anything on the stack
|
||||||
|
// 2.pool is not allowed to contain the same pointers twice
|
||||||
|
if env.interpreter.intPool.pool.len() > 0 {
|
||||||
|
|
||||||
|
poolvals := make(map[*big.Int]struct{})
|
||||||
|
poolvals[actual] = struct{}{}
|
||||||
|
|
||||||
|
for env.interpreter.intPool.pool.len() > 0 {
|
||||||
|
key := env.interpreter.intPool.get()
|
||||||
|
if _, exist := poolvals[key]; exist {
|
||||||
|
t.Errorf("Testcase %d, pool contains double-entry", i)
|
||||||
|
}
|
||||||
|
poolvals[key] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestByteOp(t *testing.T) {
|
func TestByteOp(t *testing.T) {
|
||||||
var (
|
var (
|
||||||
env = NewEVM(Context{}, nil, params.TestChainConfig, Config{EnableJit: false, ForceJit: false})
|
env = NewEVM(Context{}, nil, params.TestChainConfig, Config{EnableJit: false, ForceJit: false})
|
||||||
@ -57,6 +99,98 @@ func TestByteOp(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestSHL(t *testing.T) {
|
||||||
|
// Testcases from https://github.com/ethereum/EIPs/blob/master/EIPS/eip-145.md#shl-shift-left
|
||||||
|
tests := []twoOperandTest{
|
||||||
|
{"0000000000000000000000000000000000000000000000000000000000000001", "00", "0000000000000000000000000000000000000000000000000000000000000001"},
|
||||||
|
{"0000000000000000000000000000000000000000000000000000000000000001", "01", "0000000000000000000000000000000000000000000000000000000000000002"},
|
||||||
|
{"0000000000000000000000000000000000000000000000000000000000000001", "ff", "8000000000000000000000000000000000000000000000000000000000000000"},
|
||||||
|
{"0000000000000000000000000000000000000000000000000000000000000001", "0100", "0000000000000000000000000000000000000000000000000000000000000000"},
|
||||||
|
{"0000000000000000000000000000000000000000000000000000000000000001", "0101", "0000000000000000000000000000000000000000000000000000000000000000"},
|
||||||
|
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "00", "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"},
|
||||||
|
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "01", "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe"},
|
||||||
|
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "ff", "8000000000000000000000000000000000000000000000000000000000000000"},
|
||||||
|
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0100", "0000000000000000000000000000000000000000000000000000000000000000"},
|
||||||
|
{"0000000000000000000000000000000000000000000000000000000000000000", "01", "0000000000000000000000000000000000000000000000000000000000000000"},
|
||||||
|
{"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "01", "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe"},
|
||||||
|
}
|
||||||
|
testTwoOperandOp(t, tests, opSHL)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSHR(t *testing.T) {
|
||||||
|
// Testcases from https://github.com/ethereum/EIPs/blob/master/EIPS/eip-145.md#shr-logical-shift-right
|
||||||
|
tests := []twoOperandTest{
|
||||||
|
{"0000000000000000000000000000000000000000000000000000000000000001", "00", "0000000000000000000000000000000000000000000000000000000000000001"},
|
||||||
|
{"0000000000000000000000000000000000000000000000000000000000000001", "01", "0000000000000000000000000000000000000000000000000000000000000000"},
|
||||||
|
{"8000000000000000000000000000000000000000000000000000000000000000", "01", "4000000000000000000000000000000000000000000000000000000000000000"},
|
||||||
|
{"8000000000000000000000000000000000000000000000000000000000000000", "ff", "0000000000000000000000000000000000000000000000000000000000000001"},
|
||||||
|
{"8000000000000000000000000000000000000000000000000000000000000000", "0100", "0000000000000000000000000000000000000000000000000000000000000000"},
|
||||||
|
{"8000000000000000000000000000000000000000000000000000000000000000", "0101", "0000000000000000000000000000000000000000000000000000000000000000"},
|
||||||
|
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "00", "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"},
|
||||||
|
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "01", "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"},
|
||||||
|
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "ff", "0000000000000000000000000000000000000000000000000000000000000001"},
|
||||||
|
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0100", "0000000000000000000000000000000000000000000000000000000000000000"},
|
||||||
|
{"0000000000000000000000000000000000000000000000000000000000000000", "01", "0000000000000000000000000000000000000000000000000000000000000000"},
|
||||||
|
}
|
||||||
|
testTwoOperandOp(t, tests, opSHR)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSAR(t *testing.T) {
|
||||||
|
// Testcases from https://github.com/ethereum/EIPs/blob/master/EIPS/eip-145.md#sar-arithmetic-shift-right
|
||||||
|
tests := []twoOperandTest{
|
||||||
|
{"0000000000000000000000000000000000000000000000000000000000000001", "00", "0000000000000000000000000000000000000000000000000000000000000001"},
|
||||||
|
{"0000000000000000000000000000000000000000000000000000000000000001", "01", "0000000000000000000000000000000000000000000000000000000000000000"},
|
||||||
|
{"8000000000000000000000000000000000000000000000000000000000000000", "01", "c000000000000000000000000000000000000000000000000000000000000000"},
|
||||||
|
{"8000000000000000000000000000000000000000000000000000000000000000", "ff", "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"},
|
||||||
|
{"8000000000000000000000000000000000000000000000000000000000000000", "0100", "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"},
|
||||||
|
{"8000000000000000000000000000000000000000000000000000000000000000", "0101", "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"},
|
||||||
|
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "00", "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"},
|
||||||
|
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "01", "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"},
|
||||||
|
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "ff", "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"},
|
||||||
|
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0100", "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"},
|
||||||
|
{"0000000000000000000000000000000000000000000000000000000000000000", "01", "0000000000000000000000000000000000000000000000000000000000000000"},
|
||||||
|
{"4000000000000000000000000000000000000000000000000000000000000000", "fe", "0000000000000000000000000000000000000000000000000000000000000001"},
|
||||||
|
{"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "f8", "000000000000000000000000000000000000000000000000000000000000007f"},
|
||||||
|
{"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "fe", "0000000000000000000000000000000000000000000000000000000000000001"},
|
||||||
|
{"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "ff", "0000000000000000000000000000000000000000000000000000000000000000"},
|
||||||
|
{"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0100", "0000000000000000000000000000000000000000000000000000000000000000"},
|
||||||
|
}
|
||||||
|
|
||||||
|
testTwoOperandOp(t, tests, opSAR)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSGT(t *testing.T) {
|
||||||
|
tests := []twoOperandTest{
|
||||||
|
{"0000000000000000000000000000000000000000000000000000000000000001", "0000000000000000000000000000000000000000000000000000000000000001", "0000000000000000000000000000000000000000000000000000000000000000"},
|
||||||
|
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0000000000000000000000000000000000000000000000000000000000000000"},
|
||||||
|
{"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0000000000000000000000000000000000000000000000000000000000000000"},
|
||||||
|
{"0000000000000000000000000000000000000000000000000000000000000001", "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0000000000000000000000000000000000000000000000000000000000000001"},
|
||||||
|
{"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0000000000000000000000000000000000000000000000000000000000000001", "0000000000000000000000000000000000000000000000000000000000000000"},
|
||||||
|
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0000000000000000000000000000000000000000000000000000000000000001", "0000000000000000000000000000000000000000000000000000000000000001"},
|
||||||
|
{"0000000000000000000000000000000000000000000000000000000000000001", "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0000000000000000000000000000000000000000000000000000000000000000"},
|
||||||
|
{"8000000000000000000000000000000000000000000000000000000000000001", "8000000000000000000000000000000000000000000000000000000000000001", "0000000000000000000000000000000000000000000000000000000000000000"},
|
||||||
|
{"8000000000000000000000000000000000000000000000000000000000000001", "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0000000000000000000000000000000000000000000000000000000000000001"},
|
||||||
|
{"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "8000000000000000000000000000000000000000000000000000000000000001", "0000000000000000000000000000000000000000000000000000000000000000"},
|
||||||
|
}
|
||||||
|
testTwoOperandOp(t, tests, opSgt)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSLT(t *testing.T) {
|
||||||
|
tests := []twoOperandTest{
|
||||||
|
{"0000000000000000000000000000000000000000000000000000000000000001", "0000000000000000000000000000000000000000000000000000000000000001", "0000000000000000000000000000000000000000000000000000000000000000"},
|
||||||
|
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0000000000000000000000000000000000000000000000000000000000000000"},
|
||||||
|
{"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0000000000000000000000000000000000000000000000000000000000000000"},
|
||||||
|
{"0000000000000000000000000000000000000000000000000000000000000001", "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0000000000000000000000000000000000000000000000000000000000000000"},
|
||||||
|
{"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0000000000000000000000000000000000000000000000000000000000000001", "0000000000000000000000000000000000000000000000000000000000000001"},
|
||||||
|
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0000000000000000000000000000000000000000000000000000000000000001", "0000000000000000000000000000000000000000000000000000000000000000"},
|
||||||
|
{"0000000000000000000000000000000000000000000000000000000000000001", "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0000000000000000000000000000000000000000000000000000000000000001"},
|
||||||
|
{"8000000000000000000000000000000000000000000000000000000000000001", "8000000000000000000000000000000000000000000000000000000000000001", "0000000000000000000000000000000000000000000000000000000000000000"},
|
||||||
|
{"8000000000000000000000000000000000000000000000000000000000000001", "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0000000000000000000000000000000000000000000000000000000000000000"},
|
||||||
|
{"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "8000000000000000000000000000000000000000000000000000000000000001", "0000000000000000000000000000000000000000000000000000000000000001"},
|
||||||
|
}
|
||||||
|
testTwoOperandOp(t, tests, opSlt)
|
||||||
|
}
|
||||||
|
|
||||||
func opBenchmark(bench *testing.B, op func(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error), args ...string) {
|
func opBenchmark(bench *testing.B, op func(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error), args ...string) {
|
||||||
var (
|
var (
|
||||||
env = NewEVM(Context{}, nil, params.TestChainConfig, Config{EnableJit: false, ForceJit: false})
|
env = NewEVM(Context{}, nil, params.TestChainConfig, Config{EnableJit: false, ForceJit: false})
|
||||||
@ -259,3 +393,22 @@ func BenchmarkOpMulmod(b *testing.B) {
|
|||||||
|
|
||||||
opBenchmark(b, opMulmod, x, y, z)
|
opBenchmark(b, opMulmod, x, y, z)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func BenchmarkOpSHL(b *testing.B) {
|
||||||
|
x := "FBCDEF090807060504030201ffffffffFBCDEF090807060504030201ffffffff"
|
||||||
|
y := "ff"
|
||||||
|
|
||||||
|
opBenchmark(b, opSHL, x, y)
|
||||||
|
}
|
||||||
|
func BenchmarkOpSHR(b *testing.B) {
|
||||||
|
x := "FBCDEF090807060504030201ffffffffFBCDEF090807060504030201ffffffff"
|
||||||
|
y := "ff"
|
||||||
|
|
||||||
|
opBenchmark(b, opSHR, x, y)
|
||||||
|
}
|
||||||
|
func BenchmarkOpSAR(b *testing.B) {
|
||||||
|
x := "FBCDEF090807060504030201ffffffffFBCDEF090807060504030201ffffffff"
|
||||||
|
y := "ff"
|
||||||
|
|
||||||
|
opBenchmark(b, opSAR, x, y)
|
||||||
|
}
|
||||||
|
@ -37,8 +37,6 @@ type Config struct {
|
|||||||
// NoRecursion disabled Interpreter call, callcode,
|
// NoRecursion disabled Interpreter call, callcode,
|
||||||
// delegate call and create.
|
// delegate call and create.
|
||||||
NoRecursion bool
|
NoRecursion bool
|
||||||
// Disable gas metering
|
|
||||||
DisableGasMetering bool
|
|
||||||
// Enable recording of SHA3/keccak preimages
|
// Enable recording of SHA3/keccak preimages
|
||||||
EnablePreimageRecording bool
|
EnablePreimageRecording bool
|
||||||
// JumpTable contains the EVM instruction table. This
|
// JumpTable contains the EVM instruction table. This
|
||||||
@ -68,6 +66,8 @@ func NewInterpreter(evm *EVM, cfg Config) *Interpreter {
|
|||||||
// we'll set the default jump table.
|
// we'll set the default jump table.
|
||||||
if !cfg.JumpTable[STOP].valid {
|
if !cfg.JumpTable[STOP].valid {
|
||||||
switch {
|
switch {
|
||||||
|
case evm.ChainConfig().IsConstantinople(evm.BlockNumber):
|
||||||
|
cfg.JumpTable = constantinopleInstructionSet
|
||||||
case evm.ChainConfig().IsByzantium(evm.BlockNumber):
|
case evm.ChainConfig().IsByzantium(evm.BlockNumber):
|
||||||
cfg.JumpTable = byzantiumInstructionSet
|
cfg.JumpTable = byzantiumInstructionSet
|
||||||
case evm.ChainConfig().IsHomestead(evm.BlockNumber):
|
case evm.ChainConfig().IsHomestead(evm.BlockNumber):
|
||||||
@ -187,14 +187,11 @@ func (in *Interpreter) Run(contract *Contract, input []byte) (ret []byte, err er
|
|||||||
return nil, errGasUintOverflow
|
return nil, errGasUintOverflow
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
// consume the gas and return an error if not enough gas is available.
|
||||||
if !in.cfg.DisableGasMetering {
|
// cost is explicitly set so that the capture state defer method cas get the proper cost
|
||||||
// consume the gas and return an error if not enough gas is available.
|
cost, err = operation.gasCost(in.gasTable, in.evm, contract, stack, mem, memorySize)
|
||||||
// cost is explicitly set so that the capture state defer method cas get the proper cost
|
if err != nil || !contract.UseGas(cost) {
|
||||||
cost, err = operation.gasCost(in.gasTable, in.evm, contract, stack, mem, memorySize)
|
return nil, ErrOutOfGas
|
||||||
if err != nil || !contract.UseGas(cost) {
|
|
||||||
return nil, ErrOutOfGas
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
if memorySize > 0 {
|
if memorySize > 0 {
|
||||||
mem.Resize(memorySize)
|
mem.Resize(memorySize)
|
||||||
|
@ -51,11 +51,38 @@ type operation struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
frontierInstructionSet = NewFrontierInstructionSet()
|
frontierInstructionSet = NewFrontierInstructionSet()
|
||||||
homesteadInstructionSet = NewHomesteadInstructionSet()
|
homesteadInstructionSet = NewHomesteadInstructionSet()
|
||||||
byzantiumInstructionSet = NewByzantiumInstructionSet()
|
byzantiumInstructionSet = NewByzantiumInstructionSet()
|
||||||
|
constantinopleInstructionSet = NewConstantinopleInstructionSet()
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// NewConstantinopleInstructionSet returns the frontier, homestead
|
||||||
|
// byzantium and contantinople instructions.
|
||||||
|
func NewConstantinopleInstructionSet() [256]operation {
|
||||||
|
// instructions that can be executed during the byzantium phase.
|
||||||
|
instructionSet := NewByzantiumInstructionSet()
|
||||||
|
instructionSet[SHL] = operation{
|
||||||
|
execute: opSHL,
|
||||||
|
gasCost: constGasFunc(GasFastestStep),
|
||||||
|
validateStack: makeStackFunc(2, 1),
|
||||||
|
valid: true,
|
||||||
|
}
|
||||||
|
instructionSet[SHR] = operation{
|
||||||
|
execute: opSHR,
|
||||||
|
gasCost: constGasFunc(GasFastestStep),
|
||||||
|
validateStack: makeStackFunc(2, 1),
|
||||||
|
valid: true,
|
||||||
|
}
|
||||||
|
instructionSet[SAR] = operation{
|
||||||
|
execute: opSAR,
|
||||||
|
gasCost: constGasFunc(GasFastestStep),
|
||||||
|
validateStack: makeStackFunc(2, 1),
|
||||||
|
valid: true,
|
||||||
|
}
|
||||||
|
return instructionSet
|
||||||
|
}
|
||||||
|
|
||||||
// NewByzantiumInstructionSet returns the frontier, homestead and
|
// NewByzantiumInstructionSet returns the frontier, homestead and
|
||||||
// byzantium instructions.
|
// byzantium instructions.
|
||||||
func NewByzantiumInstructionSet() [256]operation {
|
func NewByzantiumInstructionSet() [256]operation {
|
||||||
|
@ -63,6 +63,9 @@ const (
|
|||||||
XOR
|
XOR
|
||||||
NOT
|
NOT
|
||||||
BYTE
|
BYTE
|
||||||
|
SHL
|
||||||
|
SHR
|
||||||
|
SAR
|
||||||
|
|
||||||
SHA3 = 0x20
|
SHA3 = 0x20
|
||||||
)
|
)
|
||||||
@ -234,6 +237,9 @@ var opCodeToString = map[OpCode]string{
|
|||||||
OR: "OR",
|
OR: "OR",
|
||||||
XOR: "XOR",
|
XOR: "XOR",
|
||||||
BYTE: "BYTE",
|
BYTE: "BYTE",
|
||||||
|
SHL: "SHL",
|
||||||
|
SHR: "SHR",
|
||||||
|
SAR: "SAR",
|
||||||
ADDMOD: "ADDMOD",
|
ADDMOD: "ADDMOD",
|
||||||
MULMOD: "MULMOD",
|
MULMOD: "MULMOD",
|
||||||
|
|
||||||
@ -400,6 +406,9 @@ var stringToOp = map[string]OpCode{
|
|||||||
"OR": OR,
|
"OR": OR,
|
||||||
"XOR": XOR,
|
"XOR": XOR,
|
||||||
"BYTE": BYTE,
|
"BYTE": BYTE,
|
||||||
|
"SHL": SHL,
|
||||||
|
"SHR": SHR,
|
||||||
|
"SAR": SAR,
|
||||||
"ADDMOD": ADDMOD,
|
"ADDMOD": ADDMOD,
|
||||||
"MULMOD": MULMOD,
|
"MULMOD": MULMOD,
|
||||||
"SHA3": SHA3,
|
"SHA3": SHA3,
|
||||||
|
63
crypto/bn256/bn256_amd64.go
Normal file
63
crypto/bn256/bn256_amd64.go
Normal file
@ -0,0 +1,63 @@
|
|||||||
|
// Copyright 2018 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
// +build amd64,!appengine,!gccgo
|
||||||
|
|
||||||
|
// Package bn256 implements the Optimal Ate pairing over a 256-bit Barreto-Naehrig curve.
|
||||||
|
package bn256
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/big"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/crypto/bn256/cloudflare"
|
||||||
|
)
|
||||||
|
|
||||||
|
// G1 is an abstract cyclic group. The zero value is suitable for use as the
|
||||||
|
// output of an operation, but cannot be used as an input.
|
||||||
|
type G1 struct {
|
||||||
|
bn256.G1
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add sets e to a+b and then returns e.
|
||||||
|
func (e *G1) Add(a, b *G1) *G1 {
|
||||||
|
e.G1.Add(&a.G1, &b.G1)
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScalarMult sets e to a*k and then returns e.
|
||||||
|
func (e *G1) ScalarMult(a *G1, k *big.Int) *G1 {
|
||||||
|
e.G1.ScalarMult(&a.G1, k)
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
// G2 is an abstract cyclic group. The zero value is suitable for use as the
|
||||||
|
// output of an operation, but cannot be used as an input.
|
||||||
|
type G2 struct {
|
||||||
|
bn256.G2
|
||||||
|
}
|
||||||
|
|
||||||
|
// PairingCheck calculates the Optimal Ate pairing for a set of points.
|
||||||
|
func PairingCheck(a []*G1, b []*G2) bool {
|
||||||
|
as := make([]*bn256.G1, len(a))
|
||||||
|
for i, p := range a {
|
||||||
|
as[i] = &p.G1
|
||||||
|
}
|
||||||
|
bs := make([]*bn256.G2, len(b))
|
||||||
|
for i, p := range b {
|
||||||
|
bs[i] = &p.G2
|
||||||
|
}
|
||||||
|
return bn256.PairingCheck(as, bs)
|
||||||
|
}
|
63
crypto/bn256/bn256_other.go
Normal file
63
crypto/bn256/bn256_other.go
Normal file
@ -0,0 +1,63 @@
|
|||||||
|
// Copyright 2018 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
// +build !amd64 appengine gccgo
|
||||||
|
|
||||||
|
// Package bn256 implements the Optimal Ate pairing over a 256-bit Barreto-Naehrig curve.
|
||||||
|
package bn256
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/big"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/crypto/bn256/google"
|
||||||
|
)
|
||||||
|
|
||||||
|
// G1 is an abstract cyclic group. The zero value is suitable for use as the
|
||||||
|
// output of an operation, but cannot be used as an input.
|
||||||
|
type G1 struct {
|
||||||
|
bn256.G1
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add sets e to a+b and then returns e.
|
||||||
|
func (e *G1) Add(a, b *G1) *G1 {
|
||||||
|
e.G1.Add(&a.G1, &b.G1)
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScalarMult sets e to a*k and then returns e.
|
||||||
|
func (e *G1) ScalarMult(a *G1, k *big.Int) *G1 {
|
||||||
|
e.G1.ScalarMult(&a.G1, k)
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
// G2 is an abstract cyclic group. The zero value is suitable for use as the
|
||||||
|
// output of an operation, but cannot be used as an input.
|
||||||
|
type G2 struct {
|
||||||
|
bn256.G2
|
||||||
|
}
|
||||||
|
|
||||||
|
// PairingCheck calculates the Optimal Ate pairing for a set of points.
|
||||||
|
func PairingCheck(a []*G1, b []*G2) bool {
|
||||||
|
as := make([]*bn256.G1, len(a))
|
||||||
|
for i, p := range a {
|
||||||
|
as[i] = &p.G1
|
||||||
|
}
|
||||||
|
bs := make([]*bn256.G2, len(b))
|
||||||
|
for i, p := range b {
|
||||||
|
bs[i] = &p.G2
|
||||||
|
}
|
||||||
|
return bn256.PairingCheck(as, bs)
|
||||||
|
}
|
481
crypto/bn256/cloudflare/bn256.go
Normal file
481
crypto/bn256/cloudflare/bn256.go
Normal file
@ -0,0 +1,481 @@
|
|||||||
|
// Package bn256 implements a particular bilinear group at the 128-bit security
|
||||||
|
// level.
|
||||||
|
//
|
||||||
|
// Bilinear groups are the basis of many of the new cryptographic protocols that
|
||||||
|
// have been proposed over the past decade. They consist of a triplet of groups
|
||||||
|
// (G₁, G₂ and GT) such that there exists a function e(g₁ˣ,g₂ʸ)=gTˣʸ (where gₓ
|
||||||
|
// is a generator of the respective group). That function is called a pairing
|
||||||
|
// function.
|
||||||
|
//
|
||||||
|
// This package specifically implements the Optimal Ate pairing over a 256-bit
|
||||||
|
// Barreto-Naehrig curve as described in
|
||||||
|
// http://cryptojedi.org/papers/dclxvi-20100714.pdf. Its output is compatible
|
||||||
|
// with the implementation described in that paper.
|
||||||
|
package bn256
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/rand"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"math/big"
|
||||||
|
)
|
||||||
|
|
||||||
|
func randomK(r io.Reader) (k *big.Int, err error) {
|
||||||
|
for {
|
||||||
|
k, err = rand.Int(r, Order)
|
||||||
|
if k.Sign() > 0 || err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// G1 is an abstract cyclic group. The zero value is suitable for use as the
|
||||||
|
// output of an operation, but cannot be used as an input.
|
||||||
|
type G1 struct {
|
||||||
|
p *curvePoint
|
||||||
|
}
|
||||||
|
|
||||||
|
// RandomG1 returns x and g₁ˣ where x is a random, non-zero number read from r.
|
||||||
|
func RandomG1(r io.Reader) (*big.Int, *G1, error) {
|
||||||
|
k, err := randomK(r)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return k, new(G1).ScalarBaseMult(k), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *G1) String() string {
|
||||||
|
return "bn256.G1" + g.p.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScalarBaseMult sets e to g*k where g is the generator of the group and then
|
||||||
|
// returns e.
|
||||||
|
func (e *G1) ScalarBaseMult(k *big.Int) *G1 {
|
||||||
|
if e.p == nil {
|
||||||
|
e.p = &curvePoint{}
|
||||||
|
}
|
||||||
|
e.p.Mul(curveGen, k)
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScalarMult sets e to a*k and then returns e.
|
||||||
|
func (e *G1) ScalarMult(a *G1, k *big.Int) *G1 {
|
||||||
|
if e.p == nil {
|
||||||
|
e.p = &curvePoint{}
|
||||||
|
}
|
||||||
|
e.p.Mul(a.p, k)
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add sets e to a+b and then returns e.
|
||||||
|
func (e *G1) Add(a, b *G1) *G1 {
|
||||||
|
if e.p == nil {
|
||||||
|
e.p = &curvePoint{}
|
||||||
|
}
|
||||||
|
e.p.Add(a.p, b.p)
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
// Neg sets e to -a and then returns e.
|
||||||
|
func (e *G1) Neg(a *G1) *G1 {
|
||||||
|
if e.p == nil {
|
||||||
|
e.p = &curvePoint{}
|
||||||
|
}
|
||||||
|
e.p.Neg(a.p)
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set sets e to a and then returns e.
|
||||||
|
func (e *G1) Set(a *G1) *G1 {
|
||||||
|
if e.p == nil {
|
||||||
|
e.p = &curvePoint{}
|
||||||
|
}
|
||||||
|
e.p.Set(a.p)
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
// Marshal converts e to a byte slice.
|
||||||
|
func (e *G1) Marshal() []byte {
|
||||||
|
// Each value is a 256-bit number.
|
||||||
|
const numBytes = 256 / 8
|
||||||
|
|
||||||
|
e.p.MakeAffine()
|
||||||
|
ret := make([]byte, numBytes*2)
|
||||||
|
if e.p.IsInfinity() {
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
temp := &gfP{}
|
||||||
|
|
||||||
|
montDecode(temp, &e.p.x)
|
||||||
|
temp.Marshal(ret)
|
||||||
|
montDecode(temp, &e.p.y)
|
||||||
|
temp.Marshal(ret[numBytes:])
|
||||||
|
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmarshal sets e to the result of converting the output of Marshal back into
|
||||||
|
// a group element and then returns e.
|
||||||
|
func (e *G1) Unmarshal(m []byte) ([]byte, error) {
|
||||||
|
// Each value is a 256-bit number.
|
||||||
|
const numBytes = 256 / 8
|
||||||
|
if len(m) < 2*numBytes {
|
||||||
|
return nil, errors.New("bn256: not enough data")
|
||||||
|
}
|
||||||
|
// Unmarshal the points and check their caps
|
||||||
|
if e.p == nil {
|
||||||
|
e.p = &curvePoint{}
|
||||||
|
} else {
|
||||||
|
e.p.x, e.p.y = gfP{0}, gfP{0}
|
||||||
|
}
|
||||||
|
var err error
|
||||||
|
if err = e.p.x.Unmarshal(m); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err = e.p.y.Unmarshal(m[numBytes:]); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// Encode into Montgomery form and ensure it's on the curve
|
||||||
|
montEncode(&e.p.x, &e.p.x)
|
||||||
|
montEncode(&e.p.y, &e.p.y)
|
||||||
|
|
||||||
|
zero := gfP{0}
|
||||||
|
if e.p.x == zero && e.p.y == zero {
|
||||||
|
// This is the point at infinity.
|
||||||
|
e.p.y = *newGFp(1)
|
||||||
|
e.p.z = gfP{0}
|
||||||
|
e.p.t = gfP{0}
|
||||||
|
} else {
|
||||||
|
e.p.z = *newGFp(1)
|
||||||
|
e.p.t = *newGFp(1)
|
||||||
|
|
||||||
|
if !e.p.IsOnCurve() {
|
||||||
|
return nil, errors.New("bn256: malformed point")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return m[2*numBytes:], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// G2 is an abstract cyclic group. The zero value is suitable for use as the
|
||||||
|
// output of an operation, but cannot be used as an input.
|
||||||
|
type G2 struct {
|
||||||
|
p *twistPoint
|
||||||
|
}
|
||||||
|
|
||||||
|
// RandomG2 returns x and g₂ˣ where x is a random, non-zero number read from r.
|
||||||
|
func RandomG2(r io.Reader) (*big.Int, *G2, error) {
|
||||||
|
k, err := randomK(r)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return k, new(G2).ScalarBaseMult(k), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *G2) String() string {
|
||||||
|
return "bn256.G2" + e.p.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScalarBaseMult sets e to g*k where g is the generator of the group and then
|
||||||
|
// returns out.
|
||||||
|
func (e *G2) ScalarBaseMult(k *big.Int) *G2 {
|
||||||
|
if e.p == nil {
|
||||||
|
e.p = &twistPoint{}
|
||||||
|
}
|
||||||
|
e.p.Mul(twistGen, k)
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScalarMult sets e to a*k and then returns e.
|
||||||
|
func (e *G2) ScalarMult(a *G2, k *big.Int) *G2 {
|
||||||
|
if e.p == nil {
|
||||||
|
e.p = &twistPoint{}
|
||||||
|
}
|
||||||
|
e.p.Mul(a.p, k)
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add sets e to a+b and then returns e.
|
||||||
|
func (e *G2) Add(a, b *G2) *G2 {
|
||||||
|
if e.p == nil {
|
||||||
|
e.p = &twistPoint{}
|
||||||
|
}
|
||||||
|
e.p.Add(a.p, b.p)
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
// Neg sets e to -a and then returns e.
|
||||||
|
func (e *G2) Neg(a *G2) *G2 {
|
||||||
|
if e.p == nil {
|
||||||
|
e.p = &twistPoint{}
|
||||||
|
}
|
||||||
|
e.p.Neg(a.p)
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set sets e to a and then returns e.
|
||||||
|
func (e *G2) Set(a *G2) *G2 {
|
||||||
|
if e.p == nil {
|
||||||
|
e.p = &twistPoint{}
|
||||||
|
}
|
||||||
|
e.p.Set(a.p)
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
// Marshal converts e into a byte slice.
|
||||||
|
func (e *G2) Marshal() []byte {
|
||||||
|
// Each value is a 256-bit number.
|
||||||
|
const numBytes = 256 / 8
|
||||||
|
|
||||||
|
if e.p == nil {
|
||||||
|
e.p = &twistPoint{}
|
||||||
|
}
|
||||||
|
|
||||||
|
e.p.MakeAffine()
|
||||||
|
ret := make([]byte, numBytes*4)
|
||||||
|
if e.p.IsInfinity() {
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
temp := &gfP{}
|
||||||
|
|
||||||
|
montDecode(temp, &e.p.x.x)
|
||||||
|
temp.Marshal(ret)
|
||||||
|
montDecode(temp, &e.p.x.y)
|
||||||
|
temp.Marshal(ret[numBytes:])
|
||||||
|
montDecode(temp, &e.p.y.x)
|
||||||
|
temp.Marshal(ret[2*numBytes:])
|
||||||
|
montDecode(temp, &e.p.y.y)
|
||||||
|
temp.Marshal(ret[3*numBytes:])
|
||||||
|
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmarshal sets e to the result of converting the output of Marshal back into
|
||||||
|
// a group element and then returns e.
|
||||||
|
func (e *G2) Unmarshal(m []byte) ([]byte, error) {
|
||||||
|
// Each value is a 256-bit number.
|
||||||
|
const numBytes = 256 / 8
|
||||||
|
if len(m) < 4*numBytes {
|
||||||
|
return nil, errors.New("bn256: not enough data")
|
||||||
|
}
|
||||||
|
// Unmarshal the points and check their caps
|
||||||
|
if e.p == nil {
|
||||||
|
e.p = &twistPoint{}
|
||||||
|
}
|
||||||
|
var err error
|
||||||
|
if err = e.p.x.x.Unmarshal(m); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err = e.p.x.y.Unmarshal(m[numBytes:]); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err = e.p.y.x.Unmarshal(m[2*numBytes:]); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err = e.p.y.y.Unmarshal(m[3*numBytes:]); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// Encode into Montgomery form and ensure it's on the curve
|
||||||
|
montEncode(&e.p.x.x, &e.p.x.x)
|
||||||
|
montEncode(&e.p.x.y, &e.p.x.y)
|
||||||
|
montEncode(&e.p.y.x, &e.p.y.x)
|
||||||
|
montEncode(&e.p.y.y, &e.p.y.y)
|
||||||
|
|
||||||
|
if e.p.x.IsZero() && e.p.y.IsZero() {
|
||||||
|
// This is the point at infinity.
|
||||||
|
e.p.y.SetOne()
|
||||||
|
e.p.z.SetZero()
|
||||||
|
e.p.t.SetZero()
|
||||||
|
} else {
|
||||||
|
e.p.z.SetOne()
|
||||||
|
e.p.t.SetOne()
|
||||||
|
|
||||||
|
if !e.p.IsOnCurve() {
|
||||||
|
return nil, errors.New("bn256: malformed point")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return m[4*numBytes:], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GT is an abstract cyclic group. The zero value is suitable for use as the
|
||||||
|
// output of an operation, but cannot be used as an input.
|
||||||
|
type GT struct {
|
||||||
|
p *gfP12
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pair calculates an Optimal Ate pairing.
|
||||||
|
func Pair(g1 *G1, g2 *G2) *GT {
|
||||||
|
return >{optimalAte(g2.p, g1.p)}
|
||||||
|
}
|
||||||
|
|
||||||
|
// PairingCheck calculates the Optimal Ate pairing for a set of points.
|
||||||
|
func PairingCheck(a []*G1, b []*G2) bool {
|
||||||
|
acc := new(gfP12)
|
||||||
|
acc.SetOne()
|
||||||
|
|
||||||
|
for i := 0; i < len(a); i++ {
|
||||||
|
if a[i].p.IsInfinity() || b[i].p.IsInfinity() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
acc.Mul(acc, miller(b[i].p, a[i].p))
|
||||||
|
}
|
||||||
|
return finalExponentiation(acc).IsOne()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Miller applies Miller's algorithm, which is a bilinear function from the
|
||||||
|
// source groups to F_p^12. Miller(g1, g2).Finalize() is equivalent to Pair(g1,
|
||||||
|
// g2).
|
||||||
|
func Miller(g1 *G1, g2 *G2) *GT {
|
||||||
|
return >{miller(g2.p, g1.p)}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *GT) String() string {
|
||||||
|
return "bn256.GT" + g.p.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScalarMult sets e to a*k and then returns e.
|
||||||
|
func (e *GT) ScalarMult(a *GT, k *big.Int) *GT {
|
||||||
|
if e.p == nil {
|
||||||
|
e.p = &gfP12{}
|
||||||
|
}
|
||||||
|
e.p.Exp(a.p, k)
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add sets e to a+b and then returns e.
|
||||||
|
func (e *GT) Add(a, b *GT) *GT {
|
||||||
|
if e.p == nil {
|
||||||
|
e.p = &gfP12{}
|
||||||
|
}
|
||||||
|
e.p.Mul(a.p, b.p)
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
// Neg sets e to -a and then returns e.
|
||||||
|
func (e *GT) Neg(a *GT) *GT {
|
||||||
|
if e.p == nil {
|
||||||
|
e.p = &gfP12{}
|
||||||
|
}
|
||||||
|
e.p.Conjugate(a.p)
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set sets e to a and then returns e.
|
||||||
|
func (e *GT) Set(a *GT) *GT {
|
||||||
|
if e.p == nil {
|
||||||
|
e.p = &gfP12{}
|
||||||
|
}
|
||||||
|
e.p.Set(a.p)
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
// Finalize is a linear function from F_p^12 to GT.
|
||||||
|
func (e *GT) Finalize() *GT {
|
||||||
|
ret := finalExponentiation(e.p)
|
||||||
|
e.p.Set(ret)
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
// Marshal converts e into a byte slice.
|
||||||
|
func (e *GT) Marshal() []byte {
|
||||||
|
// Each value is a 256-bit number.
|
||||||
|
const numBytes = 256 / 8
|
||||||
|
|
||||||
|
ret := make([]byte, numBytes*12)
|
||||||
|
temp := &gfP{}
|
||||||
|
|
||||||
|
montDecode(temp, &e.p.x.x.x)
|
||||||
|
temp.Marshal(ret)
|
||||||
|
montDecode(temp, &e.p.x.x.y)
|
||||||
|
temp.Marshal(ret[numBytes:])
|
||||||
|
montDecode(temp, &e.p.x.y.x)
|
||||||
|
temp.Marshal(ret[2*numBytes:])
|
||||||
|
montDecode(temp, &e.p.x.y.y)
|
||||||
|
temp.Marshal(ret[3*numBytes:])
|
||||||
|
montDecode(temp, &e.p.x.z.x)
|
||||||
|
temp.Marshal(ret[4*numBytes:])
|
||||||
|
montDecode(temp, &e.p.x.z.y)
|
||||||
|
temp.Marshal(ret[5*numBytes:])
|
||||||
|
montDecode(temp, &e.p.y.x.x)
|
||||||
|
temp.Marshal(ret[6*numBytes:])
|
||||||
|
montDecode(temp, &e.p.y.x.y)
|
||||||
|
temp.Marshal(ret[7*numBytes:])
|
||||||
|
montDecode(temp, &e.p.y.y.x)
|
||||||
|
temp.Marshal(ret[8*numBytes:])
|
||||||
|
montDecode(temp, &e.p.y.y.y)
|
||||||
|
temp.Marshal(ret[9*numBytes:])
|
||||||
|
montDecode(temp, &e.p.y.z.x)
|
||||||
|
temp.Marshal(ret[10*numBytes:])
|
||||||
|
montDecode(temp, &e.p.y.z.y)
|
||||||
|
temp.Marshal(ret[11*numBytes:])
|
||||||
|
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmarshal sets e to the result of converting the output of Marshal back into
|
||||||
|
// a group element and then returns e.
|
||||||
|
func (e *GT) Unmarshal(m []byte) ([]byte, error) {
|
||||||
|
// Each value is a 256-bit number.
|
||||||
|
const numBytes = 256 / 8
|
||||||
|
|
||||||
|
if len(m) < 12*numBytes {
|
||||||
|
return nil, errors.New("bn256: not enough data")
|
||||||
|
}
|
||||||
|
|
||||||
|
if e.p == nil {
|
||||||
|
e.p = &gfP12{}
|
||||||
|
}
|
||||||
|
|
||||||
|
var err error
|
||||||
|
if err = e.p.x.x.x.Unmarshal(m); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err = e.p.x.x.y.Unmarshal(m[numBytes:]); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err = e.p.x.y.x.Unmarshal(m[2*numBytes:]); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err = e.p.x.y.y.Unmarshal(m[3*numBytes:]); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err = e.p.x.z.x.Unmarshal(m[4*numBytes:]); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err = e.p.x.z.y.Unmarshal(m[5*numBytes:]); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err = e.p.y.x.x.Unmarshal(m[6*numBytes:]); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err = e.p.y.x.y.Unmarshal(m[7*numBytes:]); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err = e.p.y.y.x.Unmarshal(m[8*numBytes:]); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err = e.p.y.y.y.Unmarshal(m[9*numBytes:]); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err = e.p.y.z.x.Unmarshal(m[10*numBytes:]); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err = e.p.y.z.y.Unmarshal(m[11*numBytes:]); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
montEncode(&e.p.x.x.x, &e.p.x.x.x)
|
||||||
|
montEncode(&e.p.x.x.y, &e.p.x.x.y)
|
||||||
|
montEncode(&e.p.x.y.x, &e.p.x.y.x)
|
||||||
|
montEncode(&e.p.x.y.y, &e.p.x.y.y)
|
||||||
|
montEncode(&e.p.x.z.x, &e.p.x.z.x)
|
||||||
|
montEncode(&e.p.x.z.y, &e.p.x.z.y)
|
||||||
|
montEncode(&e.p.y.x.x, &e.p.y.x.x)
|
||||||
|
montEncode(&e.p.y.x.y, &e.p.y.x.y)
|
||||||
|
montEncode(&e.p.y.y.x, &e.p.y.y.x)
|
||||||
|
montEncode(&e.p.y.y.y, &e.p.y.y.y)
|
||||||
|
montEncode(&e.p.y.z.x, &e.p.y.z.x)
|
||||||
|
montEncode(&e.p.y.z.y, &e.p.y.z.y)
|
||||||
|
|
||||||
|
return m[12*numBytes:], nil
|
||||||
|
}
|
118
crypto/bn256/cloudflare/bn256_test.go
Normal file
118
crypto/bn256/cloudflare/bn256_test.go
Normal file
@ -0,0 +1,118 @@
|
|||||||
|
// +build amd64,!appengine,!gccgo
|
||||||
|
|
||||||
|
package bn256
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"crypto/rand"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestG1Marshal(t *testing.T) {
|
||||||
|
_, Ga, err := RandomG1(rand.Reader)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
ma := Ga.Marshal()
|
||||||
|
|
||||||
|
Gb := new(G1)
|
||||||
|
_, err = Gb.Unmarshal(ma)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
mb := Gb.Marshal()
|
||||||
|
|
||||||
|
if !bytes.Equal(ma, mb) {
|
||||||
|
t.Fatal("bytes are different")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestG2Marshal(t *testing.T) {
|
||||||
|
_, Ga, err := RandomG2(rand.Reader)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
ma := Ga.Marshal()
|
||||||
|
|
||||||
|
Gb := new(G2)
|
||||||
|
_, err = Gb.Unmarshal(ma)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
mb := Gb.Marshal()
|
||||||
|
|
||||||
|
if !bytes.Equal(ma, mb) {
|
||||||
|
t.Fatal("bytes are different")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestBilinearity checks e(a·G1, b·G2) == e(G1, G2)^(a·b) for random scalars.
func TestBilinearity(t *testing.T) {
	for i := 0; i < 2; i++ {
		a, p1, _ := RandomG1(rand.Reader)
		b, p2, _ := RandomG2(rand.Reader)
		e1 := Pair(p1, p2)

		// Pair the fixed generators, then raise by both scalars.
		e2 := Pair(&G1{curveGen}, &G2{twistGen})
		e2.ScalarMult(e2, a)
		e2.ScalarMult(e2, b)

		if *e1.p != *e2.p {
			t.Fatalf("bad pairing result: %s", e1)
		}
	}
}
|
||||||
|
|
||||||
|
// TestTripartiteDiffieHellman exercises the one-round tripartite DH protocol:
// three parties derive the same shared key from each other's public values.
func TestTripartiteDiffieHellman(t *testing.T) {
	a, _ := rand.Int(rand.Reader, Order)
	b, _ := rand.Int(rand.Reader, Order)
	c, _ := rand.Int(rand.Reader, Order)

	pa, pb, pc := new(G1), new(G1), new(G1)
	qa, qb, qc := new(G2), new(G2), new(G2)

	// Round-trip each public value through Marshal/Unmarshal to also cover
	// the encoding path.
	pa.Unmarshal(new(G1).ScalarBaseMult(a).Marshal())
	qa.Unmarshal(new(G2).ScalarBaseMult(a).Marshal())
	pb.Unmarshal(new(G1).ScalarBaseMult(b).Marshal())
	qb.Unmarshal(new(G2).ScalarBaseMult(b).Marshal())
	pc.Unmarshal(new(G1).ScalarBaseMult(c).Marshal())
	qc.Unmarshal(new(G2).ScalarBaseMult(c).Marshal())

	// Each party pairs the other two parties' values and raises by its own
	// secret; all three keys must agree.
	k1 := Pair(pb, qc)
	k1.ScalarMult(k1, a)
	k1Bytes := k1.Marshal()

	k2 := Pair(pc, qa)
	k2.ScalarMult(k2, b)
	k2Bytes := k2.Marshal()

	k3 := Pair(pa, qb)
	k3.ScalarMult(k3, c)
	k3Bytes := k3.Marshal()

	if !bytes.Equal(k1Bytes, k2Bytes) || !bytes.Equal(k2Bytes, k3Bytes) {
		t.Errorf("keys didn't agree")
	}
}
|
||||||
|
|
||||||
|
// BenchmarkG1 measures scalar-base multiplication in G1.
func BenchmarkG1(b *testing.B) {
	x, _ := rand.Int(rand.Reader, Order)
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		new(G1).ScalarBaseMult(x)
	}
}

// BenchmarkG2 measures scalar-base multiplication in G2.
func BenchmarkG2(b *testing.B) {
	x, _ := rand.Int(rand.Reader, Order)
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		new(G2).ScalarBaseMult(x)
	}
}

// BenchmarkPairing measures a full pairing of the two group generators.
func BenchmarkPairing(b *testing.B) {
	for i := 0; i < b.N; i++ {
		Pair(&G1{curveGen}, &G2{twistGen})
	}
}
|
59
crypto/bn256/cloudflare/constants.go
Normal file
59
crypto/bn256/cloudflare/constants.go
Normal file
@ -0,0 +1,59 @@
|
|||||||
|
// Copyright 2012 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package bn256
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/big"
|
||||||
|
)
|
||||||
|
|
||||||
|
// bigFromBase10 parses a base-10 integer literal. It is only applied to the
// trusted package-level constants below, so the parse-failure flag is
// deliberately ignored (a malformed literal would yield a nil *big.Int).
func bigFromBase10(s string) *big.Int {
	n, _ := new(big.Int).SetString(s, 10)
	return n
}
|
||||||
|
|
||||||
|
// u is the BN parameter that determines the prime.
// (NOTE(review): the previous "1868033³" note described the u of the older
// Google bn256 package, not this value.)
var u = bigFromBase10("4965661367192848881")

// Order is the number of elements in both G₁ and G₂: 36u⁴+36u³+18u²+6u+1.
var Order = bigFromBase10("21888242871839275222246405745257275088548364400416034343698204186575808495617")

// P is a prime over which we form a basic field: 36u⁴+36u³+24u²+6u+1.
var P = bigFromBase10("21888242871839275222246405745257275088696311157297823662689037894645226208583")

// p2 is p, represented as little-endian 64-bit words.
var p2 = [4]uint64{0x3c208c16d87cfd47, 0x97816a916871ca8d, 0xb85045b68181585d, 0x30644e72e131a029}

// np is the negative inverse of p, mod 2^256.
var np = [4]uint64{0x87d20782e4866389, 0x9ede7d651eca6ac9, 0xd8afcbd01833da80, 0xf57a22b791888c6b}

// rN1 is R^-1 where R = 2^256 mod p.
var rN1 = &gfP{0xed84884a014afa37, 0xeb2022850278edf8, 0xcf63e9cfb74492d9, 0x2e67157159e5c639}

// r2 is R^2 where R = 2^256 mod p.
var r2 = &gfP{0xf32cfc5b538afa89, 0xb5e71911d44501fb, 0x47ab1eff0a417ff6, 0x06d89f71cab8351f}

// r3 is R^3 where R = 2^256 mod p.
var r3 = &gfP{0xb1cd6dafda1530df, 0x62f210e6a7283db6, 0xef7f0b0c0ada0afb, 0x20fd6e902d592544}

// xiToPMinus1Over6 is ξ^((p-1)/6) where ξ = i+9.
var xiToPMinus1Over6 = &gfP2{gfP{0xa222ae234c492d72, 0xd00f02a4565de15b, 0xdc2ff3a253dfc926, 0x10a75716b3899551}, gfP{0xaf9ba69633144907, 0xca6b1d7387afb78a, 0x11bded5ef08a2087, 0x02f34d751a1f3a7c}}

// xiToPMinus1Over3 is ξ^((p-1)/3) where ξ = i+9.
var xiToPMinus1Over3 = &gfP2{gfP{0x6e849f1ea0aa4757, 0xaa1c7b6d89f89141, 0xb6e713cdfae0ca3a, 0x26694fbb4e82ebc3}, gfP{0xb5773b104563ab30, 0x347f91c8a9aa6454, 0x7a007127242e0991, 0x1956bcd8118214ec}}

// xiToPMinus1Over2 is ξ^((p-1)/2) where ξ = i+9.
var xiToPMinus1Over2 = &gfP2{gfP{0xa1d77ce45ffe77c7, 0x07affd117826d1db, 0x6d16bd27bb7edc6b, 0x2c87200285defecc}, gfP{0xe4bbdd0c2936b629, 0xbb30f162e133bacb, 0x31a9d1b6f9645366, 0x253570bea500f8dd}}

// xiToPSquaredMinus1Over3 is ξ^((p²-1)/3) where ξ = i+9.
var xiToPSquaredMinus1Over3 = &gfP{0x3350c88e13e80b9c, 0x7dce557cdb5e56b9, 0x6001b4b8b615564a, 0x2682e617020217e0}

// xiTo2PSquaredMinus2Over3 is ξ^((2p²-2)/3) where ξ = i+9 (a cubic root of unity, mod p).
var xiTo2PSquaredMinus2Over3 = &gfP{0x71930c11d782e155, 0xa6bb947cffbe3323, 0xaa303344d4741444, 0x2c3b3f0d26594943}

// xiToPSquaredMinus1Over6 is ξ^((p²-1)/6) where ξ = i+9 (a cubic root of -1, mod p).
var xiToPSquaredMinus1Over6 = &gfP{0xca8d800500fa1bf2, 0xf0c5d61468b39769, 0x0e201271ad0d4418, 0x04290f65bad856e6}

// xiTo2PMinus2Over3 is ξ^((2p-2)/3) where ξ = i+9.
var xiTo2PMinus2Over3 = &gfP2{gfP{0x5dddfd154bd8c949, 0x62cb29a5a4445b60, 0x37bc870a0c7dd2b9, 0x24830a9d3171f0fd}, gfP{0x7361d77f843abe92, 0xa5bb2bd3273411fb, 0x9c941f314b3e2399, 0x15df9cddbb9fd3ec}}
|
229
crypto/bn256/cloudflare/curve.go
Normal file
229
crypto/bn256/cloudflare/curve.go
Normal file
@ -0,0 +1,229 @@
|
|||||||
|
package bn256
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/big"
|
||||||
|
)
|
||||||
|
|
||||||
|
// curvePoint implements the elliptic curve y²=x³+3. Points are kept in Jacobian
// form and t=z² when valid. G₁ is the set of points of this curve on GF(p).
type curvePoint struct {
	// All coordinates are Montgomery-encoded gfP values.
	x, y, z, t gfP
}

// curveB is the constant term of the curve equation, Montgomery-encoded.
var curveB = newGFp(3)

// curveGen is the generator of G₁.
var curveGen = &curvePoint{
	x: *newGFp(1),
	y: *newGFp(2),
	z: *newGFp(1),
	t: *newGFp(1),
}
|
||||||
|
|
||||||
|
func (c *curvePoint) String() string {
|
||||||
|
c.MakeAffine()
|
||||||
|
x, y := &gfP{}, &gfP{}
|
||||||
|
montDecode(x, &c.x)
|
||||||
|
montDecode(y, &c.y)
|
||||||
|
return "(" + x.String() + ", " + y.String() + ")"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *curvePoint) Set(a *curvePoint) {
|
||||||
|
c.x.Set(&a.x)
|
||||||
|
c.y.Set(&a.y)
|
||||||
|
c.z.Set(&a.z)
|
||||||
|
c.t.Set(&a.t)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsOnCurve returns true iff c is on the curve. It normalizes c to affine
// form as a side effect; the point at infinity is considered on the curve.
func (c *curvePoint) IsOnCurve() bool {
	c.MakeAffine()
	if c.IsInfinity() {
		return true
	}

	// Check y² == x³ + 3 in the field.
	y2, x3 := &gfP{}, &gfP{}
	gfpMul(y2, &c.y, &c.y)
	gfpMul(x3, &c.x, &c.x)
	gfpMul(x3, x3, &c.x)
	gfpAdd(x3, x3, curveB)

	return *y2 == *x3
}
|
||||||
|
|
||||||
|
// SetInfinity marks c as the point at infinity, encoded as z == 0.
func (c *curvePoint) SetInfinity() {
	c.x = gfP{0}
	c.y = *newGFp(1)
	c.z = gfP{0}
	c.t = gfP{0}
}

// IsInfinity reports whether c is the point at infinity (z == 0).
func (c *curvePoint) IsInfinity() bool {
	return c.z == gfP{0}
}
|
||||||
|
|
||||||
|
// Add sets c = a + b in Jacobian coordinates. It handles the point at
// infinity on either side and falls back to Double when a == b.
func (c *curvePoint) Add(a, b *curvePoint) {
	if a.IsInfinity() {
		c.Set(b)
		return
	}
	if b.IsInfinity() {
		c.Set(a)
		return
	}

	// See http://hyperelliptic.org/EFD/g1p/auto-code/shortw/jacobian-0/addition/add-2007-bl.op3

	// Normalize the points by replacing a = [x1:y1:z1] and b = [x2:y2:z2]
	// by [u1:s1:z1·z2] and [u2:s2:z1·z2]
	// where u1 = x1·z2², s1 = y1·z2³ and u2 = x2·z1², s2 = y2·z1³
	z12, z22 := &gfP{}, &gfP{}
	gfpMul(z12, &a.z, &a.z)
	gfpMul(z22, &b.z, &b.z)

	u1, u2 := &gfP{}, &gfP{}
	gfpMul(u1, &a.x, z22)
	gfpMul(u2, &b.x, z12)

	t, s1 := &gfP{}, &gfP{}
	gfpMul(t, &b.z, z22)
	gfpMul(s1, &a.y, t)

	s2 := &gfP{}
	gfpMul(t, &a.z, z12)
	gfpMul(s2, &b.y, t)

	// Compute x = (2h)²(s²-u1-u2)
	// where s = (s2-s1)/(u2-u1) is the slope of the line through
	// (u1,s1) and (u2,s2). The extra factor 2h = 2(u2-u1) comes from the value of z below.
	// This is also:
	// 4(s2-s1)² - 4h²(u1+u2) = 4(s2-s1)² - 4h³ - 4h²(2u1)
	//   = r² - j - 2v
	// with the notations below.
	h := &gfP{}
	gfpSub(h, u2, u1)
	xEqual := *h == gfP{0}

	gfpAdd(t, h, h)
	// i = 4h²
	i := &gfP{}
	gfpMul(i, t, t)
	// j = 4h³
	j := &gfP{}
	gfpMul(j, h, i)

	gfpSub(t, s2, s1)
	yEqual := *t == gfP{0}
	if xEqual && yEqual {
		// Same point: the addition formula degenerates, so double instead.
		c.Double(a)
		return
	}
	r := &gfP{}
	gfpAdd(r, t, t)

	v := &gfP{}
	gfpMul(v, u1, i)

	// t4 = 4(s2-s1)²
	t4, t6 := &gfP{}, &gfP{}
	gfpMul(t4, r, r)
	gfpAdd(t, v, v)
	gfpSub(t6, t4, j)

	gfpSub(&c.x, t6, t)

	// Set y = -(2h)³(s1 + s*(x/4h²-u1))
	// This is also
	// y = - 2·s1·j - (s2-s1)(2x - 2i·u1) = r(v-x) - 2·s1·j
	gfpSub(t, v, &c.x) // t7
	gfpMul(t4, s1, j)  // t8
	gfpAdd(t6, t4, t4) // t9
	gfpMul(t4, r, t)   // t10
	gfpSub(&c.y, t4, t6)

	// Set z = 2(u2-u1)·z1·z2 = 2h·z1·z2
	gfpAdd(t, &a.z, &b.z) // t11
	gfpMul(t4, t, t)      // t12
	gfpSub(t, t4, z12)    // t13
	gfpSub(t4, t, z22)    // t14
	gfpMul(&c.z, t4, h)
}
|
||||||
|
|
||||||
|
// Double sets c = 2a in Jacobian coordinates.
func (c *curvePoint) Double(a *curvePoint) {
	// See http://hyperelliptic.org/EFD/g1p/auto-code/shortw/jacobian-0/doubling/dbl-2009-l.op3
	A, B, C := &gfP{}, &gfP{}, &gfP{}
	gfpMul(A, &a.x, &a.x)
	gfpMul(B, &a.y, &a.y)
	gfpMul(C, B, B)

	t, t2 := &gfP{}, &gfP{}
	gfpAdd(t, &a.x, B)
	gfpMul(t2, t, t)
	gfpSub(t, t2, A)
	gfpSub(t2, t, C)

	// d = 2((x+B)²-A-C), e = 3A, f = e²
	d, e, f := &gfP{}, &gfP{}, &gfP{}
	gfpAdd(d, t2, t2)
	gfpAdd(t, A, A)
	gfpAdd(e, t, A)
	gfpMul(f, e, e)

	gfpAdd(t, d, d)
	gfpSub(&c.x, f, t)

	gfpAdd(t, C, C)
	gfpAdd(t2, t, t)
	gfpAdd(t, t2, t2)
	gfpSub(&c.y, d, &c.x)
	gfpMul(t2, e, &c.y)
	gfpSub(&c.y, t2, t)

	gfpMul(t, &a.y, &a.z)
	gfpAdd(&c.z, t, t)
}
|
||||||
|
|
||||||
|
// Mul sets c = scalar·a using a simple double-and-add from the most
// significant bit down. The loop starts at BitLen() (one past the top set
// bit), which only performs one harmless extra doubling of infinity.
func (c *curvePoint) Mul(a *curvePoint, scalar *big.Int) {
	sum, t := &curvePoint{}, &curvePoint{}
	sum.SetInfinity()

	for i := scalar.BitLen(); i >= 0; i-- {
		t.Double(sum)
		if scalar.Bit(i) != 0 {
			sum.Add(t, a)
		} else {
			sum.Set(t)
		}
	}
	c.Set(sum)
}
|
||||||
|
|
||||||
|
// MakeAffine normalizes c to affine form (z == 1) in place. The point at
// infinity (z == 0) is mapped to the canonical encoding x=0, y=1, t=0.
func (c *curvePoint) MakeAffine() {
	if c.z == *newGFp(1) {
		// Already affine.
		return
	} else if c.z == *newGFp(0) {
		c.x = gfP{0}
		c.y = *newGFp(1)
		c.t = gfP{0}
		return
	}

	zInv := &gfP{}
	zInv.Invert(&c.z)

	// Affine x = X/Z², affine y = Y/Z³ = (Y·zInv)·zInv².
	t, zInv2 := &gfP{}, &gfP{}
	gfpMul(t, &c.y, zInv)
	gfpMul(zInv2, zInv, zInv)

	gfpMul(&c.x, &c.x, zInv2)
	gfpMul(&c.y, t, zInv2)

	c.z = *newGFp(1)
	c.t = *newGFp(1)
}
|
||||||
|
|
||||||
|
// Neg sets c = -a (negated y coordinate). The cached t is cleared rather
// than recomputed.
func (c *curvePoint) Neg(a *curvePoint) {
	c.x.Set(&a.x)
	gfpNeg(&c.y, &a.y)
	c.z.Set(&a.z)
	c.t = gfP{0}
}
|
45
crypto/bn256/cloudflare/example_test.go
Normal file
45
crypto/bn256/cloudflare/example_test.go
Normal file
@ -0,0 +1,45 @@
|
|||||||
|
// Copyright 2012 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build amd64,!appengine,!gccgo
|
||||||
|
|
||||||
|
package bn256
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/rand"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ExamplePair demonstrates one-round tripartite Diffie-Hellman using the
// pairing.
func ExamplePair() {
	// This implements the tripartite Diffie-Hellman algorithm from "A One
	// Round Protocol for Tripartite Diffie-Hellman", A. Joux.
	// http://www.springerlink.com/content/cddc57yyva0hburb/fulltext.pdf

	// Each of three parties, a, b and c, generate a private value.
	a, _ := rand.Int(rand.Reader, Order)
	b, _ := rand.Int(rand.Reader, Order)
	c, _ := rand.Int(rand.Reader, Order)

	// Then each party calculates g₁ and g₂ times their private value.
	pa := new(G1).ScalarBaseMult(a)
	qa := new(G2).ScalarBaseMult(a)

	pb := new(G1).ScalarBaseMult(b)
	qb := new(G2).ScalarBaseMult(b)

	pc := new(G1).ScalarBaseMult(c)
	qc := new(G2).ScalarBaseMult(c)

	// Now each party exchanges its public values with the other two and
	// all parties can calculate the shared key.
	k1 := Pair(pb, qc)
	k1.ScalarMult(k1, a)

	k2 := Pair(pc, qa)
	k2.ScalarMult(k2, b)

	k3 := Pair(pa, qb)
	k3.ScalarMult(k3, c)

	// k1, k2 and k3 will all be equal.
}
|
81
crypto/bn256/cloudflare/gfp.go
Normal file
81
crypto/bn256/cloudflare/gfp.go
Normal file
@ -0,0 +1,81 @@
|
|||||||
|
package bn256
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// gfP is an element of the base field GF(p), stored as four little-endian
// 64-bit limbs (normally in Montgomery form — see montEncode/montDecode).
type gfP [4]uint64
|
||||||
|
|
||||||
|
func newGFp(x int64) (out *gfP) {
|
||||||
|
if x >= 0 {
|
||||||
|
out = &gfP{uint64(x)}
|
||||||
|
} else {
|
||||||
|
out = &gfP{uint64(-x)}
|
||||||
|
gfpNeg(out, out)
|
||||||
|
}
|
||||||
|
|
||||||
|
montEncode(out, out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// String renders e as a 64-hex-digit string, most significant limb first.
func (e *gfP) String() string {
	return fmt.Sprintf("%16.16x%16.16x%16.16x%16.16x", e[3], e[2], e[1], e[0])
}

// Set copies f into e limb by limb.
func (e *gfP) Set(f *gfP) {
	e[0] = f[0]
	e[1] = f[1]
	e[2] = f[2]
	e[3] = f[3]
}
|
||||||
|
|
||||||
|
// Invert sets e = f⁻¹ by raising f to p-2 (Fermat's little theorem).
func (e *gfP) Invert(f *gfP) {
	// bits is p-2 in little-endian 64-bit words (compare p2, whose low word
	// ends in ...fd47; here it ends in ...fd45).
	bits := [4]uint64{0x3c208c16d87cfd45, 0x97816a916871ca8d, 0xb85045b68181585d, 0x30644e72e131a029}

	// Square-and-multiply; sum starts at R⁻¹ and the final r3 multiply
	// restores the Montgomery form of the result.
	sum, power := &gfP{}, &gfP{}
	sum.Set(rN1)
	power.Set(f)

	for word := 0; word < 4; word++ {
		for bit := uint(0); bit < 64; bit++ {
			if (bits[word]>>bit)&1 == 1 {
				gfpMul(sum, sum, power)
			}
			gfpMul(power, power, power)
		}
	}

	gfpMul(sum, sum, r3)
	e.Set(sum)
}
|
||||||
|
|
||||||
|
func (e *gfP) Marshal(out []byte) {
|
||||||
|
for w := uint(0); w < 4; w++ {
|
||||||
|
for b := uint(0); b < 8; b++ {
|
||||||
|
out[8*w+b] = byte(e[3-w] >> (56 - 8*b))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *gfP) Unmarshal(in []byte) error {
|
||||||
|
// Unmarshal the bytes into little endian form
|
||||||
|
for w := uint(0); w < 4; w++ {
|
||||||
|
for b := uint(0); b < 8; b++ {
|
||||||
|
e[3-w] += uint64(in[8*w+b]) << (56 - 8*b)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Ensure the point respects the curve modulus
|
||||||
|
for i := 3; i >= 0; i-- {
|
||||||
|
if e[i] < p2[i] {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if e[i] > p2[i] {
|
||||||
|
return errors.New("bn256: coordinate exceeds modulus")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return errors.New("bn256: coordinate equals modulus")
|
||||||
|
}
|
||||||
|
|
||||||
|
// montEncode converts a into Montgomery form: c = a·R mod p (multiply by R²).
func montEncode(c, a *gfP) { gfpMul(c, a, r2) }

// montDecode converts a out of Montgomery form: c = a·R⁻¹ mod p.
func montDecode(c, a *gfP) { gfpMul(c, a, &gfP{1}) }
|
32
crypto/bn256/cloudflare/gfp.h
Normal file
32
crypto/bn256/cloudflare/gfp.h
Normal file
@ -0,0 +1,32 @@
|
|||||||
|
// storeBlock writes the four 64-bit limbs a0..a3 to the memory operand r.
#define storeBlock(a0,a1,a2,a3, r) \
	MOVQ a0, 0+r \
	MOVQ a1, 8+r \
	MOVQ a2, 16+r \
	MOVQ a3, 24+r

// loadBlock reads four 64-bit limbs from the memory operand r into a0..a3.
#define loadBlock(r, a0,a1,a2,a3) \
	MOVQ 0+r, a0 \
	MOVQ 8+r, a1 \
	MOVQ 16+r, a2 \
	MOVQ 24+r, a3

// gfpCarry reduces a (with carry limb a4) by conditionally subtracting the
// modulus p2, leaving the result in a0..a3. b0..b4 are scratch registers.
#define gfpCarry(a0,a1,a2,a3,a4, b0,b1,b2,b3,b4) \
	\ // b = a-p
	MOVQ a0, b0 \
	MOVQ a1, b1 \
	MOVQ a2, b2 \
	MOVQ a3, b3 \
	MOVQ a4, b4 \
	\
	SUBQ ·p2+0(SB), b0 \
	SBBQ ·p2+8(SB), b1 \
	SBBQ ·p2+16(SB), b2 \
	SBBQ ·p2+24(SB), b3 \
	SBBQ $0, b4 \
	\
	\ // if b is negative then return a
	\ // else return b
	CMOVQCC b0, a0 \
	CMOVQCC b1, a1 \
	CMOVQCC b2, a2 \
	CMOVQCC b3, a3
|
160
crypto/bn256/cloudflare/gfp12.go
Normal file
160
crypto/bn256/cloudflare/gfp12.go
Normal file
@ -0,0 +1,160 @@
|
|||||||
|
package bn256
|
||||||
|
|
||||||
|
// For details of the algorithms used, see "Multiplication and Squaring on
|
||||||
|
// Pairing-Friendly Fields", Devegili et al.
|
||||||
|
// http://eprint.iacr.org/2006/471.pdf.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/big"
|
||||||
|
)
|
||||||
|
|
||||||
|
// gfP12 implements the field of size p¹² as a quadratic extension of gfP6
// where ω²=τ.
type gfP12 struct {
	x, y gfP6 // value is xω + y
}

// String renders e as "(x,y)".
func (e *gfP12) String() string {
	return "(" + e.x.String() + "," + e.y.String() + ")"
}
|
||||||
|
|
||||||
|
// Set copies a into e and returns e.
func (e *gfP12) Set(a *gfP12) *gfP12 {
	e.x.Set(&a.x)
	e.y.Set(&a.y)
	return e
}

// SetZero sets e to the additive identity and returns e.
func (e *gfP12) SetZero() *gfP12 {
	e.x.SetZero()
	e.y.SetZero()
	return e
}

// SetOne sets e to the multiplicative identity (0·ω + 1) and returns e.
func (e *gfP12) SetOne() *gfP12 {
	e.x.SetZero()
	e.y.SetOne()
	return e
}
|
||||||
|
|
||||||
|
// IsZero reports whether e is the additive identity.
func (e *gfP12) IsZero() bool {
	return e.x.IsZero() && e.y.IsZero()
}

// IsOne reports whether e is the multiplicative identity.
func (e *gfP12) IsOne() bool {
	return e.x.IsZero() && e.y.IsOne()
}

// Conjugate sets e to the conjugate of a (ω-component negated) and returns e.
func (e *gfP12) Conjugate(a *gfP12) *gfP12 {
	e.x.Neg(&a.x)
	e.y.Set(&a.y)
	return e
}

// Neg sets e = -a and returns e.
func (e *gfP12) Neg(a *gfP12) *gfP12 {
	e.x.Neg(&a.x)
	e.y.Neg(&a.y)
	return e
}
|
||||||
|
|
||||||
|
// Frobenius computes (xω+y)^p = x^p ω·ξ^((p-1)/6) + y^p
func (e *gfP12) Frobenius(a *gfP12) *gfP12 {
	e.x.Frobenius(&a.x)
	e.y.Frobenius(&a.y)
	e.x.MulScalar(&e.x, xiToPMinus1Over6)
	return e
}

// FrobeniusP2 computes (xω+y)^p² = x^p² ω·ξ^((p²-1)/6) + y^p²
func (e *gfP12) FrobeniusP2(a *gfP12) *gfP12 {
	e.x.FrobeniusP2(&a.x)
	e.x.MulGFP(&e.x, xiToPSquaredMinus1Over6)
	e.y.FrobeniusP2(&a.y)
	return e
}

// FrobeniusP4 computes the fourth power of the Frobenius endomorphism,
// using the precomputed constant ξ^((p²-1)/3).
func (e *gfP12) FrobeniusP4(a *gfP12) *gfP12 {
	e.x.FrobeniusP4(&a.x)
	e.x.MulGFP(&e.x, xiToPSquaredMinus1Over3)
	e.y.FrobeniusP4(&a.y)
	return e
}
|
||||||
|
|
||||||
|
// Add sets e = a + b componentwise and returns e.
func (e *gfP12) Add(a, b *gfP12) *gfP12 {
	e.x.Add(&a.x, &b.x)
	e.y.Add(&a.y, &b.y)
	return e
}

// Sub sets e = a - b componentwise and returns e.
func (e *gfP12) Sub(a, b *gfP12) *gfP12 {
	e.x.Sub(&a.x, &b.x)
	e.y.Sub(&a.y, &b.y)
	return e
}
|
||||||
|
|
||||||
|
// Mul sets e = a·b: (a.x ω + a.y)(b.x ω + b.y) with ω² = τ, so the cross
// terms form the new ω component and a.x·b.x is folded back via MulTau.
func (e *gfP12) Mul(a, b *gfP12) *gfP12 {
	tx := (&gfP6{}).Mul(&a.x, &b.y)
	t := (&gfP6{}).Mul(&b.x, &a.y)
	tx.Add(tx, t)

	ty := (&gfP6{}).Mul(&a.y, &b.y)
	t.Mul(&a.x, &b.x).MulTau(t)

	e.x.Set(tx)
	e.y.Add(ty, t)
	return e
}
|
||||||
|
|
||||||
|
func (e *gfP12) MulScalar(a *gfP12, b *gfP6) *gfP12 {
|
||||||
|
e.x.Mul(&e.x, b)
|
||||||
|
e.y.Mul(&e.y, b)
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exp sets c = a^power using left-to-right square-and-multiply and returns c.
// Note this is not constant-time in power.
func (c *gfP12) Exp(a *gfP12, power *big.Int) *gfP12 {
	sum := (&gfP12{}).SetOne()
	t := &gfP12{}

	for i := power.BitLen() - 1; i >= 0; i-- {
		t.Square(sum)
		if power.Bit(i) != 0 {
			sum.Mul(t, a)
		} else {
			sum.Set(t)
		}
	}

	c.Set(sum)
	return c
}
|
||||||
|
|
||||||
|
// Square sets e = a² using the complex squaring shortcut for quadratic
// extensions and returns e.
func (e *gfP12) Square(a *gfP12) *gfP12 {
	// Complex squaring algorithm
	v0 := (&gfP6{}).Mul(&a.x, &a.y)

	t := (&gfP6{}).MulTau(&a.x)
	t.Add(&a.y, t)
	ty := (&gfP6{}).Add(&a.x, &a.y)
	ty.Mul(ty, t).Sub(ty, v0)
	t.MulTau(v0)
	ty.Sub(ty, t)

	e.x.Add(v0, v0)
	e.y.Set(ty)
	return e
}
|
||||||
|
|
||||||
|
// Invert sets e = a⁻¹ and returns e.
func (e *gfP12) Invert(a *gfP12) *gfP12 {
	// See "Implementing cryptographic pairings", M. Scott, section 3.2.
	// ftp://136.206.11.249/pub/crypto/pairings.pdf
	t1, t2 := &gfP6{}, &gfP6{}

	// t2 = (y² - τx²)⁻¹, the inverse of the norm of a.
	t1.Square(&a.x)
	t2.Square(&a.y)
	t1.MulTau(t1).Sub(t2, t1)
	t2.Invert(t1)

	// e = conjugate(a) scaled by the inverse norm.
	e.x.Neg(&a.x)
	e.y.Set(&a.y)
	e.MulScalar(e, t2)
	return e
}
|
156
crypto/bn256/cloudflare/gfp2.go
Normal file
156
crypto/bn256/cloudflare/gfp2.go
Normal file
@ -0,0 +1,156 @@
|
|||||||
|
package bn256
|
||||||
|
|
||||||
|
// For details of the algorithms used, see "Multiplication and Squaring on
|
||||||
|
// Pairing-Friendly Fields", Devegili et al.
|
||||||
|
// http://eprint.iacr.org/2006/471.pdf.
|
||||||
|
|
||||||
|
// gfP2 implements a field of size p² as a quadratic extension of the base field
// where i²=-1.
type gfP2 struct {
	x, y gfP // value is xi+y.
}

// gfP2Decode returns a copy of in with both components Montgomery-decoded.
func gfP2Decode(in *gfP2) *gfP2 {
	out := &gfP2{}
	montDecode(&out.x, &in.x)
	montDecode(&out.y, &in.y)
	return out
}

// String renders e as "(x, y)".
func (e *gfP2) String() string {
	return "(" + e.x.String() + ", " + e.y.String() + ")"
}
|
||||||
|
|
||||||
|
// Set copies a into e and returns e.
func (e *gfP2) Set(a *gfP2) *gfP2 {
	e.x.Set(&a.x)
	e.y.Set(&a.y)
	return e
}

// SetZero sets e to the additive identity and returns e.
func (e *gfP2) SetZero() *gfP2 {
	e.x = gfP{0}
	e.y = gfP{0}
	return e
}

// SetOne sets e to the multiplicative identity (0·i + 1) and returns e.
func (e *gfP2) SetOne() *gfP2 {
	e.x = gfP{0}
	e.y = *newGFp(1)
	return e
}
|
||||||
|
|
||||||
|
// IsZero reports whether e is the additive identity.
func (e *gfP2) IsZero() bool {
	zero := gfP{0}
	return e.x == zero && e.y == zero
}

// IsOne reports whether e is the multiplicative identity.
func (e *gfP2) IsOne() bool {
	zero, one := gfP{0}, *newGFp(1)
	return e.x == zero && e.y == one
}

// Conjugate sets e to the complex conjugate of a (i-component negated) and
// returns e.
func (e *gfP2) Conjugate(a *gfP2) *gfP2 {
	e.y.Set(&a.y)
	gfpNeg(&e.x, &a.x)
	return e
}

// Neg sets e = -a and returns e.
func (e *gfP2) Neg(a *gfP2) *gfP2 {
	gfpNeg(&e.x, &a.x)
	gfpNeg(&e.y, &a.y)
	return e
}
|
||||||
|
|
||||||
|
// Add sets e = a + b componentwise and returns e.
func (e *gfP2) Add(a, b *gfP2) *gfP2 {
	gfpAdd(&e.x, &a.x, &b.x)
	gfpAdd(&e.y, &a.y, &b.y)
	return e
}

// Sub sets e = a - b componentwise and returns e.
func (e *gfP2) Sub(a, b *gfP2) *gfP2 {
	gfpSub(&e.x, &a.x, &b.x)
	gfpSub(&e.y, &a.y, &b.y)
	return e
}
|
||||||
|
|
||||||
|
// See "Multiplication and Squaring in Pairing-Friendly Fields",
// http://eprint.iacr.org/2006/471.pdf
// Mul sets e = a·b: (a.x i + a.y)(b.x i + b.y) with i² = -1.
func (e *gfP2) Mul(a, b *gfP2) *gfP2 {
	tx, t := &gfP{}, &gfP{}
	gfpMul(tx, &a.x, &b.y)
	gfpMul(t, &b.x, &a.y)
	gfpAdd(tx, tx, t)

	ty := &gfP{}
	gfpMul(ty, &a.y, &b.y)
	gfpMul(t, &a.x, &b.x)
	// i² = -1, so the x·x product is subtracted from the real part.
	gfpSub(ty, ty, t)

	e.x.Set(tx)
	e.y.Set(ty)
	return e
}
|
||||||
|
|
||||||
|
// MulScalar sets e to a scaled by the base-field element b and returns e.
func (e *gfP2) MulScalar(a *gfP2, b *gfP) *gfP2 {
	gfpMul(&e.x, &a.x, b)
	gfpMul(&e.y, &a.y, b)
	return e
}
|
||||||
|
|
||||||
|
// MulXi sets e=ξa where ξ=i+9 and then returns e.
func (e *gfP2) MulXi(a *gfP2) *gfP2 {
	// (xi+y)(i+9) = (9x+y)i+(9y-x)
	// 9x is computed additively: 2x, 4x, 8x, then +x.
	tx := &gfP{}
	gfpAdd(tx, &a.x, &a.x)
	gfpAdd(tx, tx, tx)
	gfpAdd(tx, tx, tx)
	gfpAdd(tx, tx, &a.x)

	gfpAdd(tx, tx, &a.y)

	// 9y by the same doubling chain.
	ty := &gfP{}
	gfpAdd(ty, &a.y, &a.y)
	gfpAdd(ty, ty, ty)
	gfpAdd(ty, ty, ty)
	gfpAdd(ty, ty, &a.y)

	gfpSub(ty, ty, &a.x)

	e.x.Set(tx)
	e.y.Set(ty)
	return e
}
|
||||||
|
|
||||||
|
// Square sets e = a² and returns e.
func (e *gfP2) Square(a *gfP2) *gfP2 {
	// Complex squaring algorithm:
	// (xi+y)² = (x+y)(y-x) + 2*i*x*y
	tx, ty := &gfP{}, &gfP{}
	gfpSub(tx, &a.y, &a.x)
	gfpAdd(ty, &a.x, &a.y)
	gfpMul(ty, tx, ty)

	gfpMul(tx, &a.x, &a.y)
	gfpAdd(tx, tx, tx)

	e.x.Set(tx)
	e.y.Set(ty)
	return e
}
|
||||||
|
|
||||||
|
// Invert sets e = a⁻¹ and returns e.
func (e *gfP2) Invert(a *gfP2) *gfP2 {
	// See "Implementing cryptographic pairings", M. Scott, section 3.2.
	// ftp://136.206.11.249/pub/crypto/pairings.pdf
	// inv = (x² + y²)⁻¹, the inverse of the norm of a.
	t1, t2 := &gfP{}, &gfP{}
	gfpMul(t1, &a.x, &a.x)
	gfpMul(t2, &a.y, &a.y)
	gfpAdd(t1, t1, t2)

	inv := &gfP{}
	inv.Invert(t1)

	// e = conjugate(a) scaled by the inverse norm.
	gfpNeg(t1, &a.x)

	gfpMul(&e.x, t1, inv)
	gfpMul(&e.y, &a.y, inv)
	return e
}
|
213
crypto/bn256/cloudflare/gfp6.go
Normal file
213
crypto/bn256/cloudflare/gfp6.go
Normal file
@ -0,0 +1,213 @@
|
|||||||
|
package bn256
|
||||||
|
|
||||||
|
// For details of the algorithms used, see "Multiplication and Squaring on
|
||||||
|
// Pairing-Friendly Fields, Devegili et al.
|
||||||
|
// http://eprint.iacr.org/2006/471.pdf.
|
||||||
|
|
||||||
|
// gfP6 implements the field of size p⁶ as a cubic extension of gfP2 where τ³=ξ
|
||||||
|
// and ξ=i+3.
|
||||||
|
type gfP6 struct {
|
||||||
|
x, y, z gfP2 // value is xτ² + yτ + z
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *gfP6) String() string {
|
||||||
|
return "(" + e.x.String() + ", " + e.y.String() + ", " + e.z.String() + ")"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *gfP6) Set(a *gfP6) *gfP6 {
|
||||||
|
e.x.Set(&a.x)
|
||||||
|
e.y.Set(&a.y)
|
||||||
|
e.z.Set(&a.z)
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *gfP6) SetZero() *gfP6 {
|
||||||
|
e.x.SetZero()
|
||||||
|
e.y.SetZero()
|
||||||
|
e.z.SetZero()
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *gfP6) SetOne() *gfP6 {
|
||||||
|
e.x.SetZero()
|
||||||
|
e.y.SetZero()
|
||||||
|
e.z.SetOne()
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *gfP6) IsZero() bool {
|
||||||
|
return e.x.IsZero() && e.y.IsZero() && e.z.IsZero()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *gfP6) IsOne() bool {
|
||||||
|
return e.x.IsZero() && e.y.IsZero() && e.z.IsOne()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *gfP6) Neg(a *gfP6) *gfP6 {
|
||||||
|
e.x.Neg(&a.x)
|
||||||
|
e.y.Neg(&a.y)
|
||||||
|
e.z.Neg(&a.z)
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *gfP6) Frobenius(a *gfP6) *gfP6 {
|
||||||
|
e.x.Conjugate(&a.x)
|
||||||
|
e.y.Conjugate(&a.y)
|
||||||
|
e.z.Conjugate(&a.z)
|
||||||
|
|
||||||
|
e.x.Mul(&e.x, xiTo2PMinus2Over3)
|
||||||
|
e.y.Mul(&e.y, xiToPMinus1Over3)
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
// FrobeniusP2 computes (xτ²+yτ+z)^(p²) = xτ^(2p²) + yτ^(p²) + z
|
||||||
|
func (e *gfP6) FrobeniusP2(a *gfP6) *gfP6 {
|
||||||
|
// τ^(2p²) = τ²τ^(2p²-2) = τ²ξ^((2p²-2)/3)
|
||||||
|
e.x.MulScalar(&a.x, xiTo2PSquaredMinus2Over3)
|
||||||
|
// τ^(p²) = ττ^(p²-1) = τξ^((p²-1)/3)
|
||||||
|
e.y.MulScalar(&a.y, xiToPSquaredMinus1Over3)
|
||||||
|
e.z.Set(&a.z)
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *gfP6) FrobeniusP4(a *gfP6) *gfP6 {
|
||||||
|
e.x.MulScalar(&a.x, xiToPSquaredMinus1Over3)
|
||||||
|
e.y.MulScalar(&a.y, xiTo2PSquaredMinus2Over3)
|
||||||
|
e.z.Set(&a.z)
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *gfP6) Add(a, b *gfP6) *gfP6 {
|
||||||
|
e.x.Add(&a.x, &b.x)
|
||||||
|
e.y.Add(&a.y, &b.y)
|
||||||
|
e.z.Add(&a.z, &b.z)
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *gfP6) Sub(a, b *gfP6) *gfP6 {
|
||||||
|
e.x.Sub(&a.x, &b.x)
|
||||||
|
e.y.Sub(&a.y, &b.y)
|
||||||
|
e.z.Sub(&a.z, &b.z)
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *gfP6) Mul(a, b *gfP6) *gfP6 {
|
||||||
|
// "Multiplication and Squaring on Pairing-Friendly Fields"
|
||||||
|
// Section 4, Karatsuba method.
|
||||||
|
// http://eprint.iacr.org/2006/471.pdf
|
||||||
|
v0 := (&gfP2{}).Mul(&a.z, &b.z)
|
||||||
|
v1 := (&gfP2{}).Mul(&a.y, &b.y)
|
||||||
|
v2 := (&gfP2{}).Mul(&a.x, &b.x)
|
||||||
|
|
||||||
|
t0 := (&gfP2{}).Add(&a.x, &a.y)
|
||||||
|
t1 := (&gfP2{}).Add(&b.x, &b.y)
|
||||||
|
tz := (&gfP2{}).Mul(t0, t1)
|
||||||
|
tz.Sub(tz, v1).Sub(tz, v2).MulXi(tz).Add(tz, v0)
|
||||||
|
|
||||||
|
t0.Add(&a.y, &a.z)
|
||||||
|
t1.Add(&b.y, &b.z)
|
||||||
|
ty := (&gfP2{}).Mul(t0, t1)
|
||||||
|
t0.MulXi(v2)
|
||||||
|
ty.Sub(ty, v0).Sub(ty, v1).Add(ty, t0)
|
||||||
|
|
||||||
|
t0.Add(&a.x, &a.z)
|
||||||
|
t1.Add(&b.x, &b.z)
|
||||||
|
tx := (&gfP2{}).Mul(t0, t1)
|
||||||
|
tx.Sub(tx, v0).Add(tx, v1).Sub(tx, v2)
|
||||||
|
|
||||||
|
e.x.Set(tx)
|
||||||
|
e.y.Set(ty)
|
||||||
|
e.z.Set(tz)
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *gfP6) MulScalar(a *gfP6, b *gfP2) *gfP6 {
|
||||||
|
e.x.Mul(&a.x, b)
|
||||||
|
e.y.Mul(&a.y, b)
|
||||||
|
e.z.Mul(&a.z, b)
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *gfP6) MulGFP(a *gfP6, b *gfP) *gfP6 {
|
||||||
|
e.x.MulScalar(&a.x, b)
|
||||||
|
e.y.MulScalar(&a.y, b)
|
||||||
|
e.z.MulScalar(&a.z, b)
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
// MulTau computes τ·(aτ²+bτ+c) = bτ²+cτ+aξ
|
||||||
|
func (e *gfP6) MulTau(a *gfP6) *gfP6 {
|
||||||
|
tz := (&gfP2{}).MulXi(&a.x)
|
||||||
|
ty := (&gfP2{}).Set(&a.y)
|
||||||
|
|
||||||
|
e.y.Set(&a.z)
|
||||||
|
e.x.Set(ty)
|
||||||
|
e.z.Set(tz)
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *gfP6) Square(a *gfP6) *gfP6 {
|
||||||
|
v0 := (&gfP2{}).Square(&a.z)
|
||||||
|
v1 := (&gfP2{}).Square(&a.y)
|
||||||
|
v2 := (&gfP2{}).Square(&a.x)
|
||||||
|
|
||||||
|
c0 := (&gfP2{}).Add(&a.x, &a.y)
|
||||||
|
c0.Square(c0).Sub(c0, v1).Sub(c0, v2).MulXi(c0).Add(c0, v0)
|
||||||
|
|
||||||
|
c1 := (&gfP2{}).Add(&a.y, &a.z)
|
||||||
|
c1.Square(c1).Sub(c1, v0).Sub(c1, v1)
|
||||||
|
xiV2 := (&gfP2{}).MulXi(v2)
|
||||||
|
c1.Add(c1, xiV2)
|
||||||
|
|
||||||
|
c2 := (&gfP2{}).Add(&a.x, &a.z)
|
||||||
|
c2.Square(c2).Sub(c2, v0).Add(c2, v1).Sub(c2, v2)
|
||||||
|
|
||||||
|
e.x.Set(c2)
|
||||||
|
e.y.Set(c1)
|
||||||
|
e.z.Set(c0)
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *gfP6) Invert(a *gfP6) *gfP6 {
|
||||||
|
// See "Implementing cryptographic pairings", M. Scott, section 3.2.
|
||||||
|
// ftp://136.206.11.249/pub/crypto/pairings.pdf
|
||||||
|
|
||||||
|
// Here we can give a short explanation of how it works: let j be a cubic root of
|
||||||
|
// unity in GF(p²) so that 1+j+j²=0.
|
||||||
|
// Then (xτ² + yτ + z)(xj²τ² + yjτ + z)(xjτ² + yj²τ + z)
|
||||||
|
// = (xτ² + yτ + z)(Cτ²+Bτ+A)
|
||||||
|
// = (x³ξ²+y³ξ+z³-3ξxyz) = F is an element of the base field (the norm).
|
||||||
|
//
|
||||||
|
// On the other hand (xj²τ² + yjτ + z)(xjτ² + yj²τ + z)
|
||||||
|
// = τ²(y²-ξxz) + τ(ξx²-yz) + (z²-ξxy)
|
||||||
|
//
|
||||||
|
// So that's why A = (z²-ξxy), B = (ξx²-yz), C = (y²-ξxz)
|
||||||
|
t1 := (&gfP2{}).Mul(&a.x, &a.y)
|
||||||
|
t1.MulXi(t1)
|
||||||
|
|
||||||
|
A := (&gfP2{}).Square(&a.z)
|
||||||
|
A.Sub(A, t1)
|
||||||
|
|
||||||
|
B := (&gfP2{}).Square(&a.x)
|
||||||
|
B.MulXi(B)
|
||||||
|
t1.Mul(&a.y, &a.z)
|
||||||
|
B.Sub(B, t1)
|
||||||
|
|
||||||
|
C := (&gfP2{}).Square(&a.y)
|
||||||
|
t1.Mul(&a.x, &a.z)
|
||||||
|
C.Sub(C, t1)
|
||||||
|
|
||||||
|
F := (&gfP2{}).Mul(C, &a.y)
|
||||||
|
F.MulXi(F)
|
||||||
|
t1.Mul(A, &a.z)
|
||||||
|
F.Add(F, t1)
|
||||||
|
t1.Mul(B, &a.x).MulXi(t1)
|
||||||
|
F.Add(F, t1)
|
||||||
|
|
||||||
|
F.Invert(F)
|
||||||
|
|
||||||
|
e.x.Mul(C, F)
|
||||||
|
e.y.Mul(B, F)
|
||||||
|
e.z.Mul(A, F)
|
||||||
|
return e
|
||||||
|
}
|
15
crypto/bn256/cloudflare/gfp_amd64.go
Normal file
15
crypto/bn256/cloudflare/gfp_amd64.go
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
// +build amd64,!appengine,!gccgo
|
||||||
|
|
||||||
|
package bn256
|
||||||
|
|
||||||
|
// go:noescape
|
||||||
|
func gfpNeg(c, a *gfP)
|
||||||
|
|
||||||
|
//go:noescape
|
||||||
|
func gfpAdd(c, a, b *gfP)
|
||||||
|
|
||||||
|
//go:noescape
|
||||||
|
func gfpSub(c, a, b *gfP)
|
||||||
|
|
||||||
|
//go:noescape
|
||||||
|
func gfpMul(c, a, b *gfP)
|
97
crypto/bn256/cloudflare/gfp_amd64.s
Normal file
97
crypto/bn256/cloudflare/gfp_amd64.s
Normal file
@ -0,0 +1,97 @@
|
|||||||
|
// +build amd64,!appengine,!gccgo
|
||||||
|
|
||||||
|
#include "gfp.h"
|
||||||
|
#include "mul.h"
|
||||||
|
#include "mul_bmi2.h"
|
||||||
|
|
||||||
|
TEXT ·gfpNeg(SB),0,$0-16
|
||||||
|
MOVQ ·p2+0(SB), R8
|
||||||
|
MOVQ ·p2+8(SB), R9
|
||||||
|
MOVQ ·p2+16(SB), R10
|
||||||
|
MOVQ ·p2+24(SB), R11
|
||||||
|
|
||||||
|
MOVQ a+8(FP), DI
|
||||||
|
SUBQ 0(DI), R8
|
||||||
|
SBBQ 8(DI), R9
|
||||||
|
SBBQ 16(DI), R10
|
||||||
|
SBBQ 24(DI), R11
|
||||||
|
|
||||||
|
MOVQ $0, AX
|
||||||
|
gfpCarry(R8,R9,R10,R11,AX, R12,R13,R14,R15,BX)
|
||||||
|
|
||||||
|
MOVQ c+0(FP), DI
|
||||||
|
storeBlock(R8,R9,R10,R11, 0(DI))
|
||||||
|
RET
|
||||||
|
|
||||||
|
TEXT ·gfpAdd(SB),0,$0-24
|
||||||
|
MOVQ a+8(FP), DI
|
||||||
|
MOVQ b+16(FP), SI
|
||||||
|
|
||||||
|
loadBlock(0(DI), R8,R9,R10,R11)
|
||||||
|
MOVQ $0, R12
|
||||||
|
|
||||||
|
ADDQ 0(SI), R8
|
||||||
|
ADCQ 8(SI), R9
|
||||||
|
ADCQ 16(SI), R10
|
||||||
|
ADCQ 24(SI), R11
|
||||||
|
ADCQ $0, R12
|
||||||
|
|
||||||
|
gfpCarry(R8,R9,R10,R11,R12, R13,R14,R15,AX,BX)
|
||||||
|
|
||||||
|
MOVQ c+0(FP), DI
|
||||||
|
storeBlock(R8,R9,R10,R11, 0(DI))
|
||||||
|
RET
|
||||||
|
|
||||||
|
TEXT ·gfpSub(SB),0,$0-24
|
||||||
|
MOVQ a+8(FP), DI
|
||||||
|
MOVQ b+16(FP), SI
|
||||||
|
|
||||||
|
loadBlock(0(DI), R8,R9,R10,R11)
|
||||||
|
|
||||||
|
MOVQ ·p2+0(SB), R12
|
||||||
|
MOVQ ·p2+8(SB), R13
|
||||||
|
MOVQ ·p2+16(SB), R14
|
||||||
|
MOVQ ·p2+24(SB), R15
|
||||||
|
MOVQ $0, AX
|
||||||
|
|
||||||
|
SUBQ 0(SI), R8
|
||||||
|
SBBQ 8(SI), R9
|
||||||
|
SBBQ 16(SI), R10
|
||||||
|
SBBQ 24(SI), R11
|
||||||
|
|
||||||
|
CMOVQCC AX, R12
|
||||||
|
CMOVQCC AX, R13
|
||||||
|
CMOVQCC AX, R14
|
||||||
|
CMOVQCC AX, R15
|
||||||
|
|
||||||
|
ADDQ R12, R8
|
||||||
|
ADCQ R13, R9
|
||||||
|
ADCQ R14, R10
|
||||||
|
ADCQ R15, R11
|
||||||
|
|
||||||
|
MOVQ c+0(FP), DI
|
||||||
|
storeBlock(R8,R9,R10,R11, 0(DI))
|
||||||
|
RET
|
||||||
|
|
||||||
|
TEXT ·gfpMul(SB),0,$160-24
|
||||||
|
MOVQ a+8(FP), DI
|
||||||
|
MOVQ b+16(FP), SI
|
||||||
|
|
||||||
|
// Jump to a slightly different implementation if MULX isn't supported.
|
||||||
|
CMPB runtime·support_bmi2(SB), $0
|
||||||
|
JE nobmi2Mul
|
||||||
|
|
||||||
|
mulBMI2(0(DI),8(DI),16(DI),24(DI), 0(SI))
|
||||||
|
storeBlock( R8, R9,R10,R11, 0(SP))
|
||||||
|
storeBlock(R12,R13,R14,R15, 32(SP))
|
||||||
|
gfpReduceBMI2()
|
||||||
|
JMP end
|
||||||
|
|
||||||
|
nobmi2Mul:
|
||||||
|
mul(0(DI),8(DI),16(DI),24(DI), 0(SI), 0(SP))
|
||||||
|
gfpReduce(0(SP))
|
||||||
|
|
||||||
|
end:
|
||||||
|
MOVQ c+0(FP), DI
|
||||||
|
storeBlock(R12,R13,R14,R15, 0(DI))
|
||||||
|
RET
|
19
crypto/bn256/cloudflare/gfp_pure.go
Normal file
19
crypto/bn256/cloudflare/gfp_pure.go
Normal file
@ -0,0 +1,19 @@
|
|||||||
|
// +build !amd64 appengine gccgo
|
||||||
|
|
||||||
|
package bn256
|
||||||
|
|
||||||
|
func gfpNeg(c, a *gfP) {
|
||||||
|
panic("unsupported architecture")
|
||||||
|
}
|
||||||
|
|
||||||
|
func gfpAdd(c, a, b *gfP) {
|
||||||
|
panic("unsupported architecture")
|
||||||
|
}
|
||||||
|
|
||||||
|
func gfpSub(c, a, b *gfP) {
|
||||||
|
panic("unsupported architecture")
|
||||||
|
}
|
||||||
|
|
||||||
|
func gfpMul(c, a, b *gfP) {
|
||||||
|
panic("unsupported architecture")
|
||||||
|
}
|
62
crypto/bn256/cloudflare/gfp_test.go
Normal file
62
crypto/bn256/cloudflare/gfp_test.go
Normal file
@ -0,0 +1,62 @@
|
|||||||
|
// +build amd64,!appengine,!gccgo
|
||||||
|
|
||||||
|
package bn256
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Tests that negation works the same way on both assembly-optimized and pure Go
|
||||||
|
// implementation.
|
||||||
|
func TestGFpNeg(t *testing.T) {
|
||||||
|
n := &gfP{0x0123456789abcdef, 0xfedcba9876543210, 0xdeadbeefdeadbeef, 0xfeebdaedfeebdaed}
|
||||||
|
w := &gfP{0xfedcba9876543211, 0x0123456789abcdef, 0x2152411021524110, 0x0114251201142512}
|
||||||
|
h := &gfP{}
|
||||||
|
|
||||||
|
gfpNeg(h, n)
|
||||||
|
if *h != *w {
|
||||||
|
t.Errorf("negation mismatch: have %#x, want %#x", *h, *w)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests that addition works the same way on both assembly-optimized and pure Go
|
||||||
|
// implementation.
|
||||||
|
func TestGFpAdd(t *testing.T) {
|
||||||
|
a := &gfP{0x0123456789abcdef, 0xfedcba9876543210, 0xdeadbeefdeadbeef, 0xfeebdaedfeebdaed}
|
||||||
|
b := &gfP{0xfedcba9876543210, 0x0123456789abcdef, 0xfeebdaedfeebdaed, 0xdeadbeefdeadbeef}
|
||||||
|
w := &gfP{0xc3df73e9278302b8, 0x687e956e978e3572, 0x254954275c18417f, 0xad354b6afc67f9b4}
|
||||||
|
h := &gfP{}
|
||||||
|
|
||||||
|
gfpAdd(h, a, b)
|
||||||
|
if *h != *w {
|
||||||
|
t.Errorf("addition mismatch: have %#x, want %#x", *h, *w)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests that subtraction works the same way on both assembly-optimized and pure Go
|
||||||
|
// implementation.
|
||||||
|
func TestGFpSub(t *testing.T) {
|
||||||
|
a := &gfP{0x0123456789abcdef, 0xfedcba9876543210, 0xdeadbeefdeadbeef, 0xfeebdaedfeebdaed}
|
||||||
|
b := &gfP{0xfedcba9876543210, 0x0123456789abcdef, 0xfeebdaedfeebdaed, 0xdeadbeefdeadbeef}
|
||||||
|
w := &gfP{0x02468acf13579bdf, 0xfdb97530eca86420, 0xdfc1e401dfc1e402, 0x203e1bfe203e1bfd}
|
||||||
|
h := &gfP{}
|
||||||
|
|
||||||
|
gfpSub(h, a, b)
|
||||||
|
if *h != *w {
|
||||||
|
t.Errorf("subtraction mismatch: have %#x, want %#x", *h, *w)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests that multiplication works the same way on both assembly-optimized and pure Go
|
||||||
|
// implementation.
|
||||||
|
func TestGFpMul(t *testing.T) {
|
||||||
|
a := &gfP{0x0123456789abcdef, 0xfedcba9876543210, 0xdeadbeefdeadbeef, 0xfeebdaedfeebdaed}
|
||||||
|
b := &gfP{0xfedcba9876543210, 0x0123456789abcdef, 0xfeebdaedfeebdaed, 0xdeadbeefdeadbeef}
|
||||||
|
w := &gfP{0xcbcbd377f7ad22d3, 0x3b89ba5d849379bf, 0x87b61627bd38b6d2, 0xc44052a2a0e654b2}
|
||||||
|
h := &gfP{}
|
||||||
|
|
||||||
|
gfpMul(h, a, b)
|
||||||
|
if *h != *w {
|
||||||
|
t.Errorf("multiplication mismatch: have %#x, want %#x", *h, *w)
|
||||||
|
}
|
||||||
|
}
|
73
crypto/bn256/cloudflare/main_test.go
Normal file
73
crypto/bn256/cloudflare/main_test.go
Normal file
@ -0,0 +1,73 @@
|
|||||||
|
// +build amd64,!appengine,!gccgo
|
||||||
|
|
||||||
|
package bn256
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"crypto/rand"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestRandomG2Marshal(t *testing.T) {
|
||||||
|
for i := 0; i < 10; i++ {
|
||||||
|
n, g2, err := RandomG2(rand.Reader)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
t.Logf("%d: %x\n", n, g2.Marshal())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPairings(t *testing.T) {
|
||||||
|
a1 := new(G1).ScalarBaseMult(bigFromBase10("1"))
|
||||||
|
a2 := new(G1).ScalarBaseMult(bigFromBase10("2"))
|
||||||
|
a37 := new(G1).ScalarBaseMult(bigFromBase10("37"))
|
||||||
|
an1 := new(G1).ScalarBaseMult(bigFromBase10("21888242871839275222246405745257275088548364400416034343698204186575808495616"))
|
||||||
|
|
||||||
|
b0 := new(G2).ScalarBaseMult(bigFromBase10("0"))
|
||||||
|
b1 := new(G2).ScalarBaseMult(bigFromBase10("1"))
|
||||||
|
b2 := new(G2).ScalarBaseMult(bigFromBase10("2"))
|
||||||
|
b27 := new(G2).ScalarBaseMult(bigFromBase10("27"))
|
||||||
|
b999 := new(G2).ScalarBaseMult(bigFromBase10("999"))
|
||||||
|
bn1 := new(G2).ScalarBaseMult(bigFromBase10("21888242871839275222246405745257275088548364400416034343698204186575808495616"))
|
||||||
|
|
||||||
|
p1 := Pair(a1, b1)
|
||||||
|
pn1 := Pair(a1, bn1)
|
||||||
|
np1 := Pair(an1, b1)
|
||||||
|
if pn1.String() != np1.String() {
|
||||||
|
t.Error("Pairing mismatch: e(a, -b) != e(-a, b)")
|
||||||
|
}
|
||||||
|
if !PairingCheck([]*G1{a1, an1}, []*G2{b1, b1}) {
|
||||||
|
t.Error("MultiAte check gave false negative!")
|
||||||
|
}
|
||||||
|
p0 := new(GT).Add(p1, pn1)
|
||||||
|
p0_2 := Pair(a1, b0)
|
||||||
|
if p0.String() != p0_2.String() {
|
||||||
|
t.Error("Pairing mismatch: e(a, b) * e(a, -b) != 1")
|
||||||
|
}
|
||||||
|
p0_3 := new(GT).ScalarMult(p1, bigFromBase10("21888242871839275222246405745257275088548364400416034343698204186575808495617"))
|
||||||
|
if p0.String() != p0_3.String() {
|
||||||
|
t.Error("Pairing mismatch: e(a, b) has wrong order")
|
||||||
|
}
|
||||||
|
p2 := Pair(a2, b1)
|
||||||
|
p2_2 := Pair(a1, b2)
|
||||||
|
p2_3 := new(GT).ScalarMult(p1, bigFromBase10("2"))
|
||||||
|
if p2.String() != p2_2.String() {
|
||||||
|
t.Error("Pairing mismatch: e(a, b * 2) != e(a * 2, b)")
|
||||||
|
}
|
||||||
|
if p2.String() != p2_3.String() {
|
||||||
|
t.Error("Pairing mismatch: e(a, b * 2) != e(a, b) ** 2")
|
||||||
|
}
|
||||||
|
if p2.String() == p1.String() {
|
||||||
|
t.Error("Pairing is degenerate!")
|
||||||
|
}
|
||||||
|
if PairingCheck([]*G1{a1, a1}, []*G2{b1, b1}) {
|
||||||
|
t.Error("MultiAte check gave false positive!")
|
||||||
|
}
|
||||||
|
p999 := Pair(a37, b27)
|
||||||
|
p999_2 := Pair(a1, b999)
|
||||||
|
if p999.String() != p999_2.String() {
|
||||||
|
t.Error("Pairing mismatch: e(a * 37, b * 27) != e(a, b * 999)")
|
||||||
|
}
|
||||||
|
}
|
181
crypto/bn256/cloudflare/mul.h
Normal file
181
crypto/bn256/cloudflare/mul.h
Normal file
@ -0,0 +1,181 @@
|
|||||||
|
#define mul(a0,a1,a2,a3, rb, stack) \
|
||||||
|
MOVQ a0, AX \
|
||||||
|
MULQ 0+rb \
|
||||||
|
MOVQ AX, R8 \
|
||||||
|
MOVQ DX, R9 \
|
||||||
|
MOVQ a0, AX \
|
||||||
|
MULQ 8+rb \
|
||||||
|
ADDQ AX, R9 \
|
||||||
|
ADCQ $0, DX \
|
||||||
|
MOVQ DX, R10 \
|
||||||
|
MOVQ a0, AX \
|
||||||
|
MULQ 16+rb \
|
||||||
|
ADDQ AX, R10 \
|
||||||
|
ADCQ $0, DX \
|
||||||
|
MOVQ DX, R11 \
|
||||||
|
MOVQ a0, AX \
|
||||||
|
MULQ 24+rb \
|
||||||
|
ADDQ AX, R11 \
|
||||||
|
ADCQ $0, DX \
|
||||||
|
MOVQ DX, R12 \
|
||||||
|
\
|
||||||
|
storeBlock(R8,R9,R10,R11, 0+stack) \
|
||||||
|
MOVQ R12, 32+stack \
|
||||||
|
\
|
||||||
|
MOVQ a1, AX \
|
||||||
|
MULQ 0+rb \
|
||||||
|
MOVQ AX, R8 \
|
||||||
|
MOVQ DX, R9 \
|
||||||
|
MOVQ a1, AX \
|
||||||
|
MULQ 8+rb \
|
||||||
|
ADDQ AX, R9 \
|
||||||
|
ADCQ $0, DX \
|
||||||
|
MOVQ DX, R10 \
|
||||||
|
MOVQ a1, AX \
|
||||||
|
MULQ 16+rb \
|
||||||
|
ADDQ AX, R10 \
|
||||||
|
ADCQ $0, DX \
|
||||||
|
MOVQ DX, R11 \
|
||||||
|
MOVQ a1, AX \
|
||||||
|
MULQ 24+rb \
|
||||||
|
ADDQ AX, R11 \
|
||||||
|
ADCQ $0, DX \
|
||||||
|
MOVQ DX, R12 \
|
||||||
|
\
|
||||||
|
ADDQ 8+stack, R8 \
|
||||||
|
ADCQ 16+stack, R9 \
|
||||||
|
ADCQ 24+stack, R10 \
|
||||||
|
ADCQ 32+stack, R11 \
|
||||||
|
ADCQ $0, R12 \
|
||||||
|
storeBlock(R8,R9,R10,R11, 8+stack) \
|
||||||
|
MOVQ R12, 40+stack \
|
||||||
|
\
|
||||||
|
MOVQ a2, AX \
|
||||||
|
MULQ 0+rb \
|
||||||
|
MOVQ AX, R8 \
|
||||||
|
MOVQ DX, R9 \
|
||||||
|
MOVQ a2, AX \
|
||||||
|
MULQ 8+rb \
|
||||||
|
ADDQ AX, R9 \
|
||||||
|
ADCQ $0, DX \
|
||||||
|
MOVQ DX, R10 \
|
||||||
|
MOVQ a2, AX \
|
||||||
|
MULQ 16+rb \
|
||||||
|
ADDQ AX, R10 \
|
||||||
|
ADCQ $0, DX \
|
||||||
|
MOVQ DX, R11 \
|
||||||
|
MOVQ a2, AX \
|
||||||
|
MULQ 24+rb \
|
||||||
|
ADDQ AX, R11 \
|
||||||
|
ADCQ $0, DX \
|
||||||
|
MOVQ DX, R12 \
|
||||||
|
\
|
||||||
|
ADDQ 16+stack, R8 \
|
||||||
|
ADCQ 24+stack, R9 \
|
||||||
|
ADCQ 32+stack, R10 \
|
||||||
|
ADCQ 40+stack, R11 \
|
||||||
|
ADCQ $0, R12 \
|
||||||
|
storeBlock(R8,R9,R10,R11, 16+stack) \
|
||||||
|
MOVQ R12, 48+stack \
|
||||||
|
\
|
||||||
|
MOVQ a3, AX \
|
||||||
|
MULQ 0+rb \
|
||||||
|
MOVQ AX, R8 \
|
||||||
|
MOVQ DX, R9 \
|
||||||
|
MOVQ a3, AX \
|
||||||
|
MULQ 8+rb \
|
||||||
|
ADDQ AX, R9 \
|
||||||
|
ADCQ $0, DX \
|
||||||
|
MOVQ DX, R10 \
|
||||||
|
MOVQ a3, AX \
|
||||||
|
MULQ 16+rb \
|
||||||
|
ADDQ AX, R10 \
|
||||||
|
ADCQ $0, DX \
|
||||||
|
MOVQ DX, R11 \
|
||||||
|
MOVQ a3, AX \
|
||||||
|
MULQ 24+rb \
|
||||||
|
ADDQ AX, R11 \
|
||||||
|
ADCQ $0, DX \
|
||||||
|
MOVQ DX, R12 \
|
||||||
|
\
|
||||||
|
ADDQ 24+stack, R8 \
|
||||||
|
ADCQ 32+stack, R9 \
|
||||||
|
ADCQ 40+stack, R10 \
|
||||||
|
ADCQ 48+stack, R11 \
|
||||||
|
ADCQ $0, R12 \
|
||||||
|
storeBlock(R8,R9,R10,R11, 24+stack) \
|
||||||
|
MOVQ R12, 56+stack
|
||||||
|
|
||||||
|
#define gfpReduce(stack) \
|
||||||
|
\ // m = (T * N') mod R, store m in R8:R9:R10:R11
|
||||||
|
MOVQ ·np+0(SB), AX \
|
||||||
|
MULQ 0+stack \
|
||||||
|
MOVQ AX, R8 \
|
||||||
|
MOVQ DX, R9 \
|
||||||
|
MOVQ ·np+0(SB), AX \
|
||||||
|
MULQ 8+stack \
|
||||||
|
ADDQ AX, R9 \
|
||||||
|
ADCQ $0, DX \
|
||||||
|
MOVQ DX, R10 \
|
||||||
|
MOVQ ·np+0(SB), AX \
|
||||||
|
MULQ 16+stack \
|
||||||
|
ADDQ AX, R10 \
|
||||||
|
ADCQ $0, DX \
|
||||||
|
MOVQ DX, R11 \
|
||||||
|
MOVQ ·np+0(SB), AX \
|
||||||
|
MULQ 24+stack \
|
||||||
|
ADDQ AX, R11 \
|
||||||
|
\
|
||||||
|
MOVQ ·np+8(SB), AX \
|
||||||
|
MULQ 0+stack \
|
||||||
|
MOVQ AX, R12 \
|
||||||
|
MOVQ DX, R13 \
|
||||||
|
MOVQ ·np+8(SB), AX \
|
||||||
|
MULQ 8+stack \
|
||||||
|
ADDQ AX, R13 \
|
||||||
|
ADCQ $0, DX \
|
||||||
|
MOVQ DX, R14 \
|
||||||
|
MOVQ ·np+8(SB), AX \
|
||||||
|
MULQ 16+stack \
|
||||||
|
ADDQ AX, R14 \
|
||||||
|
\
|
||||||
|
ADDQ R12, R9 \
|
||||||
|
ADCQ R13, R10 \
|
||||||
|
ADCQ R14, R11 \
|
||||||
|
\
|
||||||
|
MOVQ ·np+16(SB), AX \
|
||||||
|
MULQ 0+stack \
|
||||||
|
MOVQ AX, R12 \
|
||||||
|
MOVQ DX, R13 \
|
||||||
|
MOVQ ·np+16(SB), AX \
|
||||||
|
MULQ 8+stack \
|
||||||
|
ADDQ AX, R13 \
|
||||||
|
\
|
||||||
|
ADDQ R12, R10 \
|
||||||
|
ADCQ R13, R11 \
|
||||||
|
\
|
||||||
|
MOVQ ·np+24(SB), AX \
|
||||||
|
MULQ 0+stack \
|
||||||
|
ADDQ AX, R11 \
|
||||||
|
\
|
||||||
|
storeBlock(R8,R9,R10,R11, 64+stack) \
|
||||||
|
\
|
||||||
|
\ // m * N
|
||||||
|
mul(·p2+0(SB),·p2+8(SB),·p2+16(SB),·p2+24(SB), 64+stack, 96+stack) \
|
||||||
|
\
|
||||||
|
\ // Add the 512-bit intermediate to m*N
|
||||||
|
loadBlock(96+stack, R8,R9,R10,R11) \
|
||||||
|
loadBlock(128+stack, R12,R13,R14,R15) \
|
||||||
|
\
|
||||||
|
MOVQ $0, AX \
|
||||||
|
ADDQ 0+stack, R8 \
|
||||||
|
ADCQ 8+stack, R9 \
|
||||||
|
ADCQ 16+stack, R10 \
|
||||||
|
ADCQ 24+stack, R11 \
|
||||||
|
ADCQ 32+stack, R12 \
|
||||||
|
ADCQ 40+stack, R13 \
|
||||||
|
ADCQ 48+stack, R14 \
|
||||||
|
ADCQ 56+stack, R15 \
|
||||||
|
ADCQ $0, AX \
|
||||||
|
\
|
||||||
|
gfpCarry(R12,R13,R14,R15,AX, R8,R9,R10,R11,BX)
|
112
crypto/bn256/cloudflare/mul_bmi2.h
Normal file
112
crypto/bn256/cloudflare/mul_bmi2.h
Normal file
@ -0,0 +1,112 @@
|
|||||||
|
#define mulBMI2(a0,a1,a2,a3, rb) \
|
||||||
|
MOVQ a0, DX \
|
||||||
|
MOVQ $0, R13 \
|
||||||
|
MULXQ 0+rb, R8, R9 \
|
||||||
|
MULXQ 8+rb, AX, R10 \
|
||||||
|
ADDQ AX, R9 \
|
||||||
|
MULXQ 16+rb, AX, R11 \
|
||||||
|
ADCQ AX, R10 \
|
||||||
|
MULXQ 24+rb, AX, R12 \
|
||||||
|
ADCQ AX, R11 \
|
||||||
|
ADCQ $0, R12 \
|
||||||
|
ADCQ $0, R13 \
|
||||||
|
\
|
||||||
|
MOVQ a1, DX \
|
||||||
|
MOVQ $0, R14 \
|
||||||
|
MULXQ 0+rb, AX, BX \
|
||||||
|
ADDQ AX, R9 \
|
||||||
|
ADCQ BX, R10 \
|
||||||
|
MULXQ 16+rb, AX, BX \
|
||||||
|
ADCQ AX, R11 \
|
||||||
|
ADCQ BX, R12 \
|
||||||
|
ADCQ $0, R13 \
|
||||||
|
MULXQ 8+rb, AX, BX \
|
||||||
|
ADDQ AX, R10 \
|
||||||
|
ADCQ BX, R11 \
|
||||||
|
MULXQ 24+rb, AX, BX \
|
||||||
|
ADCQ AX, R12 \
|
||||||
|
ADCQ BX, R13 \
|
||||||
|
ADCQ $0, R14 \
|
||||||
|
\
|
||||||
|
MOVQ a2, DX \
|
||||||
|
MOVQ $0, R15 \
|
||||||
|
MULXQ 0+rb, AX, BX \
|
||||||
|
ADDQ AX, R10 \
|
||||||
|
ADCQ BX, R11 \
|
||||||
|
MULXQ 16+rb, AX, BX \
|
||||||
|
ADCQ AX, R12 \
|
||||||
|
ADCQ BX, R13 \
|
||||||
|
ADCQ $0, R14 \
|
||||||
|
MULXQ 8+rb, AX, BX \
|
||||||
|
ADDQ AX, R11 \
|
||||||
|
ADCQ BX, R12 \
|
||||||
|
MULXQ 24+rb, AX, BX \
|
||||||
|
ADCQ AX, R13 \
|
||||||
|
ADCQ BX, R14 \
|
||||||
|
ADCQ $0, R15 \
|
||||||
|
\
|
||||||
|
MOVQ a3, DX \
|
||||||
|
MULXQ 0+rb, AX, BX \
|
||||||
|
ADDQ AX, R11 \
|
||||||
|
ADCQ BX, R12 \
|
||||||
|
MULXQ 16+rb, AX, BX \
|
||||||
|
ADCQ AX, R13 \
|
||||||
|
ADCQ BX, R14 \
|
||||||
|
ADCQ $0, R15 \
|
||||||
|
MULXQ 8+rb, AX, BX \
|
||||||
|
ADDQ AX, R12 \
|
||||||
|
ADCQ BX, R13 \
|
||||||
|
MULXQ 24+rb, AX, BX \
|
||||||
|
ADCQ AX, R14 \
|
||||||
|
ADCQ BX, R15
|
||||||
|
|
||||||
|
#define gfpReduceBMI2() \
|
||||||
|
\ // m = (T * N') mod R, store m in R8:R9:R10:R11
|
||||||
|
MOVQ ·np+0(SB), DX \
|
||||||
|
MULXQ 0(SP), R8, R9 \
|
||||||
|
MULXQ 8(SP), AX, R10 \
|
||||||
|
ADDQ AX, R9 \
|
||||||
|
MULXQ 16(SP), AX, R11 \
|
||||||
|
ADCQ AX, R10 \
|
||||||
|
MULXQ 24(SP), AX, BX \
|
||||||
|
ADCQ AX, R11 \
|
||||||
|
\
|
||||||
|
MOVQ ·np+8(SB), DX \
|
||||||
|
MULXQ 0(SP), AX, BX \
|
||||||
|
ADDQ AX, R9 \
|
||||||
|
ADCQ BX, R10 \
|
||||||
|
MULXQ 16(SP), AX, BX \
|
||||||
|
ADCQ AX, R11 \
|
||||||
|
MULXQ 8(SP), AX, BX \
|
||||||
|
ADDQ AX, R10 \
|
||||||
|
ADCQ BX, R11 \
|
||||||
|
\
|
||||||
|
MOVQ ·np+16(SB), DX \
|
||||||
|
MULXQ 0(SP), AX, BX \
|
||||||
|
ADDQ AX, R10 \
|
||||||
|
ADCQ BX, R11 \
|
||||||
|
MULXQ 8(SP), AX, BX \
|
||||||
|
ADDQ AX, R11 \
|
||||||
|
\
|
||||||
|
MOVQ ·np+24(SB), DX \
|
||||||
|
MULXQ 0(SP), AX, BX \
|
||||||
|
ADDQ AX, R11 \
|
||||||
|
\
|
||||||
|
storeBlock(R8,R9,R10,R11, 64(SP)) \
|
||||||
|
\
|
||||||
|
\ // m * N
|
||||||
|
mulBMI2(·p2+0(SB),·p2+8(SB),·p2+16(SB),·p2+24(SB), 64(SP)) \
|
||||||
|
\
|
||||||
|
\ // Add the 512-bit intermediate to m*N
|
||||||
|
MOVQ $0, AX \
|
||||||
|
ADDQ 0(SP), R8 \
|
||||||
|
ADCQ 8(SP), R9 \
|
||||||
|
ADCQ 16(SP), R10 \
|
||||||
|
ADCQ 24(SP), R11 \
|
||||||
|
ADCQ 32(SP), R12 \
|
||||||
|
ADCQ 40(SP), R13 \
|
||||||
|
ADCQ 48(SP), R14 \
|
||||||
|
ADCQ 56(SP), R15 \
|
||||||
|
ADCQ $0, AX \
|
||||||
|
\
|
||||||
|
gfpCarry(R12,R13,R14,R15,AX, R8,R9,R10,R11,BX)
|
271
crypto/bn256/cloudflare/optate.go
Normal file
271
crypto/bn256/cloudflare/optate.go
Normal file
@ -0,0 +1,271 @@
|
|||||||
|
package bn256
|
||||||
|
|
||||||
|
func lineFunctionAdd(r, p *twistPoint, q *curvePoint, r2 *gfP2) (a, b, c *gfP2, rOut *twistPoint) {
|
||||||
|
// See the mixed addition algorithm from "Faster Computation of the
|
||||||
|
// Tate Pairing", http://arxiv.org/pdf/0904.0854v3.pdf
|
||||||
|
B := (&gfP2{}).Mul(&p.x, &r.t)
|
||||||
|
|
||||||
|
D := (&gfP2{}).Add(&p.y, &r.z)
|
||||||
|
D.Square(D).Sub(D, r2).Sub(D, &r.t).Mul(D, &r.t)
|
||||||
|
|
||||||
|
H := (&gfP2{}).Sub(B, &r.x)
|
||||||
|
I := (&gfP2{}).Square(H)
|
||||||
|
|
||||||
|
E := (&gfP2{}).Add(I, I)
|
||||||
|
E.Add(E, E)
|
||||||
|
|
||||||
|
J := (&gfP2{}).Mul(H, E)
|
||||||
|
|
||||||
|
L1 := (&gfP2{}).Sub(D, &r.y)
|
||||||
|
L1.Sub(L1, &r.y)
|
||||||
|
|
||||||
|
V := (&gfP2{}).Mul(&r.x, E)
|
||||||
|
|
||||||
|
rOut = &twistPoint{}
|
||||||
|
rOut.x.Square(L1).Sub(&rOut.x, J).Sub(&rOut.x, V).Sub(&rOut.x, V)
|
||||||
|
|
||||||
|
rOut.z.Add(&r.z, H).Square(&rOut.z).Sub(&rOut.z, &r.t).Sub(&rOut.z, I)
|
||||||
|
|
||||||
|
t := (&gfP2{}).Sub(V, &rOut.x)
|
||||||
|
t.Mul(t, L1)
|
||||||
|
t2 := (&gfP2{}).Mul(&r.y, J)
|
||||||
|
t2.Add(t2, t2)
|
||||||
|
rOut.y.Sub(t, t2)
|
||||||
|
|
||||||
|
rOut.t.Square(&rOut.z)
|
||||||
|
|
||||||
|
t.Add(&p.y, &rOut.z).Square(t).Sub(t, r2).Sub(t, &rOut.t)
|
||||||
|
|
||||||
|
t2.Mul(L1, &p.x)
|
||||||
|
t2.Add(t2, t2)
|
||||||
|
a = (&gfP2{}).Sub(t2, t)
|
||||||
|
|
||||||
|
c = (&gfP2{}).MulScalar(&rOut.z, &q.y)
|
||||||
|
c.Add(c, c)
|
||||||
|
|
||||||
|
b = (&gfP2{}).Neg(L1)
|
||||||
|
b.MulScalar(b, &q.x).Add(b, b)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func lineFunctionDouble(r *twistPoint, q *curvePoint) (a, b, c *gfP2, rOut *twistPoint) {
|
||||||
|
// See the doubling algorithm for a=0 from "Faster Computation of the
|
||||||
|
// Tate Pairing", http://arxiv.org/pdf/0904.0854v3.pdf
|
||||||
|
A := (&gfP2{}).Square(&r.x)
|
||||||
|
B := (&gfP2{}).Square(&r.y)
|
||||||
|
C := (&gfP2{}).Square(B)
|
||||||
|
|
||||||
|
D := (&gfP2{}).Add(&r.x, B)
|
||||||
|
D.Square(D).Sub(D, A).Sub(D, C).Add(D, D)
|
||||||
|
|
||||||
|
E := (&gfP2{}).Add(A, A)
|
||||||
|
E.Add(E, A)
|
||||||
|
|
||||||
|
G := (&gfP2{}).Square(E)
|
||||||
|
|
||||||
|
rOut = &twistPoint{}
|
||||||
|
rOut.x.Sub(G, D).Sub(&rOut.x, D)
|
||||||
|
|
||||||
|
rOut.z.Add(&r.y, &r.z).Square(&rOut.z).Sub(&rOut.z, B).Sub(&rOut.z, &r.t)
|
||||||
|
|
||||||
|
rOut.y.Sub(D, &rOut.x).Mul(&rOut.y, E)
|
||||||
|
t := (&gfP2{}).Add(C, C)
|
||||||
|
t.Add(t, t).Add(t, t)
|
||||||
|
rOut.y.Sub(&rOut.y, t)
|
||||||
|
|
||||||
|
rOut.t.Square(&rOut.z)
|
||||||
|
|
||||||
|
t.Mul(E, &r.t).Add(t, t)
|
||||||
|
b = (&gfP2{}).Neg(t)
|
||||||
|
b.MulScalar(b, &q.x)
|
||||||
|
|
||||||
|
a = (&gfP2{}).Add(&r.x, E)
|
||||||
|
a.Square(a).Sub(a, A).Sub(a, G)
|
||||||
|
t.Add(B, B).Add(t, t)
|
||||||
|
a.Sub(a, t)
|
||||||
|
|
||||||
|
c = (&gfP2{}).Mul(&rOut.z, &r.t)
|
||||||
|
c.Add(c, c).MulScalar(c, &q.y)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func mulLine(ret *gfP12, a, b, c *gfP2) {
|
||||||
|
a2 := &gfP6{}
|
||||||
|
a2.y.Set(a)
|
||||||
|
a2.z.Set(b)
|
||||||
|
a2.Mul(a2, &ret.x)
|
||||||
|
t3 := (&gfP6{}).MulScalar(&ret.y, c)
|
||||||
|
|
||||||
|
t := (&gfP2{}).Add(b, c)
|
||||||
|
t2 := &gfP6{}
|
||||||
|
t2.y.Set(a)
|
||||||
|
t2.z.Set(t)
|
||||||
|
ret.x.Add(&ret.x, &ret.y)
|
||||||
|
|
||||||
|
ret.y.Set(t3)
|
||||||
|
|
||||||
|
ret.x.Mul(&ret.x, t2).Sub(&ret.x, a2).Sub(&ret.x, &ret.y)
|
||||||
|
a2.MulTau(a2)
|
||||||
|
ret.y.Add(&ret.y, a2)
|
||||||
|
}
|
||||||
|
|
||||||
|
// sixuPlus2NAF is 6u+2 in non-adjacent form.
|
||||||
|
var sixuPlus2NAF = []int8{0, 0, 0, 1, 0, 1, 0, -1, 0, 0, 1, -1, 0, 0, 1, 0,
|
||||||
|
0, 1, 1, 0, -1, 0, 0, 1, 0, -1, 0, 0, 0, 0, 1, 1,
|
||||||
|
1, 0, 0, -1, 0, 0, 1, 0, 0, 0, 0, 0, -1, 0, 0, 1,
|
||||||
|
1, 0, 0, -1, 0, 0, 0, 1, 1, 0, -1, 0, 0, 1, 0, 1, 1}
|
||||||
|
|
||||||
|
// miller implements the Miller loop for calculating the Optimal Ate pairing.
|
||||||
|
// See algorithm 1 from http://cryptojedi.org/papers/dclxvi-20100714.pdf
|
||||||
|
func miller(q *twistPoint, p *curvePoint) *gfP12 {
|
||||||
|
ret := (&gfP12{}).SetOne()
|
||||||
|
|
||||||
|
aAffine := &twistPoint{}
|
||||||
|
aAffine.Set(q)
|
||||||
|
aAffine.MakeAffine()
|
||||||
|
|
||||||
|
bAffine := &curvePoint{}
|
||||||
|
bAffine.Set(p)
|
||||||
|
bAffine.MakeAffine()
|
||||||
|
|
||||||
|
minusA := &twistPoint{}
|
||||||
|
minusA.Neg(aAffine)
|
||||||
|
|
||||||
|
r := &twistPoint{}
|
||||||
|
r.Set(aAffine)
|
||||||
|
|
||||||
|
r2 := (&gfP2{}).Square(&aAffine.y)
|
||||||
|
|
||||||
|
for i := len(sixuPlus2NAF) - 1; i > 0; i-- {
|
||||||
|
a, b, c, newR := lineFunctionDouble(r, bAffine)
|
||||||
|
if i != len(sixuPlus2NAF)-1 {
|
||||||
|
ret.Square(ret)
|
||||||
|
}
|
||||||
|
|
||||||
|
mulLine(ret, a, b, c)
|
||||||
|
r = newR
|
||||||
|
|
||||||
|
switch sixuPlus2NAF[i-1] {
|
||||||
|
case 1:
|
||||||
|
a, b, c, newR = lineFunctionAdd(r, aAffine, bAffine, r2)
|
||||||
|
case -1:
|
||||||
|
a, b, c, newR = lineFunctionAdd(r, minusA, bAffine, r2)
|
||||||
|
default:
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
mulLine(ret, a, b, c)
|
||||||
|
r = newR
|
||||||
|
}
|
||||||
|
|
||||||
|
// In order to calculate Q1 we have to convert q from the sextic twist
|
||||||
|
// to the full GF(p^12) group, apply the Frobenius there, and convert
|
||||||
|
// back.
|
||||||
|
//
|
||||||
|
// The twist isomorphism is (x', y') -> (xω², yω³). If we consider just
|
||||||
|
// x for a moment, then after applying the Frobenius, we have x̄ω^(2p)
|
||||||
|
// where x̄ is the conjugate of x. If we are going to apply the inverse
|
||||||
|
// isomorphism we need a value with a single coefficient of ω² so we
|
||||||
|
// rewrite this as x̄ω^(2p-2)ω². ξ⁶ = ω and, due to the construction of
|
||||||
|
// p, 2p-2 is a multiple of six. Therefore we can rewrite as
|
||||||
|
// x̄ξ^((p-1)/3)ω² and applying the inverse isomorphism eliminates the
|
||||||
|
// ω².
|
||||||
|
//
|
||||||
|
// A similar argument can be made for the y value.
|
||||||
|
|
||||||
|
q1 := &twistPoint{}
|
||||||
|
q1.x.Conjugate(&aAffine.x).Mul(&q1.x, xiToPMinus1Over3)
|
||||||
|
q1.y.Conjugate(&aAffine.y).Mul(&q1.y, xiToPMinus1Over2)
|
||||||
|
q1.z.SetOne()
|
||||||
|
q1.t.SetOne()
|
||||||
|
|
||||||
|
// For Q2 we are applying the p² Frobenius. The two conjugations cancel
|
||||||
|
// out and we are left only with the factors from the isomorphism. In
|
||||||
|
// the case of x, we end up with a pure number which is why
|
||||||
|
// xiToPSquaredMinus1Over3 is ∈ GF(p). With y we get a factor of -1. We
|
||||||
|
// ignore this to end up with -Q2.
|
||||||
|
|
||||||
|
minusQ2 := &twistPoint{}
|
||||||
|
minusQ2.x.MulScalar(&aAffine.x, xiToPSquaredMinus1Over3)
|
||||||
|
minusQ2.y.Set(&aAffine.y)
|
||||||
|
minusQ2.z.SetOne()
|
||||||
|
minusQ2.t.SetOne()
|
||||||
|
|
||||||
|
r2.Square(&q1.y)
|
||||||
|
a, b, c, newR := lineFunctionAdd(r, q1, bAffine, r2)
|
||||||
|
mulLine(ret, a, b, c)
|
||||||
|
r = newR
|
||||||
|
|
||||||
|
r2.Square(&minusQ2.y)
|
||||||
|
a, b, c, newR = lineFunctionAdd(r, minusQ2, bAffine, r2)
|
||||||
|
mulLine(ret, a, b, c)
|
||||||
|
r = newR
|
||||||
|
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
// finalExponentiation computes the (p¹²-1)/Order-th power of an element of
// GF(p¹²) to obtain an element of GT (steps 13-15 of algorithm 1 from
// http://cryptojedi.org/papers/dclxvi-20100714.pdf)
func finalExponentiation(in *gfP12) *gfP12 {
	t1 := &gfP12{}

	// This is the p^6-Frobenius
	t1.x.Neg(&in.x)
	t1.y.Set(&in.y)

	// t1 = in^(p^6) / in, the "easy" part of the final exponentiation.
	inv := &gfP12{}
	inv.Invert(in)
	t1.Mul(t1, inv)

	// Raise to the (p^2+1) factor via the p²-Frobenius.
	t2 := (&gfP12{}).FrobeniusP2(t1)
	t1.Mul(t1, t2)

	// The remaining "hard" part is evaluated as a fixed addition chain in
	// the exponent, built from Frobenius images and powers of the BN
	// parameter u. NOTE(review): the chain below matches the cited paper's
	// algorithm 1; do not reorder these operations.
	fp := (&gfP12{}).Frobenius(t1)
	fp2 := (&gfP12{}).FrobeniusP2(t1)
	fp3 := (&gfP12{}).Frobenius(fp2)

	// u-power ladder: t1^u, t1^(u²), t1^(u³).
	fu := (&gfP12{}).Exp(t1, u)
	fu2 := (&gfP12{}).Exp(fu, u)
	fu3 := (&gfP12{}).Exp(fu2, u)

	y3 := (&gfP12{}).Frobenius(fu)
	fu2p := (&gfP12{}).Frobenius(fu2)
	fu3p := (&gfP12{}).Frobenius(fu3)
	y2 := (&gfP12{}).FrobeniusP2(fu2)

	y0 := &gfP12{}
	y0.Mul(fp, fp2).Mul(y0, fp3)

	// Conjugation here acts as inversion in the cyclotomic subgroup.
	y1 := (&gfP12{}).Conjugate(t1)
	y5 := (&gfP12{}).Conjugate(fu2)
	y3.Conjugate(y3)
	y4 := (&gfP12{}).Mul(fu, fu2p)
	y4.Conjugate(y4)

	y6 := (&gfP12{}).Mul(fu3, fu3p)
	y6.Conjugate(y6)

	// Combine y0..y6 with the paper's square-and-multiply schedule.
	t0 := (&gfP12{}).Square(y6)
	t0.Mul(t0, y4).Mul(t0, y5)
	t1.Mul(y3, y5).Mul(t1, t0)
	t0.Mul(t0, y2)
	t1.Square(t1).Mul(t1, t0).Square(t1)
	t0.Mul(t1, y1)
	t1.Mul(t1, y0)
	t0.Square(t0).Mul(t0, t1)

	return t0
}
|
||||||
|
|
||||||
|
func optimalAte(a *twistPoint, b *curvePoint) *gfP12 {
|
||||||
|
e := miller(a, b)
|
||||||
|
ret := finalExponentiation(e)
|
||||||
|
|
||||||
|
if a.IsInfinity() || b.IsInfinity() {
|
||||||
|
ret.SetOne()
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
|
204
crypto/bn256/cloudflare/twist.go
Normal file
204
crypto/bn256/cloudflare/twist.go
Normal file
@ -0,0 +1,204 @@
|
|||||||
|
package bn256
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/big"
|
||||||
|
)
|
||||||
|
|
||||||
|
// twistPoint implements the elliptic curve y²=x³+3/ξ over GF(p²). Points are
// kept in Jacobian form and t=z² when valid. The group G₂ is the set of
// n-torsion points of this curve over GF(p²) (where n = Order)
type twistPoint struct {
	// x, y, z are Jacobian coordinates; t caches z² when the point is in a
	// valid state. A zero z encodes the point at infinity (see IsInfinity).
	x, y, z, t gfP2
}
|
||||||
|
|
||||||
|
// twistB is the b coefficient 3/ξ ∈ GF(p²) of the twisted curve equation
// y² = x³ + 3/ξ; IsOnCurve adds it when verifying points. The limbs are the
// raw internal field representation — presumably Montgomery form, matching
// the rest of the package; confirm against gfP before editing.
var twistB = &gfP2{
	gfP{0x38e7ecccd1dcff67, 0x65f0b37d93ce0d3e, 0xd749d0dd22ac00aa, 0x0141b9ce4a688d4d},
	gfP{0x3bf938e377b802a8, 0x020b1b273633535d, 0x26b7edf049755260, 0x2514c6324384a86d},
}
|
||||||
|
|
||||||
|
// twistGen is the generator of group G₂.
// It is stored in Jacobian coordinates; z and t are both gfP2{0, 1} —
// presumably the field one element (so the point is affine and t = z²
// holds); verify against gfP2.SetOne before relying on this.
var twistGen = &twistPoint{
	gfP2{
		gfP{0xafb4737da84c6140, 0x6043dd5a5802d8c4, 0x09e950fc52a02f86, 0x14fef0833aea7b6b},
		gfP{0x8e83b5d102bc2026, 0xdceb1935497b0172, 0xfbb8264797811adf, 0x19573841af96503b},
	},
	gfP2{
		gfP{0x64095b56c71856ee, 0xdc57f922327d3cbb, 0x55f935be33351076, 0x0da4a0e693fd6482},
		gfP{0x619dfa9d886be9f6, 0xfe7fd297f59e9b78, 0xff9e1a62231b7dfe, 0x28fd7eebae9e4206},
	},
	gfP2{*newGFp(0), *newGFp(1)},
	gfP2{*newGFp(0), *newGFp(1)},
}
|
||||||
|
|
||||||
|
func (c *twistPoint) String() string {
|
||||||
|
c.MakeAffine()
|
||||||
|
x, y := gfP2Decode(&c.x), gfP2Decode(&c.y)
|
||||||
|
return "(" + x.String() + ", " + y.String() + ")"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *twistPoint) Set(a *twistPoint) {
|
||||||
|
c.x.Set(&a.x)
|
||||||
|
c.y.Set(&a.y)
|
||||||
|
c.z.Set(&a.z)
|
||||||
|
c.t.Set(&a.t)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsOnCurve returns true iff c is on the curve.
// It also verifies that c lies in the n-torsion subgroup G₂, not merely on
// the curve over GF(p²). Note that it normalizes c to affine coordinates in
// place as a side effect.
func (c *twistPoint) IsOnCurve() bool {
	// The equation check below assumes z = 1.
	c.MakeAffine()
	if c.IsInfinity() {
		return true
	}

	// Verify the affine curve equation y² = x³ + twistB.
	y2, x3 := &gfP2{}, &gfP2{}
	y2.Square(&c.y)
	x3.Square(&c.x).Mul(x3, &c.x).Add(x3, twistB)

	if *y2 != *x3 {
		return false
	}
	// Subgroup check: multiplying a G₂ element by the group order must give
	// the point at infinity (encoded as z == 0).
	cneg := &twistPoint{}
	cneg.Mul(c, Order)
	return cneg.z.IsZero()
}
|
||||||
|
|
||||||
|
func (c *twistPoint) SetInfinity() {
|
||||||
|
c.x.SetZero()
|
||||||
|
c.y.SetOne()
|
||||||
|
c.z.SetZero()
|
||||||
|
c.t.SetZero()
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsInfinity reports whether c is the point at infinity, which this package
// encodes as a zero z coordinate.
func (c *twistPoint) IsInfinity() bool {
	return c.z.IsZero()
}
|
||||||
|
|
||||||
|
// Add sets c = a + b using Jacobian-coordinate addition over GF(p²).
// Doubling (a == b) and infinite inputs are handled explicitly; the general
// case follows the add-2007-bl formulas cited below. Note that when the two
// inputs share an x coordinate but have different y values, h is zero and
// the final multiplication leaves c.z zero, i.e. the point at infinity.
func (c *twistPoint) Add(a, b *twistPoint) {
	// For additional comments, see the same function in curve.go.

	if a.IsInfinity() {
		c.Set(b)
		return
	}
	if b.IsInfinity() {
		c.Set(a)
		return
	}

	// See http://hyperelliptic.org/EFD/g1p/auto-code/shortw/jacobian-0/addition/add-2007-bl.op3
	z12 := (&gfP2{}).Square(&a.z)
	z22 := (&gfP2{}).Square(&b.z)
	u1 := (&gfP2{}).Mul(&a.x, z22)
	u2 := (&gfP2{}).Mul(&b.x, z12)

	t := (&gfP2{}).Mul(&b.z, z22)
	s1 := (&gfP2{}).Mul(&a.y, t)

	t.Mul(&a.z, z12)
	s2 := (&gfP2{}).Mul(&b.y, t)

	// h = u2 - u1; h == 0 means the points share an x coordinate.
	h := (&gfP2{}).Sub(u2, u1)
	xEqual := h.IsZero()

	t.Add(h, h)
	i := (&gfP2{}).Square(t)
	j := (&gfP2{}).Mul(h, i)

	t.Sub(s2, s1)
	yEqual := t.IsZero()
	if xEqual && yEqual {
		// Same point: the addition formulas degenerate, so double instead.
		c.Double(a)
		return
	}
	r := (&gfP2{}).Add(t, t)

	v := (&gfP2{}).Mul(u1, i)

	// x3 = r² - j - 2v
	t4 := (&gfP2{}).Square(r)
	t.Add(v, v)
	t6 := (&gfP2{}).Sub(t4, j)
	c.x.Sub(t6, t)

	// y3 = r(v - x3) - 2·s1·j
	t.Sub(v, &c.x) // t7
	t4.Mul(s1, j)  // t8
	t6.Add(t4, t4) // t9
	t4.Mul(r, t)   // t10
	c.y.Sub(t4, t6)

	// z3 = ((z1 + z2)² - z1² - z2²)·h
	t.Add(&a.z, &b.z) // t11
	t4.Square(t)      // t12
	t.Sub(t4, z12)    // t13
	t4.Sub(t, z22)    // t14
	c.z.Mul(t4, h)
}
|
||||||
|
|
||||||
|
// Double sets c = 2·a using Jacobian-coordinate doubling over GF(p²),
// following the dbl-2009-l formulas cited below. Intermediates are named to
// match the reference (A = x², B = y², C = B², d = 4·x·B, e = 3·A, f = e²).
func (c *twistPoint) Double(a *twistPoint) {
	// See http://hyperelliptic.org/EFD/g1p/auto-code/shortw/jacobian-0/doubling/dbl-2009-l.op3
	A := (&gfP2{}).Square(&a.x)
	B := (&gfP2{}).Square(&a.y)
	C := (&gfP2{}).Square(B)

	// d = 2·((x + B)² - A - C)
	t := (&gfP2{}).Add(&a.x, B)
	t2 := (&gfP2{}).Square(t)
	t.Sub(t2, A)
	t2.Sub(t, C)
	d := (&gfP2{}).Add(t2, t2)
	t.Add(A, A)
	e := (&gfP2{}).Add(t, A)
	f := (&gfP2{}).Square(e)

	// x3 = f - 2d
	t.Add(d, d)
	c.x.Sub(f, t)

	// y3 = e·(d - x3) - 8·C
	t.Add(C, C)
	t2.Add(t, t)
	t.Add(t2, t2)
	c.y.Sub(d, &c.x)
	t2.Mul(e, &c.y)
	c.y.Sub(t2, t)

	// z3 = 2·y·z
	t.Mul(&a.y, &a.z)
	c.z.Add(t, t)
}
|
||||||
|
|
||||||
|
func (c *twistPoint) Mul(a *twistPoint, scalar *big.Int) {
|
||||||
|
sum, t := &twistPoint{}, &twistPoint{}
|
||||||
|
|
||||||
|
for i := scalar.BitLen(); i >= 0; i-- {
|
||||||
|
t.Double(sum)
|
||||||
|
if scalar.Bit(i) != 0 {
|
||||||
|
sum.Add(t, a)
|
||||||
|
} else {
|
||||||
|
sum.Set(t)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
c.Set(sum)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MakeAffine converts c to affine form (z = 1, t = 1) in place. If c is the
// point at infinity it is instead left in the canonical infinity encoding
// (x = 0, y = 1, z = t = 0). Already-affine points are returned untouched.
func (c *twistPoint) MakeAffine() {
	if c.z.IsOne() {
		return
	} else if c.z.IsZero() {
		c.x.SetZero()
		c.y.SetOne()
		c.t.SetZero()
		return
	}

	// x' = x/z², y' = y/z³, computed with a single field inversion of z.
	zInv := (&gfP2{}).Invert(&c.z)
	t := (&gfP2{}).Mul(&c.y, zInv)
	zInv2 := (&gfP2{}).Square(zInv)
	c.y.Mul(t, zInv2)
	t.Mul(&c.x, zInv2)
	c.x.Set(t)
	c.z.SetOne()
	c.t.SetOne()
}
|
||||||
|
|
||||||
|
func (c *twistPoint) Neg(a *twistPoint) {
|
||||||
|
c.x.Set(&a.x)
|
||||||
|
c.y.Neg(&a.y)
|
||||||
|
c.z.Set(&a.z)
|
||||||
|
c.t.SetZero()
|
||||||
|
}
|
@ -18,6 +18,7 @@ package bn256
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
|
"errors"
|
||||||
"io"
|
"io"
|
||||||
"math/big"
|
"math/big"
|
||||||
)
|
)
|
||||||
@ -115,21 +116,25 @@ func (n *G1) Marshal() []byte {
|
|||||||
|
|
||||||
// Unmarshal sets e to the result of converting the output of Marshal back into
|
// Unmarshal sets e to the result of converting the output of Marshal back into
|
||||||
// a group element and then returns e.
|
// a group element and then returns e.
|
||||||
func (e *G1) Unmarshal(m []byte) (*G1, bool) {
|
func (e *G1) Unmarshal(m []byte) ([]byte, error) {
|
||||||
// Each value is a 256-bit number.
|
// Each value is a 256-bit number.
|
||||||
const numBytes = 256 / 8
|
const numBytes = 256 / 8
|
||||||
|
|
||||||
if len(m) != 2*numBytes {
|
if len(m) != 2*numBytes {
|
||||||
return nil, false
|
return nil, errors.New("bn256: not enough data")
|
||||||
}
|
}
|
||||||
|
// Unmarshal the points and check their caps
|
||||||
if e.p == nil {
|
if e.p == nil {
|
||||||
e.p = newCurvePoint(nil)
|
e.p = newCurvePoint(nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
e.p.x.SetBytes(m[0*numBytes : 1*numBytes])
|
e.p.x.SetBytes(m[0*numBytes : 1*numBytes])
|
||||||
|
if e.p.x.Cmp(P) >= 0 {
|
||||||
|
return nil, errors.New("bn256: coordinate exceeds modulus")
|
||||||
|
}
|
||||||
e.p.y.SetBytes(m[1*numBytes : 2*numBytes])
|
e.p.y.SetBytes(m[1*numBytes : 2*numBytes])
|
||||||
|
if e.p.y.Cmp(P) >= 0 {
|
||||||
|
return nil, errors.New("bn256: coordinate exceeds modulus")
|
||||||
|
}
|
||||||
|
// Ensure the point is on the curve
|
||||||
if e.p.x.Sign() == 0 && e.p.y.Sign() == 0 {
|
if e.p.x.Sign() == 0 && e.p.y.Sign() == 0 {
|
||||||
// This is the point at infinity.
|
// This is the point at infinity.
|
||||||
e.p.y.SetInt64(1)
|
e.p.y.SetInt64(1)
|
||||||
@ -140,11 +145,10 @@ func (e *G1) Unmarshal(m []byte) (*G1, bool) {
|
|||||||
e.p.t.SetInt64(1)
|
e.p.t.SetInt64(1)
|
||||||
|
|
||||||
if !e.p.IsOnCurve() {
|
if !e.p.IsOnCurve() {
|
||||||
return nil, false
|
return nil, errors.New("bn256: malformed point")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
return m[2*numBytes:], nil
|
||||||
return e, true
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// G2 is an abstract cyclic group. The zero value is suitable for use as the
|
// G2 is an abstract cyclic group. The zero value is suitable for use as the
|
||||||
@ -233,23 +237,33 @@ func (n *G2) Marshal() []byte {
|
|||||||
|
|
||||||
// Unmarshal sets e to the result of converting the output of Marshal back into
|
// Unmarshal sets e to the result of converting the output of Marshal back into
|
||||||
// a group element and then returns e.
|
// a group element and then returns e.
|
||||||
func (e *G2) Unmarshal(m []byte) (*G2, bool) {
|
func (e *G2) Unmarshal(m []byte) ([]byte, error) {
|
||||||
// Each value is a 256-bit number.
|
// Each value is a 256-bit number.
|
||||||
const numBytes = 256 / 8
|
const numBytes = 256 / 8
|
||||||
|
|
||||||
if len(m) != 4*numBytes {
|
if len(m) != 4*numBytes {
|
||||||
return nil, false
|
return nil, errors.New("bn256: not enough data")
|
||||||
}
|
}
|
||||||
|
// Unmarshal the points and check their caps
|
||||||
if e.p == nil {
|
if e.p == nil {
|
||||||
e.p = newTwistPoint(nil)
|
e.p = newTwistPoint(nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
e.p.x.x.SetBytes(m[0*numBytes : 1*numBytes])
|
e.p.x.x.SetBytes(m[0*numBytes : 1*numBytes])
|
||||||
|
if e.p.x.x.Cmp(P) >= 0 {
|
||||||
|
return nil, errors.New("bn256: coordinate exceeds modulus")
|
||||||
|
}
|
||||||
e.p.x.y.SetBytes(m[1*numBytes : 2*numBytes])
|
e.p.x.y.SetBytes(m[1*numBytes : 2*numBytes])
|
||||||
|
if e.p.x.y.Cmp(P) >= 0 {
|
||||||
|
return nil, errors.New("bn256: coordinate exceeds modulus")
|
||||||
|
}
|
||||||
e.p.y.x.SetBytes(m[2*numBytes : 3*numBytes])
|
e.p.y.x.SetBytes(m[2*numBytes : 3*numBytes])
|
||||||
|
if e.p.y.x.Cmp(P) >= 0 {
|
||||||
|
return nil, errors.New("bn256: coordinate exceeds modulus")
|
||||||
|
}
|
||||||
e.p.y.y.SetBytes(m[3*numBytes : 4*numBytes])
|
e.p.y.y.SetBytes(m[3*numBytes : 4*numBytes])
|
||||||
|
if e.p.y.y.Cmp(P) >= 0 {
|
||||||
|
return nil, errors.New("bn256: coordinate exceeds modulus")
|
||||||
|
}
|
||||||
|
// Ensure the point is on the curve
|
||||||
if e.p.x.x.Sign() == 0 &&
|
if e.p.x.x.Sign() == 0 &&
|
||||||
e.p.x.y.Sign() == 0 &&
|
e.p.x.y.Sign() == 0 &&
|
||||||
e.p.y.x.Sign() == 0 &&
|
e.p.y.x.Sign() == 0 &&
|
||||||
@ -263,11 +277,10 @@ func (e *G2) Unmarshal(m []byte) (*G2, bool) {
|
|||||||
e.p.t.SetOne()
|
e.p.t.SetOne()
|
||||||
|
|
||||||
if !e.p.IsOnCurve() {
|
if !e.p.IsOnCurve() {
|
||||||
return nil, false
|
return nil, errors.New("bn256: malformed point")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
return m[4*numBytes:], nil
|
||||||
return e, true
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// GT is an abstract cyclic group. The zero value is suitable for use as the
|
// GT is an abstract cyclic group. The zero value is suitable for use as the
|
@ -219,15 +219,16 @@ func TestBilinearity(t *testing.T) {
|
|||||||
func TestG1Marshal(t *testing.T) {
|
func TestG1Marshal(t *testing.T) {
|
||||||
g := new(G1).ScalarBaseMult(new(big.Int).SetInt64(1))
|
g := new(G1).ScalarBaseMult(new(big.Int).SetInt64(1))
|
||||||
form := g.Marshal()
|
form := g.Marshal()
|
||||||
_, ok := new(G1).Unmarshal(form)
|
_, err := new(G1).Unmarshal(form)
|
||||||
if !ok {
|
if err != nil {
|
||||||
t.Fatalf("failed to unmarshal")
|
t.Fatalf("failed to unmarshal")
|
||||||
}
|
}
|
||||||
|
|
||||||
g.ScalarBaseMult(Order)
|
g.ScalarBaseMult(Order)
|
||||||
form = g.Marshal()
|
form = g.Marshal()
|
||||||
g2, ok := new(G1).Unmarshal(form)
|
|
||||||
if !ok {
|
g2 := new(G1)
|
||||||
|
if _, err = g2.Unmarshal(form); err != nil {
|
||||||
t.Fatalf("failed to unmarshal ∞")
|
t.Fatalf("failed to unmarshal ∞")
|
||||||
}
|
}
|
||||||
if !g2.p.IsInfinity() {
|
if !g2.p.IsInfinity() {
|
||||||
@ -238,15 +239,15 @@ func TestG1Marshal(t *testing.T) {
|
|||||||
func TestG2Marshal(t *testing.T) {
|
func TestG2Marshal(t *testing.T) {
|
||||||
g := new(G2).ScalarBaseMult(new(big.Int).SetInt64(1))
|
g := new(G2).ScalarBaseMult(new(big.Int).SetInt64(1))
|
||||||
form := g.Marshal()
|
form := g.Marshal()
|
||||||
_, ok := new(G2).Unmarshal(form)
|
_, err := new(G2).Unmarshal(form)
|
||||||
if !ok {
|
if err != nil {
|
||||||
t.Fatalf("failed to unmarshal")
|
t.Fatalf("failed to unmarshal")
|
||||||
}
|
}
|
||||||
|
|
||||||
g.ScalarBaseMult(Order)
|
g.ScalarBaseMult(Order)
|
||||||
form = g.Marshal()
|
form = g.Marshal()
|
||||||
g2, ok := new(G2).Unmarshal(form)
|
g2 := new(G2)
|
||||||
if !ok {
|
if _, err = g2.Unmarshal(form); err != nil {
|
||||||
t.Fatalf("failed to unmarshal ∞")
|
t.Fatalf("failed to unmarshal ∞")
|
||||||
}
|
}
|
||||||
if !g2.p.IsInfinity() {
|
if !g2.p.IsInfinity() {
|
||||||
@ -273,12 +274,18 @@ func TestTripartiteDiffieHellman(t *testing.T) {
|
|||||||
b, _ := rand.Int(rand.Reader, Order)
|
b, _ := rand.Int(rand.Reader, Order)
|
||||||
c, _ := rand.Int(rand.Reader, Order)
|
c, _ := rand.Int(rand.Reader, Order)
|
||||||
|
|
||||||
pa, _ := new(G1).Unmarshal(new(G1).ScalarBaseMult(a).Marshal())
|
pa := new(G1)
|
||||||
qa, _ := new(G2).Unmarshal(new(G2).ScalarBaseMult(a).Marshal())
|
pa.Unmarshal(new(G1).ScalarBaseMult(a).Marshal())
|
||||||
pb, _ := new(G1).Unmarshal(new(G1).ScalarBaseMult(b).Marshal())
|
qa := new(G2)
|
||||||
qb, _ := new(G2).Unmarshal(new(G2).ScalarBaseMult(b).Marshal())
|
qa.Unmarshal(new(G2).ScalarBaseMult(a).Marshal())
|
||||||
pc, _ := new(G1).Unmarshal(new(G1).ScalarBaseMult(c).Marshal())
|
pb := new(G1)
|
||||||
qc, _ := new(G2).Unmarshal(new(G2).ScalarBaseMult(c).Marshal())
|
pb.Unmarshal(new(G1).ScalarBaseMult(b).Marshal())
|
||||||
|
qb := new(G2)
|
||||||
|
qb.Unmarshal(new(G2).ScalarBaseMult(b).Marshal())
|
||||||
|
pc := new(G1)
|
||||||
|
pc.Unmarshal(new(G1).ScalarBaseMult(c).Marshal())
|
||||||
|
qc := new(G2)
|
||||||
|
qc.Unmarshal(new(G2).ScalarBaseMult(c).Marshal())
|
||||||
|
|
||||||
k1 := Pair(pb, qc)
|
k1 := Pair(pb, qc)
|
||||||
k1.ScalarMult(k1, a)
|
k1.ScalarMult(k1, a)
|
@ -76,7 +76,13 @@ func (c *twistPoint) IsOnCurve() bool {
|
|||||||
yy.Sub(yy, xxx)
|
yy.Sub(yy, xxx)
|
||||||
yy.Sub(yy, twistB)
|
yy.Sub(yy, twistB)
|
||||||
yy.Minimal()
|
yy.Minimal()
|
||||||
return yy.x.Sign() == 0 && yy.y.Sign() == 0
|
|
||||||
|
if yy.x.Sign() != 0 || yy.y.Sign() != 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
cneg := newTwistPoint(pool)
|
||||||
|
cneg.Mul(c, Order, pool)
|
||||||
|
return cneg.z.IsZero()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *twistPoint) SetInfinity() {
|
func (c *twistPoint) SetInfinity() {
|
@ -36,10 +36,10 @@ import (
|
|||||||
|
|
||||||
"github.com/elastic/gosigar"
|
"github.com/elastic/gosigar"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
"github.com/ethereum/go-ethereum/p2p"
|
"github.com/ethereum/go-ethereum/p2p"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/rpc"
|
"github.com/ethereum/go-ethereum/rpc"
|
||||||
"github.com/rcrowley/go-metrics"
|
|
||||||
"golang.org/x/net/websocket"
|
"golang.org/x/net/websocket"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -104,6 +104,18 @@ func (b *EthApiBackend) GetReceipts(ctx context.Context, blockHash common.Hash)
|
|||||||
return core.GetBlockReceipts(b.eth.chainDb, blockHash, core.GetBlockNumber(b.eth.chainDb, blockHash)), nil
|
return core.GetBlockReceipts(b.eth.chainDb, blockHash, core.GetBlockNumber(b.eth.chainDb, blockHash)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (b *EthApiBackend) GetLogs(ctx context.Context, blockHash common.Hash) ([][]*types.Log, error) {
|
||||||
|
receipts := core.GetBlockReceipts(b.eth.chainDb, blockHash, core.GetBlockNumber(b.eth.chainDb, blockHash))
|
||||||
|
if receipts == nil {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
logs := make([][]*types.Log, len(receipts))
|
||||||
|
for i, receipt := range receipts {
|
||||||
|
logs[i] = receipt.Logs
|
||||||
|
}
|
||||||
|
return logs, nil
|
||||||
|
}
|
||||||
|
|
||||||
func (b *EthApiBackend) GetTd(blockHash common.Hash) *big.Int {
|
func (b *EthApiBackend) GetTd(blockHash common.Hash) *big.Int {
|
||||||
return b.eth.blockchain.GetTdByHash(blockHash)
|
return b.eth.blockchain.GetTdByHash(blockHash)
|
||||||
}
|
}
|
||||||
|
@ -27,12 +27,13 @@ import (
|
|||||||
|
|
||||||
ethereum "github.com/ethereum/go-ethereum"
|
ethereum "github.com/ethereum/go-ethereum"
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/core"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/event"
|
"github.com/ethereum/go-ethereum/event"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/rcrowley/go-metrics"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@ -221,7 +222,10 @@ func New(mode SyncMode, stateDb ethdb.Database, mux *event.TypeMux, chain BlockC
|
|||||||
quitCh: make(chan struct{}),
|
quitCh: make(chan struct{}),
|
||||||
stateCh: make(chan dataPack),
|
stateCh: make(chan dataPack),
|
||||||
stateSyncStart: make(chan *stateSync),
|
stateSyncStart: make(chan *stateSync),
|
||||||
trackStateReq: make(chan *stateReq),
|
syncStatsState: stateSyncStats{
|
||||||
|
processed: core.GetTrieSyncProgress(stateDb),
|
||||||
|
},
|
||||||
|
trackStateReq: make(chan *stateReq),
|
||||||
}
|
}
|
||||||
go dl.qosTuner()
|
go dl.qosTuner()
|
||||||
go dl.stateFetcher()
|
go dl.stateFetcher()
|
||||||
|
@ -23,21 +23,21 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
headerInMeter = metrics.NewMeter("eth/downloader/headers/in")
|
headerInMeter = metrics.NewRegisteredMeter("eth/downloader/headers/in", nil)
|
||||||
headerReqTimer = metrics.NewTimer("eth/downloader/headers/req")
|
headerReqTimer = metrics.NewRegisteredTimer("eth/downloader/headers/req", nil)
|
||||||
headerDropMeter = metrics.NewMeter("eth/downloader/headers/drop")
|
headerDropMeter = metrics.NewRegisteredMeter("eth/downloader/headers/drop", nil)
|
||||||
headerTimeoutMeter = metrics.NewMeter("eth/downloader/headers/timeout")
|
headerTimeoutMeter = metrics.NewRegisteredMeter("eth/downloader/headers/timeout", nil)
|
||||||
|
|
||||||
bodyInMeter = metrics.NewMeter("eth/downloader/bodies/in")
|
bodyInMeter = metrics.NewRegisteredMeter("eth/downloader/bodies/in", nil)
|
||||||
bodyReqTimer = metrics.NewTimer("eth/downloader/bodies/req")
|
bodyReqTimer = metrics.NewRegisteredTimer("eth/downloader/bodies/req", nil)
|
||||||
bodyDropMeter = metrics.NewMeter("eth/downloader/bodies/drop")
|
bodyDropMeter = metrics.NewRegisteredMeter("eth/downloader/bodies/drop", nil)
|
||||||
bodyTimeoutMeter = metrics.NewMeter("eth/downloader/bodies/timeout")
|
bodyTimeoutMeter = metrics.NewRegisteredMeter("eth/downloader/bodies/timeout", nil)
|
||||||
|
|
||||||
receiptInMeter = metrics.NewMeter("eth/downloader/receipts/in")
|
receiptInMeter = metrics.NewRegisteredMeter("eth/downloader/receipts/in", nil)
|
||||||
receiptReqTimer = metrics.NewTimer("eth/downloader/receipts/req")
|
receiptReqTimer = metrics.NewRegisteredTimer("eth/downloader/receipts/req", nil)
|
||||||
receiptDropMeter = metrics.NewMeter("eth/downloader/receipts/drop")
|
receiptDropMeter = metrics.NewRegisteredMeter("eth/downloader/receipts/drop", nil)
|
||||||
receiptTimeoutMeter = metrics.NewMeter("eth/downloader/receipts/timeout")
|
receiptTimeoutMeter = metrics.NewRegisteredMeter("eth/downloader/receipts/timeout", nil)
|
||||||
|
|
||||||
stateInMeter = metrics.NewMeter("eth/downloader/states/in")
|
stateInMeter = metrics.NewRegisteredMeter("eth/downloader/states/in", nil)
|
||||||
stateDropMeter = metrics.NewMeter("eth/downloader/states/drop")
|
stateDropMeter = metrics.NewRegisteredMeter("eth/downloader/states/drop", nil)
|
||||||
)
|
)
|
||||||
|
@ -28,7 +28,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/rcrowley/go-metrics"
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
"gopkg.in/karalabe/cookiejar.v2/collections/prque"
|
"gopkg.in/karalabe/cookiejar.v2/collections/prque"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -23,6 +23,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/core"
|
||||||
"github.com/ethereum/go-ethereum/core/state"
|
"github.com/ethereum/go-ethereum/core/state"
|
||||||
"github.com/ethereum/go-ethereum/crypto/sha3"
|
"github.com/ethereum/go-ethereum/crypto/sha3"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
@ -466,4 +467,7 @@ func (s *stateSync) updateStats(written, duplicate, unexpected int, duration tim
|
|||||||
if written > 0 || duplicate > 0 || unexpected > 0 {
|
if written > 0 || duplicate > 0 || unexpected > 0 {
|
||||||
log.Info("Imported new state entries", "count", written, "elapsed", common.PrettyDuration(duration), "processed", s.d.syncStatsState.processed, "pending", s.d.syncStatsState.pending, "retry", len(s.tasks), "duplicate", s.d.syncStatsState.duplicate, "unexpected", s.d.syncStatsState.unexpected)
|
log.Info("Imported new state entries", "count", written, "elapsed", common.PrettyDuration(duration), "processed", s.d.syncStatsState.processed, "pending", s.d.syncStatsState.pending, "retry", len(s.tasks), "duplicate", s.d.syncStatsState.duplicate, "unexpected", s.d.syncStatsState.unexpected)
|
||||||
}
|
}
|
||||||
|
if written > 0 {
|
||||||
|
core.WriteTrieSyncProgress(s.d.stateDB, s.d.syncStatsState.processed)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
@ -23,21 +23,21 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
propAnnounceInMeter = metrics.NewMeter("eth/fetcher/prop/announces/in")
|
propAnnounceInMeter = metrics.NewRegisteredMeter("eth/fetcher/prop/announces/in", nil)
|
||||||
propAnnounceOutTimer = metrics.NewTimer("eth/fetcher/prop/announces/out")
|
propAnnounceOutTimer = metrics.NewRegisteredTimer("eth/fetcher/prop/announces/out", nil)
|
||||||
propAnnounceDropMeter = metrics.NewMeter("eth/fetcher/prop/announces/drop")
|
propAnnounceDropMeter = metrics.NewRegisteredMeter("eth/fetcher/prop/announces/drop", nil)
|
||||||
propAnnounceDOSMeter = metrics.NewMeter("eth/fetcher/prop/announces/dos")
|
propAnnounceDOSMeter = metrics.NewRegisteredMeter("eth/fetcher/prop/announces/dos", nil)
|
||||||
|
|
||||||
propBroadcastInMeter = metrics.NewMeter("eth/fetcher/prop/broadcasts/in")
|
propBroadcastInMeter = metrics.NewRegisteredMeter("eth/fetcher/prop/broadcasts/in", nil)
|
||||||
propBroadcastOutTimer = metrics.NewTimer("eth/fetcher/prop/broadcasts/out")
|
propBroadcastOutTimer = metrics.NewRegisteredTimer("eth/fetcher/prop/broadcasts/out", nil)
|
||||||
propBroadcastDropMeter = metrics.NewMeter("eth/fetcher/prop/broadcasts/drop")
|
propBroadcastDropMeter = metrics.NewRegisteredMeter("eth/fetcher/prop/broadcasts/drop", nil)
|
||||||
propBroadcastDOSMeter = metrics.NewMeter("eth/fetcher/prop/broadcasts/dos")
|
propBroadcastDOSMeter = metrics.NewRegisteredMeter("eth/fetcher/prop/broadcasts/dos", nil)
|
||||||
|
|
||||||
headerFetchMeter = metrics.NewMeter("eth/fetcher/fetch/headers")
|
headerFetchMeter = metrics.NewRegisteredMeter("eth/fetcher/fetch/headers", nil)
|
||||||
bodyFetchMeter = metrics.NewMeter("eth/fetcher/fetch/bodies")
|
bodyFetchMeter = metrics.NewRegisteredMeter("eth/fetcher/fetch/bodies", nil)
|
||||||
|
|
||||||
headerFilterInMeter = metrics.NewMeter("eth/fetcher/filter/headers/in")
|
headerFilterInMeter = metrics.NewRegisteredMeter("eth/fetcher/filter/headers/in", nil)
|
||||||
headerFilterOutMeter = metrics.NewMeter("eth/fetcher/filter/headers/out")
|
headerFilterOutMeter = metrics.NewRegisteredMeter("eth/fetcher/filter/headers/out", nil)
|
||||||
bodyFilterInMeter = metrics.NewMeter("eth/fetcher/filter/bodies/in")
|
bodyFilterInMeter = metrics.NewRegisteredMeter("eth/fetcher/filter/bodies/in", nil)
|
||||||
bodyFilterOutMeter = metrics.NewMeter("eth/fetcher/filter/bodies/out")
|
bodyFilterOutMeter = metrics.NewRegisteredMeter("eth/fetcher/filter/bodies/out", nil)
|
||||||
)
|
)
|
||||||
|
@ -34,6 +34,7 @@ type Backend interface {
|
|||||||
EventMux() *event.TypeMux
|
EventMux() *event.TypeMux
|
||||||
HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error)
|
HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error)
|
||||||
GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error)
|
GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error)
|
||||||
|
GetLogs(ctx context.Context, blockHash common.Hash) ([][]*types.Log, error)
|
||||||
|
|
||||||
SubscribeTxPreEvent(chan<- core.TxPreEvent) event.Subscription
|
SubscribeTxPreEvent(chan<- core.TxPreEvent) event.Subscription
|
||||||
SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription
|
SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription
|
||||||
@ -201,16 +202,28 @@ func (f *Filter) unindexedLogs(ctx context.Context, end uint64) ([]*types.Log, e
|
|||||||
// match the filter criteria. This function is called when the bloom filter signals a potential match.
|
// match the filter criteria. This function is called when the bloom filter signals a potential match.
|
||||||
func (f *Filter) checkMatches(ctx context.Context, header *types.Header) (logs []*types.Log, err error) {
|
func (f *Filter) checkMatches(ctx context.Context, header *types.Header) (logs []*types.Log, err error) {
|
||||||
// Get the logs of the block
|
// Get the logs of the block
|
||||||
receipts, err := f.backend.GetReceipts(ctx, header.Hash())
|
logsList, err := f.backend.GetLogs(ctx, header.Hash())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
var unfiltered []*types.Log
|
var unfiltered []*types.Log
|
||||||
for _, receipt := range receipts {
|
for _, logs := range logsList {
|
||||||
unfiltered = append(unfiltered, receipt.Logs...)
|
unfiltered = append(unfiltered, logs...)
|
||||||
}
|
}
|
||||||
logs = filterLogs(unfiltered, nil, nil, f.addresses, f.topics)
|
logs = filterLogs(unfiltered, nil, nil, f.addresses, f.topics)
|
||||||
if len(logs) > 0 {
|
if len(logs) > 0 {
|
||||||
|
// We have matching logs, check if we need to resolve full logs via the light client
|
||||||
|
if logs[0].TxHash == (common.Hash{}) {
|
||||||
|
receipts, err := f.backend.GetReceipts(ctx, header.Hash())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
unfiltered = unfiltered[:0]
|
||||||
|
for _, receipt := range receipts {
|
||||||
|
unfiltered = append(unfiltered, receipt.Logs...)
|
||||||
|
}
|
||||||
|
logs = filterLogs(unfiltered, nil, nil, f.addresses, f.topics)
|
||||||
|
}
|
||||||
return logs, nil
|
return logs, nil
|
||||||
}
|
}
|
||||||
return nil, nil
|
return nil, nil
|
||||||
|
@ -375,19 +375,35 @@ func (es *EventSystem) lightFilterLogs(header *types.Header, addresses []common.
|
|||||||
// Get the logs of the block
|
// Get the logs of the block
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
|
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
receipts, err := es.backend.GetReceipts(ctx, header.Hash())
|
logsList, err := es.backend.GetLogs(ctx, header.Hash())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
var unfiltered []*types.Log
|
var unfiltered []*types.Log
|
||||||
for _, receipt := range receipts {
|
for _, logs := range logsList {
|
||||||
for _, log := range receipt.Logs {
|
for _, log := range logs {
|
||||||
logcopy := *log
|
logcopy := *log
|
||||||
logcopy.Removed = remove
|
logcopy.Removed = remove
|
||||||
unfiltered = append(unfiltered, &logcopy)
|
unfiltered = append(unfiltered, &logcopy)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
logs := filterLogs(unfiltered, nil, nil, addresses, topics)
|
logs := filterLogs(unfiltered, nil, nil, addresses, topics)
|
||||||
|
if len(logs) > 0 && logs[0].TxHash == (common.Hash{}) {
|
||||||
|
// We have matching but non-derived logs
|
||||||
|
receipts, err := es.backend.GetReceipts(ctx, header.Hash())
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
unfiltered = unfiltered[:0]
|
||||||
|
for _, receipt := range receipts {
|
||||||
|
for _, log := range receipt.Logs {
|
||||||
|
logcopy := *log
|
||||||
|
logcopy.Removed = remove
|
||||||
|
unfiltered = append(unfiltered, &logcopy)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
logs = filterLogs(unfiltered, nil, nil, addresses, topics)
|
||||||
|
}
|
||||||
return logs
|
return logs
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
@ -69,8 +69,19 @@ func (b *testBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumbe
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (b *testBackend) GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) {
|
func (b *testBackend) GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) {
|
||||||
num := core.GetBlockNumber(b.db, blockHash)
|
number := core.GetBlockNumber(b.db, blockHash)
|
||||||
return core.GetBlockReceipts(b.db, blockHash, num), nil
|
return core.GetBlockReceipts(b.db, blockHash, number), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *testBackend) GetLogs(ctx context.Context, blockHash common.Hash) ([][]*types.Log, error) {
|
||||||
|
number := core.GetBlockNumber(b.db, blockHash)
|
||||||
|
receipts := core.GetBlockReceipts(b.db, blockHash, number)
|
||||||
|
|
||||||
|
logs := make([][]*types.Log, len(receipts))
|
||||||
|
for i, receipt := range receipts {
|
||||||
|
logs[i] = receipt.Logs
|
||||||
|
}
|
||||||
|
return logs, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *testBackend) SubscribeTxPreEvent(ch chan<- core.TxPreEvent) event.Subscription {
|
func (b *testBackend) SubscribeTxPreEvent(ch chan<- core.TxPreEvent) event.Subscription {
|
||||||
|
@ -249,7 +249,8 @@ func (pm *ProtocolManager) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter) *p
|
|||||||
// handle is the callback invoked to manage the life cycle of an eth peer. When
|
// handle is the callback invoked to manage the life cycle of an eth peer. When
|
||||||
// this function terminates, the peer is disconnected.
|
// this function terminates, the peer is disconnected.
|
||||||
func (pm *ProtocolManager) handle(p *peer) error {
|
func (pm *ProtocolManager) handle(p *peer) error {
|
||||||
if pm.peers.Len() >= pm.maxPeers {
|
// Ignore maxPeers if this is a trusted peer
|
||||||
|
if pm.peers.Len() >= pm.maxPeers && !p.Peer.Info().Network.Trusted {
|
||||||
return p2p.DiscTooManyPeers
|
return p2p.DiscTooManyPeers
|
||||||
}
|
}
|
||||||
p.Log().Debug("Ethereum peer connected", "name", p.Name())
|
p.Log().Debug("Ethereum peer connected", "name", p.Name())
|
||||||
|
@ -22,38 +22,38 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
propTxnInPacketsMeter = metrics.NewMeter("eth/prop/txns/in/packets")
|
propTxnInPacketsMeter = metrics.NewRegisteredMeter("eth/prop/txns/in/packets", nil)
|
||||||
propTxnInTrafficMeter = metrics.NewMeter("eth/prop/txns/in/traffic")
|
propTxnInTrafficMeter = metrics.NewRegisteredMeter("eth/prop/txns/in/traffic", nil)
|
||||||
propTxnOutPacketsMeter = metrics.NewMeter("eth/prop/txns/out/packets")
|
propTxnOutPacketsMeter = metrics.NewRegisteredMeter("eth/prop/txns/out/packets", nil)
|
||||||
propTxnOutTrafficMeter = metrics.NewMeter("eth/prop/txns/out/traffic")
|
propTxnOutTrafficMeter = metrics.NewRegisteredMeter("eth/prop/txns/out/traffic", nil)
|
||||||
propHashInPacketsMeter = metrics.NewMeter("eth/prop/hashes/in/packets")
|
propHashInPacketsMeter = metrics.NewRegisteredMeter("eth/prop/hashes/in/packets", nil)
|
||||||
propHashInTrafficMeter = metrics.NewMeter("eth/prop/hashes/in/traffic")
|
propHashInTrafficMeter = metrics.NewRegisteredMeter("eth/prop/hashes/in/traffic", nil)
|
||||||
propHashOutPacketsMeter = metrics.NewMeter("eth/prop/hashes/out/packets")
|
propHashOutPacketsMeter = metrics.NewRegisteredMeter("eth/prop/hashes/out/packets", nil)
|
||||||
propHashOutTrafficMeter = metrics.NewMeter("eth/prop/hashes/out/traffic")
|
propHashOutTrafficMeter = metrics.NewRegisteredMeter("eth/prop/hashes/out/traffic", nil)
|
||||||
propBlockInPacketsMeter = metrics.NewMeter("eth/prop/blocks/in/packets")
|
propBlockInPacketsMeter = metrics.NewRegisteredMeter("eth/prop/blocks/in/packets", nil)
|
||||||
propBlockInTrafficMeter = metrics.NewMeter("eth/prop/blocks/in/traffic")
|
propBlockInTrafficMeter = metrics.NewRegisteredMeter("eth/prop/blocks/in/traffic", nil)
|
||||||
propBlockOutPacketsMeter = metrics.NewMeter("eth/prop/blocks/out/packets")
|
propBlockOutPacketsMeter = metrics.NewRegisteredMeter("eth/prop/blocks/out/packets", nil)
|
||||||
propBlockOutTrafficMeter = metrics.NewMeter("eth/prop/blocks/out/traffic")
|
propBlockOutTrafficMeter = metrics.NewRegisteredMeter("eth/prop/blocks/out/traffic", nil)
|
||||||
reqHeaderInPacketsMeter = metrics.NewMeter("eth/req/headers/in/packets")
|
reqHeaderInPacketsMeter = metrics.NewRegisteredMeter("eth/req/headers/in/packets", nil)
|
||||||
reqHeaderInTrafficMeter = metrics.NewMeter("eth/req/headers/in/traffic")
|
reqHeaderInTrafficMeter = metrics.NewRegisteredMeter("eth/req/headers/in/traffic", nil)
|
||||||
reqHeaderOutPacketsMeter = metrics.NewMeter("eth/req/headers/out/packets")
|
reqHeaderOutPacketsMeter = metrics.NewRegisteredMeter("eth/req/headers/out/packets", nil)
|
||||||
reqHeaderOutTrafficMeter = metrics.NewMeter("eth/req/headers/out/traffic")
|
reqHeaderOutTrafficMeter = metrics.NewRegisteredMeter("eth/req/headers/out/traffic", nil)
|
||||||
reqBodyInPacketsMeter = metrics.NewMeter("eth/req/bodies/in/packets")
|
reqBodyInPacketsMeter = metrics.NewRegisteredMeter("eth/req/bodies/in/packets", nil)
|
||||||
reqBodyInTrafficMeter = metrics.NewMeter("eth/req/bodies/in/traffic")
|
reqBodyInTrafficMeter = metrics.NewRegisteredMeter("eth/req/bodies/in/traffic", nil)
|
||||||
reqBodyOutPacketsMeter = metrics.NewMeter("eth/req/bodies/out/packets")
|
reqBodyOutPacketsMeter = metrics.NewRegisteredMeter("eth/req/bodies/out/packets", nil)
|
||||||
reqBodyOutTrafficMeter = metrics.NewMeter("eth/req/bodies/out/traffic")
|
reqBodyOutTrafficMeter = metrics.NewRegisteredMeter("eth/req/bodies/out/traffic", nil)
|
||||||
reqStateInPacketsMeter = metrics.NewMeter("eth/req/states/in/packets")
|
reqStateInPacketsMeter = metrics.NewRegisteredMeter("eth/req/states/in/packets", nil)
|
||||||
reqStateInTrafficMeter = metrics.NewMeter("eth/req/states/in/traffic")
|
reqStateInTrafficMeter = metrics.NewRegisteredMeter("eth/req/states/in/traffic", nil)
|
||||||
reqStateOutPacketsMeter = metrics.NewMeter("eth/req/states/out/packets")
|
reqStateOutPacketsMeter = metrics.NewRegisteredMeter("eth/req/states/out/packets", nil)
|
||||||
reqStateOutTrafficMeter = metrics.NewMeter("eth/req/states/out/traffic")
|
reqStateOutTrafficMeter = metrics.NewRegisteredMeter("eth/req/states/out/traffic", nil)
|
||||||
reqReceiptInPacketsMeter = metrics.NewMeter("eth/req/receipts/in/packets")
|
reqReceiptInPacketsMeter = metrics.NewRegisteredMeter("eth/req/receipts/in/packets", nil)
|
||||||
reqReceiptInTrafficMeter = metrics.NewMeter("eth/req/receipts/in/traffic")
|
reqReceiptInTrafficMeter = metrics.NewRegisteredMeter("eth/req/receipts/in/traffic", nil)
|
||||||
reqReceiptOutPacketsMeter = metrics.NewMeter("eth/req/receipts/out/packets")
|
reqReceiptOutPacketsMeter = metrics.NewRegisteredMeter("eth/req/receipts/out/packets", nil)
|
||||||
reqReceiptOutTrafficMeter = metrics.NewMeter("eth/req/receipts/out/traffic")
|
reqReceiptOutTrafficMeter = metrics.NewRegisteredMeter("eth/req/receipts/out/traffic", nil)
|
||||||
miscInPacketsMeter = metrics.NewMeter("eth/misc/in/packets")
|
miscInPacketsMeter = metrics.NewRegisteredMeter("eth/misc/in/packets", nil)
|
||||||
miscInTrafficMeter = metrics.NewMeter("eth/misc/in/traffic")
|
miscInTrafficMeter = metrics.NewRegisteredMeter("eth/misc/in/traffic", nil)
|
||||||
miscOutPacketsMeter = metrics.NewMeter("eth/misc/out/packets")
|
miscOutPacketsMeter = metrics.NewRegisteredMeter("eth/misc/out/packets", nil)
|
||||||
miscOutTrafficMeter = metrics.NewMeter("eth/misc/out/traffic")
|
miscOutTrafficMeter = metrics.NewRegisteredMeter("eth/misc/out/traffic", nil)
|
||||||
)
|
)
|
||||||
|
|
||||||
// meteredMsgReadWriter is a wrapper around a p2p.MsgReadWriter, capable of
|
// meteredMsgReadWriter is a wrapper around a p2p.MsgReadWriter, capable of
|
||||||
|
@ -29,8 +29,6 @@ import (
|
|||||||
"github.com/syndtr/goleveldb/leveldb/filter"
|
"github.com/syndtr/goleveldb/leveldb/filter"
|
||||||
"github.com/syndtr/goleveldb/leveldb/iterator"
|
"github.com/syndtr/goleveldb/leveldb/iterator"
|
||||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||||
|
|
||||||
gometrics "github.com/rcrowley/go-metrics"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var OpenFileLimit = 64
|
var OpenFileLimit = 64
|
||||||
@ -39,15 +37,15 @@ type LDBDatabase struct {
|
|||||||
fn string // filename for reporting
|
fn string // filename for reporting
|
||||||
db *leveldb.DB // LevelDB instance
|
db *leveldb.DB // LevelDB instance
|
||||||
|
|
||||||
getTimer gometrics.Timer // Timer for measuring the database get request counts and latencies
|
getTimer metrics.Timer // Timer for measuring the database get request counts and latencies
|
||||||
putTimer gometrics.Timer // Timer for measuring the database put request counts and latencies
|
putTimer metrics.Timer // Timer for measuring the database put request counts and latencies
|
||||||
delTimer gometrics.Timer // Timer for measuring the database delete request counts and latencies
|
delTimer metrics.Timer // Timer for measuring the database delete request counts and latencies
|
||||||
missMeter gometrics.Meter // Meter for measuring the missed database get requests
|
missMeter metrics.Meter // Meter for measuring the missed database get requests
|
||||||
readMeter gometrics.Meter // Meter for measuring the database get request data usage
|
readMeter metrics.Meter // Meter for measuring the database get request data usage
|
||||||
writeMeter gometrics.Meter // Meter for measuring the database put request data usage
|
writeMeter metrics.Meter // Meter for measuring the database put request data usage
|
||||||
compTimeMeter gometrics.Meter // Meter for measuring the total time spent in database compaction
|
compTimeMeter metrics.Meter // Meter for measuring the total time spent in database compaction
|
||||||
compReadMeter gometrics.Meter // Meter for measuring the data read during compaction
|
compReadMeter metrics.Meter // Meter for measuring the data read during compaction
|
||||||
compWriteMeter gometrics.Meter // Meter for measuring the data written during compaction
|
compWriteMeter metrics.Meter // Meter for measuring the data written during compaction
|
||||||
|
|
||||||
quitLock sync.Mutex // Mutex protecting the quit channel access
|
quitLock sync.Mutex // Mutex protecting the quit channel access
|
||||||
quitChan chan chan error // Quit channel to stop the metrics collection before closing the database
|
quitChan chan chan error // Quit channel to stop the metrics collection before closing the database
|
||||||
@ -180,15 +178,15 @@ func (db *LDBDatabase) Meter(prefix string) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
// Initialize all the metrics collector at the requested prefix
|
// Initialize all the metrics collector at the requested prefix
|
||||||
db.getTimer = metrics.NewTimer(prefix + "user/gets")
|
db.getTimer = metrics.NewRegisteredTimer(prefix+"user/gets", nil)
|
||||||
db.putTimer = metrics.NewTimer(prefix + "user/puts")
|
db.putTimer = metrics.NewRegisteredTimer(prefix+"user/puts", nil)
|
||||||
db.delTimer = metrics.NewTimer(prefix + "user/dels")
|
db.delTimer = metrics.NewRegisteredTimer(prefix+"user/dels", nil)
|
||||||
db.missMeter = metrics.NewMeter(prefix + "user/misses")
|
db.missMeter = metrics.NewRegisteredMeter(prefix+"user/misses", nil)
|
||||||
db.readMeter = metrics.NewMeter(prefix + "user/reads")
|
db.readMeter = metrics.NewRegisteredMeter(prefix+"user/reads", nil)
|
||||||
db.writeMeter = metrics.NewMeter(prefix + "user/writes")
|
db.writeMeter = metrics.NewRegisteredMeter(prefix+"user/writes", nil)
|
||||||
db.compTimeMeter = metrics.NewMeter(prefix + "compact/time")
|
db.compTimeMeter = metrics.NewRegisteredMeter(prefix+"compact/time", nil)
|
||||||
db.compReadMeter = metrics.NewMeter(prefix + "compact/input")
|
db.compReadMeter = metrics.NewRegisteredMeter(prefix+"compact/input", nil)
|
||||||
db.compWriteMeter = metrics.NewMeter(prefix + "compact/output")
|
db.compWriteMeter = metrics.NewRegisteredMeter(prefix+"compact/output", nil)
|
||||||
|
|
||||||
// Create a quit channel for the periodic collector and run it
|
// Create a quit channel for the periodic collector and run it
|
||||||
db.quitLock.Lock()
|
db.quitLock.Lock()
|
||||||
|
@ -140,10 +140,9 @@ func (h *HandlerT) GoTrace(file string, nsec uint) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// BlockProfile turns on CPU profiling for nsec seconds and writes
|
// BlockProfile turns on goroutine profiling for nsec seconds and writes profile data to
|
||||||
// profile data to file. It uses a profile rate of 1 for most accurate
|
// file. It uses a profile rate of 1 for most accurate information. If a different rate is
|
||||||
// information. If a different rate is desired, set the rate
|
// desired, set the rate and write the profile manually.
|
||||||
// and write the profile manually.
|
|
||||||
func (*HandlerT) BlockProfile(file string, nsec uint) error {
|
func (*HandlerT) BlockProfile(file string, nsec uint) error {
|
||||||
runtime.SetBlockProfileRate(1)
|
runtime.SetBlockProfileRate(1)
|
||||||
time.Sleep(time.Duration(nsec) * time.Second)
|
time.Sleep(time.Duration(nsec) * time.Second)
|
||||||
@ -162,6 +161,26 @@ func (*HandlerT) WriteBlockProfile(file string) error {
|
|||||||
return writeProfile("block", file)
|
return writeProfile("block", file)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// MutexProfile turns on mutex profiling for nsec seconds and writes profile data to file.
|
||||||
|
// It uses a profile rate of 1 for most accurate information. If a different rate is
|
||||||
|
// desired, set the rate and write the profile manually.
|
||||||
|
func (*HandlerT) MutexProfile(file string, nsec uint) error {
|
||||||
|
runtime.SetMutexProfileFraction(1)
|
||||||
|
time.Sleep(time.Duration(nsec) * time.Second)
|
||||||
|
defer runtime.SetMutexProfileFraction(0)
|
||||||
|
return writeProfile("mutex", file)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetMutexProfileFraction sets the rate of mutex profiling.
|
||||||
|
func (*HandlerT) SetMutexProfileFraction(rate int) {
|
||||||
|
runtime.SetMutexProfileFraction(rate)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteMutexProfile writes a goroutine blocking profile to the given file.
|
||||||
|
func (*HandlerT) WriteMutexProfile(file string) error {
|
||||||
|
return writeProfile("mutex", file)
|
||||||
|
}
|
||||||
|
|
||||||
// WriteMemProfile writes an allocation profile to the given file.
|
// WriteMemProfile writes an allocation profile to the given file.
|
||||||
// Note that the profiling rate cannot be set through the API,
|
// Note that the profiling rate cannot be set through the API,
|
||||||
// it must be set on the command line.
|
// it must be set on the command line.
|
||||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user