Compare commits


36 Commits

Author SHA1 Message Date
Jeffrey Wilcke
94ad694a26 Merge branch 'release/1.4' 2016-05-17 14:59:51 +02:00
Péter Szilágyi
4c6953606e [release/1.4.4] eth: skip transaction handling during fast sync
(cherry picked from commit d87f7a1e81)
2016-05-17 14:59:12 +02:00
Jeffrey Wilcke
fc0638f9d8 Merge branch 'release/1.4' 2016-05-13 12:10:31 +02:00
Jeffrey Wilcke
4b11f207cb VERSION, cmd/geth: bumped version 1.4.4 2016-05-12 21:10:38 +02:00
Jeffrey Wilcke
7e5c49cafa [release/1.4.4] event: fixed subscriptions to stopped event mux
This fixes an issue where the following would lead to a panic due to a
channel being closed twice:

* Start mux
* Stop mux
* Sub to mux
* Unsub

This is fixed by setting the subscription's status to closed, causing
Unsubscribe to ignore the request when called.
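
As an illustration, a minimal standalone sketch of that guard (hypothetical stand-in types, not the actual event package):

package main

import "sync"

// muxSub is a stripped-down stand-in for the event package's subscription.
type muxSub struct {
	mu     sync.Mutex
	closed bool
	postC  chan interface{}
}

// close marks the subscription closed and closes its channel exactly once;
// any later call becomes a no-op instead of a double-close panic.
func (s *muxSub) close() {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.closed {
		return
	}
	s.closed = true
	close(s.postC)
}

func main() {
	s := &muxSub{postC: make(chan interface{})}
	s.close() // mux already stopped: subscription handed out pre-closed
	s.close() // Unsubscribe afterwards: ignored rather than panicking
}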

(cherry picked from commit 7c1f74713e)
2016-05-12 20:53:31 +02:00
Péter Szilágyi
efcfa2209b [release/1.4.4] eth/downloader: bound fork ancestry and allow heavy short forks 2016-05-12 17:32:06 +02:00
Jeffrey Wilcke
aa18aad7ad [release/1.4.4] core: fixed pointer assignment
This fixes an issue where it is theoretically possible to cause a
consensus failure when hitting the lower end of the difficulty; though
practically impossible, it is worth fixing.
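
The hazard is big.Int pointer aliasing; a minimal sketch (the 131072 value mirrors params.MinimumDifficulty, the rest is hypothetical):

package main

import (
	"fmt"
	"math/big"
)

// stand-in for params.MinimumDifficulty (131072 at the time)
var minimumDifficulty = big.NewInt(131072)

func main() {
	// Pointer assignment aliases the shared value...
	diff := new(big.Int).SetInt64(100)
	diff = minimumDifficulty
	diff.Add(diff, big.NewInt(1))  // ...so mutating diff corrupts the "constant"
	fmt.Println(minimumDifficulty) // 131073: a consensus-critical value changed

	// Copying the value, as the fix does, leaves the original intact.
	minimumDifficulty.SetInt64(131072) // reset for the second half of the demo
	diff2 := new(big.Int).SetInt64(100)
	diff2.Set(minimumDifficulty)
	diff2.Add(diff2, big.NewInt(1))
	fmt.Println(minimumDifficulty) // 131072: unchanged
}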
2016-05-12 17:31:57 +02:00
Péter Szilágyi
594328c112 [release/1.4.4] accounts/abi/bind: fix multi-value anonymous unmarshalling
(cherry picked from commit cc21706c50)
2016-05-12 17:19:29 +02:00
Jeffrey Wilcke
2e6b9c141b [release/1.4.4] accounts/abi: fixed unpacking into pre-allocated interface slices
Previously it was assumed that whenever the type `[]interface{}` was
given, the interface slice was empty. abigen, however, rightfully
expects interface slices that already contain pre-allocated variables
to be assigned.

This PR fixes that by checking whether the given `[]interface{}` has a
length greater than zero, assigning each value using the generic `set`
function (this function has also been moved to abi/reflect.go), and
checking whether the assignment was possible.

The generic assignment function `set` now also deals with pointers
(useful for the interface slices mentioned above) by dereferencing the
pointer until it finds a settable type.
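
A rough standalone sketch of that dereferencing behaviour (hypothetical helper, not the abi package's actual `set`):

package main

import (
	"fmt"
	"reflect"
)

// setValue walks through pointers until it reaches a settable value and
// assigns src to it; a sketch of the lenient assignment described above.
func setValue(dst, src reflect.Value) error {
	for dst.Kind() == reflect.Ptr {
		dst = dst.Elem() // dereference until a non-pointer is found
	}
	if !dst.CanSet() || !src.Type().AssignableTo(dst.Type()) {
		return fmt.Errorf("cannot assign %v to %v", src.Type(), dst.Type())
	}
	dst.Set(src)
	return nil
}

func main() {
	v := new(uint8)
	out := []interface{}{v} // pre-allocated variable wrapped in an interface slice
	err := setValue(reflect.ValueOf(out[0]), reflect.ValueOf(uint8(42)))
	fmt.Println(*v, err) // prints: 42 <nil>
}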

(cherry picked from commit 91a7a4a786)
2016-05-12 17:19:29 +02:00
Bas van Kervel
b38cea6654 [release/1.4.4] rpc: HTTP origin case insensitive
(cherry picked from commit 5479097790)
2016-05-12 17:19:25 +02:00
Jeffrey Wilcke
dd083aa34e Merge branch 'release/1.4'
Conflicts:
	VERSION
	cmd/geth/main.go
2016-05-10 13:48:11 +02:00
Jeffrey Wilcke
f213a9d8e8 VERSION, cmd/geth: bumped version 1.4.3 2016-05-10 13:46:03 +02:00
Jeffrey Wilcke
6826b2040f [release/1.4.3] miner: fixed pending state by not shutting down update loop
(cherry picked from commit a824c3f02f)
2016-05-10 13:46:03 +02:00
Jeffrey Wilcke
2a3657d8d9 VERSION, cmd/geth: bumped version 1.4.2 2016-05-10 13:46:02 +02:00
Jeffrey Wilcke
566af6ef92 VERSION, cmd/geth: bumped version 1.4.2 2016-05-10 10:32:27 +02:00
Jeffrey Wilcke
290e851f57 VERSION, cmd/geth: bumped version 1.4.2 2016-05-09 22:06:27 +02:00
Jeffrey Wilcke
8f96d66241 Merge branch 'develop' into release/1.4 2016-05-09 22:04:40 +02:00
Péter Szilágyi
5782164a35 Merge pull request #2529 from fjl/fdlimit-bsd
cmd/utils: fix build on *BSD
2016-05-09 16:39:34 +03:00
Péter Szilágyi
27f657478f Merge pull request #2524 from fjl/windows-ci-scripts
build: add CI scripts for windows
2016-05-09 15:59:49 +03:00
Péter Szilágyi
756b62988c Merge pull request #2523 from fjl/shutdown
core, eth, miner: improve shutdown synchronisation
2016-05-09 15:59:41 +03:00
Felix Lange
f61e203c10 cmd/utils: fix build on *BSD 2016-05-09 13:13:44 +02:00
Felix Lange
56ed6152a1 core, eth, miner: improve shutdown synchronisation
Shutting down geth prints hundreds of annoying error messages in some
cases. The errors appear because the Stop method of eth.ProtocolManager,
miner.Miner and core.TxPool is asynchronous. Left over peer sessions
generate events which are processed after Stop even though the database
has already been closed.

The fix is to make Stop synchronous using sync.WaitGroup.

For eth.ProtocolManager, in order to make the use of WaitGroup safe, we need
a way to stop new peer sessions from being added while waiting on the
WaitGroup. The eth protocol Run function now selects on a signaling
channel and adds to the WaitGroup only if ProtocolManager is not
shutting down.

For miner.worker and core.TxPool the number of goroutines is static, so
WaitGroup can be used in the usual way without additional
synchronisation.
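
A rough sketch of the admission gate described above (stripped-down hypothetical types; the real code additionally hands peers to a syncer loop and stops it via a separate noMorePeers handshake):

package main

import (
	"fmt"
	"sync"
)

// pm sketches the ProtocolManager shutdown pattern: a session only joins
// the WaitGroup if it is admitted before shutdown begins.
type pm struct {
	wg        sync.WaitGroup
	newPeerCh chan int
	quit      chan struct{}
}

// run mirrors the eth protocol Run function: select on admission versus
// the quit signal, and count the session only when admitted.
func (m *pm) run(peer int) error {
	select {
	case m.newPeerCh <- peer:
		m.wg.Add(1)
		defer m.wg.Done()
		return nil // handle(peer) would run here
	case <-m.quit:
		return fmt.Errorf("shutting down, peer %d refused", peer)
	}
}

// stop makes shutdown synchronous: close the quit gate, then wait for
// every admitted session to finish before returning.
func (m *pm) stop() {
	close(m.quit)
	m.wg.Wait()
}

func main() {
	m := &pm{newPeerCh: make(chan int, 1), quit: make(chan struct{})}
	fmt.Println(m.run(1)) // <nil>: admitted while running
	m.stop()
	fmt.Println(m.run(2)) // refused: Stop has already closed the gate
}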
2016-05-09 13:03:08 +02:00
Péter Szilágyi
dc7f202ecd Merge pull request #2528 from karalabe/fix-web3-eth_syncing
jsre: hotfix web3 for the console eth.syncing formatting
2016-05-09 12:46:48 +03:00
Péter Szilágyi
1d42061e2c jsre: hotfix web3 for the console eth.syncing formatting 2016-05-09 11:59:59 +03:00
Jeffrey Wilcke
b6135a72dd Merge pull request #2527 from obscuren/trace-transaction-fix
eth: fixed tracing functions using the current header instead of parent
2016-05-09 10:34:53 +02:00
Jeffrey Wilcke
7d59c5c58d eth: fixed tracing functions using the current header instead of parent
Fixes #2525
2016-05-09 10:24:31 +02:00
Péter Szilágyi
8aa4597c9e Merge pull request #2520 from karalabe/makefile-cross-update
Makefile: go build instead of install (solves cross compile issues)
2016-05-09 11:12:28 +03:00
Péter Szilágyi
57ba1824ac Merge pull request #2514 from bas-vk/startRPC_WS
node: start RPC/WS interface on localhost by default
2016-05-08 17:37:35 +03:00
Felix Lange
c89f4352d0 build: add CI scripts for windows 2016-05-08 01:23:07 +02:00
Péter Szilágyi
6a00a3ade1 Makefile: go build instead of install (solves cross compile issues) 2016-05-06 16:56:22 +03:00
Péter Szilágyi
f821b0188a Merge pull request #2518 from fjl/debug-go-trace
internal/debug: also rename debug_startTrace to debug_startGoTrace
2016-05-06 13:05:54 +03:00
Bas van Kervel
d79f2f2656 node: start RPC/WS interface on localhost by default 2016-05-06 12:00:47 +02:00
Felix Lange
130bccc763 cmd/utils: flush trace and CPU profile data when force-quitting
Also reduce log messages a little bit.
2016-05-06 11:16:47 +02:00
Felix Lange
ae9ed5c420 internal/debug: also rename debug_startTrace to debug_startGoTrace
This was missing from the previous change.
2016-05-06 11:15:05 +02:00
Jeffrey Wilcke
a1c201a5ac Merge pull request #2517 from fjl/debug-go-trace
internal/debug: rename debug_trace to debug_goTrace
2016-05-06 11:09:15 +02:00
Felix Lange
844e911129 internal/debug: rename debug_trace to debug_goTrace
Reduces confusion with EVM execution tracing methods.
2016-05-06 10:27:24 +02:00
35 changed files with 608 additions and 198 deletions

View File

@@ -13,7 +13,7 @@ GOBIN = build/bin
GO ?= latest
geth:
build/env.sh go install -v $(shell build/flags.sh) ./cmd/geth
build/env.sh go build -i -v $(shell build/flags.sh) -o $(GOBIN)/geth ./cmd/geth
@echo "Done building."
@echo "Run \"$(GOBIN)/geth\" to launch geth."
@@ -103,7 +103,9 @@ evm:
@echo "Run \"$(GOBIN)/evm to start the evm."
all:
build/env.sh go install -v $(shell build/flags.sh) ./...
for cmd in `ls ./cmd/`; do \
build/env.sh go build -i -v $(shell build/flags.sh) -o $(GOBIN)/$$cmd ./cmd/$$cmd; \
done
test: all
build/env.sh go test ./...

View File

@@ -1 +1 @@
1.4.1
1.4.4

View File

@@ -238,8 +238,16 @@ func (abi ABI) Unpack(v interface{}, name string, output []byte) error {
return fmt.Errorf("abi: unmarshalling empty output")
}
value := reflect.ValueOf(v).Elem()
typ := value.Type()
// make sure the passed value is a pointer
valueOf := reflect.ValueOf(v)
if reflect.Ptr != valueOf.Kind() {
return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
}
var (
value = valueOf.Elem()
typ = value.Type()
)
if len(method.Outputs) > 1 {
switch value.Kind() {
@@ -268,6 +276,25 @@ func (abi ABI) Unpack(v interface{}, name string, output []byte) error {
return fmt.Errorf("abi: cannot marshal tuple in to slice %T (only []interface{} is supported)", v)
}
// if the slice already contains values, set those instead of the interface slice itself.
if value.Len() > 0 {
if len(method.Outputs) > value.Len() {
return fmt.Errorf("abi: cannot marshal in to slices of unequal size (require: %v, got: %v)", len(method.Outputs), value.Len())
}
for i := 0; i < len(method.Outputs); i++ {
marshalledValue, err := toGoType(i, method.Outputs[i], output)
if err != nil {
return err
}
reflectValue := reflect.ValueOf(marshalledValue)
if err := set(value.Index(i).Elem(), reflectValue, method.Outputs[i]); err != nil {
return err
}
}
return nil
}
// create a new slice and start appending the unmarshalled
// values to the new interface slice.
z := reflect.MakeSlice(typ, 0, len(method.Outputs))
@@ -296,34 +323,6 @@ func (abi ABI) Unpack(v interface{}, name string, output []byte) error {
return nil
}
// set attempts to assign src to dst by either setting, copying or otherwise.
//
// set is a bit more lenient when it comes to assignment and doesn't force an as
// strict ruleset as bare `reflect` does.
func set(dst, src reflect.Value, output Argument) error {
dstType := dst.Type()
srcType := src.Type()
switch {
case dstType.AssignableTo(src.Type()):
dst.Set(src)
case dstType.Kind() == reflect.Array && srcType.Kind() == reflect.Slice:
if !dstType.Elem().AssignableTo(r_byte) {
return fmt.Errorf("abi: cannot unmarshal %v in to array of elem %v", src.Type(), dstType.Elem())
}
if dst.Len() < output.Type.SliceSize {
return fmt.Errorf("abi: cannot unmarshal src (len=%d) in to dst (len=%d)", output.Type.SliceSize, dst.Len())
}
reflect.Copy(dst, src)
case dstType.Kind() == reflect.Interface:
dst.Set(src)
default:
return fmt.Errorf("abi: cannot unmarshal %v in to %v", src.Type(), dst.Type())
}
return nil
}
func (abi *ABI) UnmarshalJSON(data []byte) error {
var fields []struct {
Type string

View File

@@ -289,6 +289,37 @@ func TestSimpleMethodUnpack(t *testing.T) {
}
}
func TestUnpackSetInterfaceSlice(t *testing.T) {
var (
var1 = new(uint8)
var2 = new(uint8)
)
out := []interface{}{var1, var2}
abi, err := JSON(strings.NewReader(`[{"type":"function", "name":"ints", "outputs":[{"type":"uint8"}, {"type":"uint8"}]}]`))
if err != nil {
t.Fatal(err)
}
marshalledReturn := append(pad([]byte{1}, 32, true), pad([]byte{2}, 32, true)...)
err = abi.Unpack(&out, "ints", marshalledReturn)
if err != nil {
t.Fatal(err)
}
if *var1 != 1 {
t.Errorf("expected var1 to be 1, got", *var1)
}
if *var2 != 2 {
t.Errorf("expected var2 to be 2, got", *var2)
}
out = []interface{}{var1}
err = abi.Unpack(&out, "ints", marshalledReturn)
expErr := "abi: cannot marshal in to slices of unequal size (require: 2, got: 1)"
if err == nil || err.Error() != expErr {
t.Error("expected err:", expErr, "Got:", err)
}
}
func TestPack(t *testing.T) {
for i, test := range []struct {
typ string

View File

@@ -194,12 +194,44 @@ var bindTests = []struct {
}
`,
},
// Tests that plain values can be properly returned and deserialized
{
`Getter`,
`
contract Getter {
function getter() constant returns (string, int, bytes32) {
return ("Hi", 1, sha3(""));
}
}
`,
`606060405260dc8060106000396000f3606060405260e060020a6000350463993a04b78114601a575b005b600060605260c0604052600260809081527f486900000000000000000000000000000000000000000000000000000000000060a05260017fc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a47060e0829052610100819052606060c0908152600261012081905281906101409060a09080838184600060046012f1505081517fffff000000000000000000000000000000000000000000000000000000000000169091525050604051610160819003945092505050f3`,
`[{"constant":true,"inputs":[],"name":"getter","outputs":[{"name":"","type":"string"},{"name":"","type":"int256"},{"name":"","type":"bytes32"}],"type":"function"}]`,
`
// Generate a new random account and a funded simulator
key, _ := crypto.GenerateKey()
auth := bind.NewKeyedTransactor(key)
sim := backends.NewSimulatedBackend(core.GenesisAccount{Address: auth.From, Balance: big.NewInt(10000000000)})
// Deploy a tuple tester contract and execute a structured call on it
_, _, getter, err := DeployGetter(auth, sim)
if err != nil {
t.Fatalf("Failed to deploy getter contract: %v", err)
}
sim.Commit()
if str, num, _, err := getter.Getter(nil); err != nil {
t.Fatalf("Failed to call anonymous field retriever: %v", err)
} else if str != "Hi" || num.Cmp(big.NewInt(1)) != 0 {
t.Fatalf("Retrieved value mismatch: have %v/%v, want %v/%v", str, num, "Hi", 1)
}
`,
},
// Tests that tuples can be properly returned and deserialized
{
`Tupler`,
`
contract Tupler {
function tuple() returns (string a, int b, bytes32 c) {
function tuple() constant returns (string a, int b, bytes32 c) {
return ("Hi", 1, sha3(""));
}
}
@@ -219,8 +251,10 @@ var bindTests = []struct {
}
sim.Commit()
if _, err := tupler.Tuple(nil); err != nil {
if res, err := tupler.Tuple(nil); err != nil {
t.Fatalf("Failed to call structure retriever: %v", err)
} else if res.A != "Hi" || res.B.Cmp(big.NewInt(1)) != 0 {
t.Fatalf("Retrieved value mismatch: have %v/%v, want %v/%v", res.A, res.B, "Hi", 1)
}
`,
},

View File

@@ -211,7 +211,7 @@ package {{.Package}}
{{range $i, $_ := .Normalized.Outputs}}ret{{$i}} = new({{bindtype .Type}})
{{end}}
){{end}}
out := {{if .Structured}}ret{{else}}{{if eq (len .Normalized.Outputs) 1}}ret0{{else}}[]interface{}{
out := {{if .Structured}}ret{{else}}{{if eq (len .Normalized.Outputs) 1}}ret0{{else}}&[]interface{}{
{{range $i, $_ := .Normalized.Outputs}}ret{{$i}},
{{end}}
}{{end}}{{end}}

View File

@@ -16,7 +16,10 @@
package abi
import "reflect"
import (
"fmt"
"reflect"
)
// indirect recursively dereferences the value until it either gets the value
// or finds a big.Int
@@ -62,3 +65,33 @@ func mustArrayToByteSlice(value reflect.Value) reflect.Value {
reflect.Copy(slice, value)
return slice
}
// set attempts to assign src to dst by either setting, copying or otherwise.
//
// set is a bit more lenient when it comes to assignment and doesn't force an as
// strict ruleset as bare `reflect` does.
func set(dst, src reflect.Value, output Argument) error {
dstType := dst.Type()
srcType := src.Type()
switch {
case dstType.AssignableTo(src.Type()):
dst.Set(src)
case dstType.Kind() == reflect.Array && srcType.Kind() == reflect.Slice:
if !dstType.Elem().AssignableTo(r_byte) {
return fmt.Errorf("abi: cannot unmarshal %v in to array of elem %v", src.Type(), dstType.Elem())
}
if dst.Len() < output.Type.SliceSize {
return fmt.Errorf("abi: cannot unmarshal src (len=%d) in to dst (len=%d)", output.Type.SliceSize, dst.Len())
}
reflect.Copy(dst, src)
case dstType.Kind() == reflect.Interface:
dst.Set(src)
case dstType.Kind() == reflect.Ptr:
return set(dst.Elem(), src, output)
default:
return fmt.Errorf("abi: cannot unmarshal %v in to %v", src.Type(), dst.Type())
}
return nil
}

build/win-ci-compile.bat (new file, 26 lines)
View File

@@ -0,0 +1,26 @@
@echo off
if not exist .\build\win-ci-compile.bat (
echo This script must be run from the root of the repository.
exit /b
)
if not defined GOPATH (
echo GOPATH is not set.
exit /b
)
set GOPATH=%GOPATH%;%cd%\Godeps\_workspace
set GOBIN=%cd%\build\bin
rem set gitCommit when running from a Git checkout.
set goLinkFlags=""
if exist ".git\HEAD" (
where /q git
if not errorlevel 1 (
for /f %%h in ('git rev-parse HEAD') do (
set goLinkFlags="-X main.gitCommit=%%h"
)
)
)
@echo on
go install -v -ldflags %goLinkFlags% ./...

build/win-ci-test.bat (new file, 15 lines)
View File

@@ -0,0 +1,15 @@
@echo off
if not exist .\build\win-ci-test.bat (
echo This script must be run from the root of the repository.
exit /b
)
if not defined GOPATH (
echo GOPATH is not set.
exit /b
)
set GOPATH=%GOPATH%;%cd%\Godeps\_workspace
set GOBIN=%cd%\build\bin
@echo on
go test ./...

View File

@@ -47,11 +47,11 @@ import (
)
const (
clientIdentifier = "Geth" // Client identifier to advertise over the network
versionMajor = 1 // Major version component of the current release
versionMinor = 4 // Minor version component of the current release
versionPatch = 1 // Patch version component of the current release
versionMeta = "rc" // Version metadata to append to the version string
clientIdentifier = "Geth" // Client identifier to advertise over the network
versionMajor = 1 // Major version component of the current release
versionMinor = 4 // Minor version component of the current release
versionPatch = 4 // Patch version component of the current release
versionMeta = "stable" // Version metadata to append to the version string
versionOracle = "0xfa7b9770ca4cb04296cac84f37736d4041251cdf" // Ethereum address of the Geth release oracle
)

View File

@@ -73,15 +73,13 @@ func StartNode(stack *node.Node) {
<-sigc
glog.V(logger.Info).Infoln("Got interrupt, shutting down...")
go stack.Stop()
logger.Flush()
for i := 10; i > 0; i-- {
<-sigc
if i > 1 {
glog.V(logger.Info).Infoln("Already shutting down, please be patient.")
glog.V(logger.Info).Infoln("Interrupt", i-1, "more times to induce panic.")
glog.V(logger.Info).Infof("Already shutting down, interrupt %d more times for panic.", i-1)
}
}
glog.V(logger.Error).Infof("Force quitting: this might not end so well.")
debug.Exit() // ensure trace and CPU profile data is flushed.
debug.LoudPanic("boom")
}()
}

View File

@@ -0,0 +1,54 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
// +build freebsd
package utils
import "syscall"
// This file is largely identical to fdlimit_unix.go,
// but Rlimit fields have type int64 on FreeBSD so it needs
// an extra conversion.
// raiseFdLimit tries to maximize the file descriptor allowance of this process
// to the maximum hard-limit allowed by the OS.
func raiseFdLimit(max uint64) error {
// Get the current limit
var limit syscall.Rlimit
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
return err
}
// Try to update the limit to the max allowance
limit.Cur = limit.Max
if limit.Cur > int64(max) {
limit.Cur = int64(max)
}
if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
return err
}
return nil
}
// getFdLimit retrieves the number of file descriptors allowed to be opened by this
// process.
func getFdLimit() (int, error) {
var limit syscall.Rlimit
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
return 0, err
}
return int(limit.Cur), nil
}

View File

@@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
// +build linux darwin
// +build linux darwin netbsd openbsd solaris
package utils

View File

@@ -292,7 +292,7 @@ func calcDifficultyHomestead(time, parentTime uint64, parentNumber, parentDiff *
// minimum difficulty can ever be (before exponential factor)
if x.Cmp(params.MinimumDifficulty) < 0 {
x = params.MinimumDifficulty
x.Set(params.MinimumDifficulty)
}
// for the exponential factor
@@ -325,7 +325,7 @@ func calcDifficultyFrontier(time, parentTime uint64, parentNumber, parentDiff *b
diff.Sub(parentDiff, adjust)
}
if diff.Cmp(params.MinimumDifficulty) < 0 {
diff = params.MinimumDifficulty
diff.Set(params.MinimumDifficulty)
}
periodCount := new(big.Int).Add(parentNumber, common.Big1)

View File

@@ -60,8 +60,7 @@ type stateFn func() (*state.StateDB, error)
// two states over time as they are received and processed.
type TxPool struct {
config *ChainConfig
quit chan bool // Quitting channel
currentState stateFn // The state function which will allow us to do some pre checks
currentState stateFn // The state function which will allow us to do some pre checks
pendingState *state.ManagedState
gasLimit func() *big.Int // The current gas limit function callback
minGasPrice *big.Int
@@ -72,6 +71,8 @@ type TxPool struct {
pending map[common.Hash]*types.Transaction // processable transactions
queue map[common.Address]map[common.Hash]*types.Transaction
wg sync.WaitGroup // for shutdown sync
homestead bool
}
@@ -80,7 +81,6 @@ func NewTxPool(config *ChainConfig, eventMux *event.TypeMux, currentStateFn stat
config: config,
pending: make(map[common.Hash]*types.Transaction),
queue: make(map[common.Address]map[common.Hash]*types.Transaction),
quit: make(chan bool),
eventMux: eventMux,
currentState: currentStateFn,
gasLimit: gasLimitFn,
@@ -90,12 +90,15 @@ func NewTxPool(config *ChainConfig, eventMux *event.TypeMux, currentStateFn stat
events: eventMux.Subscribe(ChainHeadEvent{}, GasPriceChanged{}, RemovedTransactionEvent{}),
}
pool.wg.Add(1)
go pool.eventLoop()
return pool
}
func (pool *TxPool) eventLoop() {
defer pool.wg.Done()
// Track chain events. When a chain events occurs (new chain canon block)
// we need to know the new state. The new state will help us determine
// the nonces in the managed state
@@ -155,8 +158,8 @@ func (pool *TxPool) resetState() {
}
func (pool *TxPool) Stop() {
close(pool.quit)
pool.events.Unsubscribe()
pool.wg.Wait()
glog.V(logger.Info).Infoln("Transaction pool stopped")
}

View File

@@ -1841,7 +1841,7 @@ func (s *PrivateDebugAPI) TraceTransaction(txHash common.Hash, logger *vm.LogCon
}
// Mutate the state if we haven't reached the tracing transaction yet
if uint64(idx) < txIndex {
vmenv := core.NewEnv(stateDb, s.config, s.eth.BlockChain(), msg, parent.Header(), vm.Config{})
vmenv := core.NewEnv(stateDb, s.config, s.eth.BlockChain(), msg, block.Header(), vm.Config{})
_, _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas()))
if err != nil {
return nil, fmt.Errorf("mutation failed: %v", err)
@@ -1849,7 +1849,7 @@ func (s *PrivateDebugAPI) TraceTransaction(txHash common.Hash, logger *vm.LogCon
continue
}
// Otherwise trace the transaction and return
vmenv := core.NewEnv(stateDb, s.config, s.eth.BlockChain(), msg, parent.Header(), vm.Config{Debug: true, Logger: *logger})
vmenv := core.NewEnv(stateDb, s.config, s.eth.BlockChain(), msg, block.Header(), vm.Config{Debug: true, Logger: *logger})
ret, gas, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas()))
if err != nil {
return nil, fmt.Errorf("tracing failed: %v", err)

View File

@@ -416,6 +416,7 @@ func (s *Ethereum) Stop() error {
s.blockchain.Stop()
s.protocolManager.Stop()
s.txPool.Stop()
s.miner.Stop()
s.eventMux.Stop()
s.StopAutoDAG()

View File

@@ -34,6 +34,7 @@ import (
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/params"
"github.com/rcrowley/go-metrics"
)
@@ -45,6 +46,8 @@ var (
MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request
MaxStateFetch = 384 // Amount of node state values to allow fetching per request
MaxForkAncestry = 3 * params.EpochDuration.Uint64() // Maximum chain reorganisation
hashTTL = 3 * time.Second // [eth/61] Time it takes for a hash request to time out
blockTargetRTT = 3 * time.Second / 2 // [eth/61] Target time for completing a block retrieval request
blockTTL = 3 * blockTargetRTT // [eth/61] Maximum time allowance before a block request is considered expired
@@ -79,6 +82,7 @@ var (
errEmptyHeaderSet = errors.New("empty header set by peer")
errPeersUnavailable = errors.New("no peers available or all tried for download")
errAlreadyInPool = errors.New("hash already in pool")
errInvalidAncestor = errors.New("retrieved ancestor is invalid")
errInvalidChain = errors.New("retrieved hash chain is invalid")
errInvalidBlock = errors.New("retrieved block is invalid")
errInvalidBody = errors.New("retrieved block body is invalid")
@@ -266,7 +270,7 @@ func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode
case errBusy:
glog.V(logger.Detail).Infof("Synchronisation already in progress")
case errTimeout, errBadPeer, errStallingPeer, errEmptyHashSet, errEmptyHeaderSet, errPeersUnavailable, errInvalidChain:
case errTimeout, errBadPeer, errStallingPeer, errEmptyHashSet, errEmptyHeaderSet, errPeersUnavailable, errInvalidAncestor, errInvalidChain:
glog.V(logger.Debug).Infof("Removing peer %v: %v", id, err)
d.dropPeer(id)
@@ -353,7 +357,7 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
if err != nil {
return err
}
origin, err := d.findAncestor61(p)
origin, err := d.findAncestor61(p, latest)
if err != nil {
return err
}
@@ -380,7 +384,7 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
if err != nil {
return err
}
origin, err := d.findAncestor(p)
origin, err := d.findAncestor(p, latest)
if err != nil {
return err
}
@@ -536,11 +540,19 @@ func (d *Downloader) fetchHeight61(p *peer) (uint64, error) {
// on the correct chain, checking the top N blocks should already get us a match.
// In the rare scenario when we ended up on a long reorganisation (i.e. none of
// the head blocks match), we do a binary search to find the common ancestor.
func (d *Downloader) findAncestor61(p *peer) (uint64, error) {
func (d *Downloader) findAncestor61(p *peer, height uint64) (uint64, error) {
glog.V(logger.Debug).Infof("%v: looking for common ancestor", p)
// Request out head blocks to short circuit ancestor location
head := d.headBlock().NumberU64()
// Figure out the valid ancestor range to prevent rewrite attacks
floor, ceil := int64(-1), d.headBlock().NumberU64()
if ceil >= MaxForkAncestry {
floor = int64(ceil - MaxForkAncestry)
}
// Request the topmost blocks to short circuit binary ancestor lookup
head := ceil
if head > height {
head = height
}
from := int64(head) - int64(MaxHashFetch) + 1
if from < 0 {
from = 0
@@ -600,11 +612,18 @@ func (d *Downloader) findAncestor61(p *peer) (uint64, error) {
}
// If the head fetch already found an ancestor, return
if !common.EmptyHash(hash) {
if int64(number) <= floor {
glog.V(logger.Warn).Infof("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, number, hash[:4], floor)
return 0, errInvalidAncestor
}
glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, number, hash[:4])
return number, nil
}
// Ancestor not found, we need to binary search over our chain
start, end := uint64(0), head
if floor > 0 {
start = uint64(floor)
}
for start+1 < end {
// Split our chain interval in two, and request the hash to cross check
check := (start + end) / 2
@@ -660,6 +679,12 @@ func (d *Downloader) findAncestor61(p *peer) (uint64, error) {
}
}
}
// Ensure valid ancestry and return
if int64(start) <= floor {
glog.V(logger.Warn).Infof("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, start, hash[:4], floor)
return 0, errInvalidAncestor
}
glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, start, hash[:4])
return start, nil
}
@@ -961,15 +986,23 @@ func (d *Downloader) fetchHeight(p *peer) (uint64, error) {
// on the correct chain, checking the top N links should already get us a match.
// In the rare scenario when we ended up on a long reorganisation (i.e. none of
// the head links match), we do a binary search to find the common ancestor.
func (d *Downloader) findAncestor(p *peer) (uint64, error) {
func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
glog.V(logger.Debug).Infof("%v: looking for common ancestor", p)
// Request our head headers to short circuit ancestor location
head := d.headHeader().Number.Uint64()
// Figure out the valid ancestor range to prevent rewrite attacks
floor, ceil := int64(-1), d.headHeader().Number.Uint64()
if d.mode == FullSync {
head = d.headBlock().NumberU64()
ceil = d.headBlock().NumberU64()
} else if d.mode == FastSync {
head = d.headFastBlock().NumberU64()
ceil = d.headFastBlock().NumberU64()
}
if ceil >= MaxForkAncestry {
floor = int64(ceil - MaxForkAncestry)
}
// Request the topmost blocks to short circuit binary ancestor lookup
head := ceil
if head > height {
head = height
}
from := int64(head) - int64(MaxHeaderFetch) + 1
if from < 0 {
@@ -1040,11 +1073,18 @@ func (d *Downloader) findAncestor(p *peer) (uint64, error) {
}
// If the head fetch already found an ancestor, return
if !common.EmptyHash(hash) {
if int64(number) <= floor {
glog.V(logger.Warn).Infof("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, number, hash[:4], floor)
return 0, errInvalidAncestor
}
glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, number, hash[:4])
return number, nil
}
// Ancestor not found, we need to binary search over our chain
start, end := uint64(0), head
if floor > 0 {
start = uint64(floor)
}
for start+1 < end {
// Split our chain interval in two, and request the hash to cross check
check := (start + end) / 2
@@ -1100,6 +1140,12 @@ func (d *Downloader) findAncestor(p *peer) (uint64, error) {
}
}
}
// Ensure valid ancestry and return
if int64(start) <= floor {
glog.V(logger.Warn).Infof("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, start, hash[:4], floor)
return 0, errInvalidAncestor
}
glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, start, hash[:4])
return start, nil
}

View File

@@ -43,8 +43,9 @@ var (
genesis = core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))
)
// Reduce the block cache limit, otherwise the tests will be very heavy.
// Reduce some of the parameters to make the tester faster.
func init() {
MaxForkAncestry = uint64(10000)
blockCacheLimit = 1024
}
@@ -52,11 +53,15 @@ func init() {
// the returned hash chain is ordered head->parent. In addition, every 3rd block
// contains a transaction and every 5th an uncle to allow testing correct block
// reassembly.
func makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Receipts) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts) {
func makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Receipts, heavy bool) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts) {
// Generate the block chain
blocks, receipts := core.GenerateChain(parent, testdb, n, func(i int, block *core.BlockGen) {
block.SetCoinbase(common.Address{seed})
// If a heavy chain is requested, delay blocks to raise difficulty
if heavy {
block.OffsetTime(-1)
}
// If the block number is multiple of 3, send a bonus transaction to the miner
if parent == genesis && i%3 == 0 {
tx, err := types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(testKey)
@@ -97,15 +102,19 @@ func makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Recei
// makeChainFork creates two chains of length n, such that h1[:f] and
// h2[:f] are different but have a common suffix of length n-f.
func makeChainFork(n, f int, parent *types.Block, parentReceipts types.Receipts) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash]types.Receipts) {
func makeChainFork(n, f int, parent *types.Block, parentReceipts types.Receipts, balanced bool) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash]types.Receipts) {
// Create the common suffix
hashes, headers, blocks, receipts := makeChain(n-f, 0, parent, parentReceipts)
hashes, headers, blocks, receipts := makeChain(n-f, 0, parent, parentReceipts, false)
// Create the forks
hashes1, headers1, blocks1, receipts1 := makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]])
// Create the forks, making the second heavier if non-balanced forks were requested
hashes1, headers1, blocks1, receipts1 := makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]], false)
hashes1 = append(hashes1, hashes[1:]...)
hashes2, headers2, blocks2, receipts2 := makeChain(f, 2, blocks[hashes[0]], receipts[hashes[0]])
heavy := false
if !balanced {
heavy = true
}
hashes2, headers2, blocks2, receipts2 := makeChain(f, 2, blocks[hashes[0]], receipts[hashes[0]], heavy)
hashes2 = append(hashes2, hashes[1:]...)
for hash, header := range headers {
@@ -712,7 +721,7 @@ func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
// Create a small enough block chain to download
targetBlocks := blockCacheLimit - 15
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
tester := newTester()
tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
@@ -736,7 +745,7 @@ func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }
func testThrottling(t *testing.T, protocol int, mode SyncMode) {
// Create a long block chain to download and the tester
targetBlocks := 8 * blockCacheLimit
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
tester := newTester()
tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
@@ -810,20 +819,20 @@ func testThrottling(t *testing.T, protocol int, mode SyncMode) {
// Tests that simple synchronization against a forked chain works correctly. In
// this test common ancestor lookup should *not* be short circuited, and a full
// binary search should be executed.
func TestForkedSynchronisation61(t *testing.T) { testForkedSynchronisation(t, 61, FullSync) }
func TestForkedSynchronisation62(t *testing.T) { testForkedSynchronisation(t, 62, FullSync) }
func TestForkedSynchronisation63Full(t *testing.T) { testForkedSynchronisation(t, 63, FullSync) }
func TestForkedSynchronisation63Fast(t *testing.T) { testForkedSynchronisation(t, 63, FastSync) }
func TestForkedSynchronisation64Full(t *testing.T) { testForkedSynchronisation(t, 64, FullSync) }
func TestForkedSynchronisation64Fast(t *testing.T) { testForkedSynchronisation(t, 64, FastSync) }
func TestForkedSynchronisation64Light(t *testing.T) { testForkedSynchronisation(t, 64, LightSync) }
func TestForkedSync61(t *testing.T) { testForkedSync(t, 61, FullSync) }
func TestForkedSync62(t *testing.T) { testForkedSync(t, 62, FullSync) }
func TestForkedSync63Full(t *testing.T) { testForkedSync(t, 63, FullSync) }
func TestForkedSync63Fast(t *testing.T) { testForkedSync(t, 63, FastSync) }
func TestForkedSync64Full(t *testing.T) { testForkedSync(t, 64, FullSync) }
func TestForkedSync64Fast(t *testing.T) { testForkedSync(t, 64, FastSync) }
func TestForkedSync64Light(t *testing.T) { testForkedSync(t, 64, LightSync) }
func testForkedSynchronisation(t *testing.T, protocol int, mode SyncMode) {
func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
// Create a long enough forked chain
common, fork := MaxHashFetch, 2*MaxHashFetch
hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil)
hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, true)
tester := newTester()
tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
@@ -842,6 +851,40 @@ func testForkedSynchronisation(t *testing.T, protocol int, mode SyncMode) {
assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork + 1})
}
// Tests that synchronising against a much shorter but much heavier fork works
// correctly and is not dropped.
func TestHeavyForkedSync61(t *testing.T) { testHeavyForkedSync(t, 61, FullSync) }
func TestHeavyForkedSync62(t *testing.T) { testHeavyForkedSync(t, 62, FullSync) }
func TestHeavyForkedSync63Full(t *testing.T) { testHeavyForkedSync(t, 63, FullSync) }
func TestHeavyForkedSync63Fast(t *testing.T) { testHeavyForkedSync(t, 63, FastSync) }
func TestHeavyForkedSync64Full(t *testing.T) { testHeavyForkedSync(t, 64, FullSync) }
func TestHeavyForkedSync64Fast(t *testing.T) { testHeavyForkedSync(t, 64, FastSync) }
func TestHeavyForkedSync64Light(t *testing.T) { testHeavyForkedSync(t, 64, LightSync) }
func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
// Create a long enough forked chain
common, fork := MaxHashFetch, 4*MaxHashFetch
hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, false)
tester := newTester()
tester.newPeer("light", protocol, hashesA, headersA, blocksA, receiptsA)
tester.newPeer("heavy", protocol, hashesB[fork/2:], headersB, blocksB, receiptsB)
// Synchronise with the peer and make sure all blocks were retrieved
if err := tester.sync("light", nil, mode); err != nil {
t.Fatalf("failed to synchronise blocks: %v", err)
}
assertOwnChain(t, tester, common+fork+1)
// Synchronise with the second peer and make sure that fork is pulled too
if err := tester.sync("heavy", nil, mode); err != nil {
t.Fatalf("failed to synchronise blocks: %v", err)
}
assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork/2 + 1})
}
// Tests that an inactive downloader will not accept incoming hashes and blocks.
func TestInactiveDownloader61(t *testing.T) {
t.Parallel()
@@ -856,6 +899,74 @@ func TestInactiveDownloader61(t *testing.T) {
}
}
// Tests that chain forks are contained within a certain interval of the current
// chain head, ensuring that malicious peers cannot waste resources by feeding
// long dead chains.
func TestBoundedForkedSync61(t *testing.T) { testBoundedForkedSync(t, 61, FullSync) }
func TestBoundedForkedSync62(t *testing.T) { testBoundedForkedSync(t, 62, FullSync) }
func TestBoundedForkedSync63Full(t *testing.T) { testBoundedForkedSync(t, 63, FullSync) }
func TestBoundedForkedSync63Fast(t *testing.T) { testBoundedForkedSync(t, 63, FastSync) }
func TestBoundedForkedSync64Full(t *testing.T) { testBoundedForkedSync(t, 64, FullSync) }
func TestBoundedForkedSync64Fast(t *testing.T) { testBoundedForkedSync(t, 64, FastSync) }
func TestBoundedForkedSync64Light(t *testing.T) { testBoundedForkedSync(t, 64, LightSync) }
func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
// Create a long enough forked chain
common, fork := 13, int(MaxForkAncestry+17)
hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, true)
tester := newTester()
tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
tester.newPeer("rewriter", protocol, hashesB, headersB, blocksB, receiptsB)
// Synchronise with the peer and make sure all blocks were retrieved
if err := tester.sync("original", nil, mode); err != nil {
t.Fatalf("failed to synchronise blocks: %v", err)
}
assertOwnChain(t, tester, common+fork+1)
// Synchronise with the second peer and ensure that the fork is rejected for being too old
if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
}
}
// Tests that chain forks are contained within a certain interval of the current
// chain head for short but heavy forks too. These are a bit special because they
// take different ancestor lookup paths.
func TestBoundedHeavyForkedSync61(t *testing.T) { testBoundedHeavyForkedSync(t, 61, FullSync) }
func TestBoundedHeavyForkedSync62(t *testing.T) { testBoundedHeavyForkedSync(t, 62, FullSync) }
func TestBoundedHeavyForkedSync63Full(t *testing.T) { testBoundedHeavyForkedSync(t, 63, FullSync) }
func TestBoundedHeavyForkedSync63Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 63, FastSync) }
func TestBoundedHeavyForkedSync64Full(t *testing.T) { testBoundedHeavyForkedSync(t, 64, FullSync) }
func TestBoundedHeavyForkedSync64Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 64, FastSync) }
func TestBoundedHeavyForkedSync64Light(t *testing.T) { testBoundedHeavyForkedSync(t, 64, LightSync) }
func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
// Create a long enough forked chain
common, fork := 13, int(MaxForkAncestry+17)
hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, false)
tester := newTester()
tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
tester.newPeer("heavy-rewriter", protocol, hashesB[MaxForkAncestry-17:], headersB, blocksB, receiptsB) // Root the fork below the ancestor limit
// Synchronise with the peer and make sure all blocks were retrieved
if err := tester.sync("original", nil, mode); err != nil {
t.Fatalf("failed to synchronise blocks: %v", err)
}
assertOwnChain(t, tester, common+fork+1)
// Synchronise with the second peer and ensure that the fork is rejected for being too old
if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
}
}
// Tests that an inactive downloader will not accept incoming block headers and
// bodies.
func TestInactiveDownloader62(t *testing.T) {
@@ -909,7 +1020,7 @@ func testCancel(t *testing.T, protocol int, mode SyncMode) {
if targetBlocks >= MaxHeaderFetch {
targetBlocks = MaxHeaderFetch - 15
}
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
tester := newTester()
tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
@@ -944,7 +1055,7 @@ func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
// Create various peers with various parts of the chain
targetPeers := 8
targetBlocks := targetPeers*blockCacheLimit - 15
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
tester := newTester()
for i := 0; i < targetPeers; i++ {
@@ -972,7 +1083,7 @@ func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
// Create a small enough block chain to download
targetBlocks := blockCacheLimit - 15
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
// Create peers of every type
tester := newTester()
@@ -1010,7 +1121,7 @@ func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
// Create a block chain to download
targetBlocks := 2*blockCacheLimit - 15
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
tester := newTester()
tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
@@ -1063,7 +1174,7 @@ func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
// Create a small enough block chain to download
targetBlocks := blockCacheLimit - 15
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
tester := newTester()
@@ -1095,7 +1206,7 @@ func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 6
func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
// Create a small enough block chain to download
targetBlocks := blockCacheLimit - 15
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
tester := newTester()
@@ -1126,7 +1237,7 @@ func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(
func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
// Create a small enough block chain to download
targetBlocks := 3*fsHeaderSafetyNet + fsMinFullBlocks
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
tester := newTester()
@@ -1217,7 +1328,7 @@ func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
tester := newTester()
hashes, headers, blocks, receipts := makeChain(0, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(0, 0, genesis, nil, false)
tester.newPeer("attack", protocol, []common.Hash{hashes[0]}, headers, blocks, receipts)
if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
@@ -1247,6 +1358,7 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
{errEmptyHashSet, true}, // No hashes were returned as a response, drop as it's a dead end
{errEmptyHeaderSet, true}, // No headers were returned as a response, drop as it's a dead end
{errPeersUnavailable, true}, // Nobody had the advertised blocks, drop the advertiser
{errInvalidAncestor, true}, // Agreed upon ancestor is not acceptable, drop the chain rewriter
{errInvalidChain, true}, // Hash chain was detected as invalid, definitely drop
{errInvalidBlock, false}, // A bad peer was detected, but not the sync origin
{errInvalidBody, false}, // A bad peer was detected, but not the sync origin
@@ -1294,7 +1406,7 @@ func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
// Create a small enough block chain to download
targetBlocks := blockCacheLimit - 15
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
// Set a sync init hook to catch progress changes
starting := make(chan struct{})
@@ -1366,7 +1478,7 @@ func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
// Create a forked chain to simulate origin revertal
common, fork := MaxHashFetch, 2*MaxHashFetch
hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil)
hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, true)
// Set a sync init hook to catch progress changes
starting := make(chan struct{})
@@ -1441,7 +1553,7 @@ func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
// Create a small enough block chain to download
targetBlocks := blockCacheLimit - 15
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
// Set a sync init hook to catch progress changes
starting := make(chan struct{})
@@ -1517,7 +1629,7 @@ func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
// Create a small block chain
targetBlocks := blockCacheLimit - 15
hashes, headers, blocks, receipts := makeChain(targetBlocks+3, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(targetBlocks+3, 0, genesis, nil, false)
// Set a sync init hook to catch progress changes
starting := make(chan struct{})
@@ -1590,7 +1702,7 @@ func TestDeliverHeadersHang64Light(t *testing.T) { testDeliverHeadersHang(t, 64,
func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
hashes, headers, blocks, receipts := makeChain(5, 0, genesis, nil)
hashes, headers, blocks, receipts := makeChain(5, 0, genesis, nil, false)
fakeHeads := []*types.Header{{}, {}, {}, {}}
for i := 0; i < 200; i++ {
tester := newTester()

View File

@@ -22,6 +22,7 @@ import (
"math"
"math/big"
"sync"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
@@ -58,7 +59,7 @@ type blockFetcherFn func([]common.Hash) error
type ProtocolManager struct {
networkId int
fastSync bool
fastSync uint32
txpool txPool
blockchain *core.BlockChain
chaindb ethdb.Database
@@ -74,36 +75,39 @@ type ProtocolManager struct {
minedBlockSub event.Subscription
// channels for fetcher, syncer, txsyncLoop
newPeerCh chan *peer
txsyncCh chan *txsync
quitSync chan struct{}
newPeerCh chan *peer
txsyncCh chan *txsync
quitSync chan struct{}
noMorePeers chan struct{}
// wait group is used for graceful shutdowns during downloading
// and processing
wg sync.WaitGroup
quit bool
wg sync.WaitGroup
}
// NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
// with the ethereum network.
func NewProtocolManager(config *core.ChainConfig, fastSync bool, networkId int, mux *event.TypeMux, txpool txPool, pow pow.PoW, blockchain *core.BlockChain, chaindb ethdb.Database) (*ProtocolManager, error) {
// Create the protocol manager with the base fields
manager := &ProtocolManager{
networkId: networkId,
eventMux: mux,
txpool: txpool,
blockchain: blockchain,
chaindb: chaindb,
peers: newPeerSet(),
newPeerCh: make(chan *peer),
noMorePeers: make(chan struct{}),
txsyncCh: make(chan *txsync),
quitSync: make(chan struct{}),
}
// Figure out whether to allow fast sync or not
if fastSync && blockchain.CurrentBlock().NumberU64() > 0 {
glog.V(logger.Info).Infof("blockchain not empty, fast sync disabled")
fastSync = false
}
// Create the protocol manager with the base fields
manager := &ProtocolManager{
networkId: networkId,
fastSync: fastSync,
eventMux: mux,
txpool: txpool,
blockchain: blockchain,
chaindb: chaindb,
peers: newPeerSet(),
newPeerCh: make(chan *peer, 1),
txsyncCh: make(chan *txsync),
quitSync: make(chan struct{}),
if fastSync {
manager.fastSync = uint32(1)
}
// Initiate a sub-protocol for every implemented version we can handle
manager.SubProtocols = make([]p2p.Protocol, 0, len(ProtocolVersions))
@@ -120,8 +124,14 @@ func NewProtocolManager(config *core.ChainConfig, fastSync bool, networkId int,
Length: ProtocolLengths[i],
Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
peer := manager.newPeer(int(version), p, rw)
manager.newPeerCh <- peer
return manager.handle(peer)
select {
case manager.newPeerCh <- peer:
manager.wg.Add(1)
defer manager.wg.Done()
return manager.handle(peer)
case <-manager.quitSync:
return p2p.DiscQuitting
}
},
NodeInfo: func() interface{} {
return manager.NodeInfo()
@@ -187,16 +197,25 @@ func (pm *ProtocolManager) Start() {
}
func (pm *ProtocolManager) Stop() {
// Showing a log message. During download / process this could actually
// take between 5 to 10 seconds and therefor feedback is required.
glog.V(logger.Info).Infoln("Stopping ethereum protocol handler...")
pm.quit = true
pm.txSub.Unsubscribe() // quits txBroadcastLoop
pm.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop
close(pm.quitSync) // quits syncer, fetcher, txsyncLoop
// Wait for any process action
// Quit the sync loop.
// After this send has completed, no new peers will be accepted.
pm.noMorePeers <- struct{}{}
// Quit fetcher, txsyncLoop.
close(pm.quitSync)
// Disconnect existing sessions.
// This also closes the gate for any new registrations on the peer set.
// sessions which are already established but not added to pm.peers yet
// will exit when they try to register.
pm.peers.Close()
// Wait for all peer handler goroutines and the loops to come down.
pm.wg.Wait()
glog.V(logger.Info).Infoln("Ethereum protocol handler stopped")
@@ -662,7 +681,11 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
}
case msg.Code == TxMsg:
// Transactions arrived, parse all of them and deliver to the pool
// Transactions arrived, make sure we have a valid chain to handle them
if atomic.LoadUint32(&pm.fastSync) == 1 {
break
}
// Transactions can be processed, parse all of them and deliver to the pool
var txs []*types.Transaction
if err := msg.Decode(&txs); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)

View File

@@ -140,14 +140,14 @@ func newTestPeer(name string, version int, pm *ProtocolManager, shake bool) (*te
// Start the peer on a new thread
errc := make(chan error, 1)
go func() {
pm.newPeerCh <- peer
errc <- pm.handle(peer)
select {
case pm.newPeerCh <- peer:
errc <- pm.handle(peer)
case <-pm.quitSync:
errc <- p2p.DiscQuitting
}
}()
tp := &testPeer{
app: app,
net: net,
peer: peer,
}
tp := &testPeer{app: app, net: net, peer: peer}
// Execute any implicitly requested handshakes and return
if shake {
td, head, genesis := pm.blockchain.Status()

View File

@@ -34,6 +34,7 @@ import (
)
var (
errClosed = errors.New("peer set is closed")
errAlreadyRegistered = errors.New("peer is already registered")
errNotRegistered = errors.New("peer is not registered")
)
@@ -351,8 +352,9 @@ func (p *peer) String() string {
// peerSet represents the collection of active peers currently participating in
// the Ethereum sub-protocol.
type peerSet struct {
peers map[string]*peer
lock sync.RWMutex
peers map[string]*peer
lock sync.RWMutex
closed bool
}
// newPeerSet creates a new peer set to track the active participants.
@@ -368,6 +370,9 @@ func (ps *peerSet) Register(p *peer) error {
ps.lock.Lock()
defer ps.lock.Unlock()
if ps.closed {
return errClosed
}
if _, ok := ps.peers[p.id]; ok {
return errAlreadyRegistered
}
@@ -450,3 +455,15 @@ func (ps *peerSet) BestPeer() *peer {
}
return bestPeer
}
// Close disconnects all peers.
// No new peers can be registered after Close has returned.
func (ps *peerSet) Close() {
ps.lock.Lock()
defer ps.lock.Unlock()
for _, p := range ps.peers {
p.Disconnect(p2p.DiscQuitting)
}
ps.closed = true
}

View File

@@ -18,6 +18,7 @@ package eth
import (
"math/rand"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
@@ -148,7 +149,7 @@ func (pm *ProtocolManager) syncer() {
// Force a sync even if not enough peers are present
go pm.synchronise(pm.peers.BestPeer())
case <-pm.quitSync:
case <-pm.noMorePeers:
return
}
}
@@ -167,18 +168,18 @@ func (pm *ProtocolManager) synchronise(peer *peer) {
}
// Otherwise try to sync with the downloader
mode := downloader.FullSync
if pm.fastSync {
if atomic.LoadUint32(&pm.fastSync) == 1 {
mode = downloader.FastSync
}
if err := pm.downloader.Synchronise(peer.id, peer.Head(), peer.Td(), mode); err != nil {
return
}
// If fast sync was enabled, and we synced up, disable it
if pm.fastSync {
if atomic.LoadUint32(&pm.fastSync) == 1 {
// Disable fast sync if we indeed have something in our chain
if pm.blockchain.CurrentBlock().NumberU64() > 0 {
glog.V(logger.Info).Infof("fast sync complete, auto disabling")
pm.fastSync = false
atomic.StoreUint32(&pm.fastSync, 0)
}
}
}

View File

@@ -17,6 +17,7 @@
package eth
import (
"sync/atomic"
"testing"
"time"
@@ -29,12 +30,12 @@ import (
func TestFastSyncDisabling(t *testing.T) {
// Create a pristine protocol manager, check that fast sync is left enabled
pmEmpty := newTestProtocolManagerMust(t, true, 0, nil, nil)
if !pmEmpty.fastSync {
if atomic.LoadUint32(&pmEmpty.fastSync) == 0 {
t.Fatalf("fast sync disabled on pristine blockchain")
}
// Create a full protocol manager, check that fast sync gets disabled
pmFull := newTestProtocolManagerMust(t, true, 1024, nil, nil)
if pmFull.fastSync {
if atomic.LoadUint32(&pmFull.fastSync) == 1 {
t.Fatalf("fast sync not disabled on non-empty blockchain")
}
// Sync up the two peers
@@ -47,7 +48,7 @@ func TestFastSyncDisabling(t *testing.T) {
pmEmpty.synchronise(pmEmpty.peers.BestPeer())
// Check that fast sync was disabled
if pmEmpty.fastSync {
if atomic.LoadUint32(&pmEmpty.fastSync) == 1 {
t.Fatalf("fast sync not disabled after successful synchronisation")
}
}

View File

@@ -66,6 +66,9 @@ func (mux *TypeMux) Subscribe(types ...interface{}) Subscription {
mux.mutex.Lock()
defer mux.mutex.Unlock()
if mux.stopped {
// set the status to closed so that calling Unsubscribe after this
// call will short circuit
sub.closed = true
close(sub.postC)
} else {
if mux.subm == nil {

View File

@@ -25,6 +25,14 @@ import (
type testEvent int
func TestSubCloseUnsub(t *testing.T) {
// the point of this test is **not** to panic
var mux TypeMux
mux.Stop()
sub := mux.Subscribe(int(0))
sub.Unsubscribe()
}
func TestSub(t *testing.T) {
mux := new(TypeMux)
defer mux.Stop()

View File

@@ -51,7 +51,7 @@ type HandlerT struct {
traceFile string
}
// Verbosity sets the glog verbosity floor.
// Verbosity sets the glog verbosity ceiling.
// The verbosity of individual packages and source files
// can be raised using Vmodule.
func (*HandlerT) Verbosity(level int) {
@@ -131,14 +131,14 @@ func (h *HandlerT) StopCPUProfile() error {
return nil
}
// Trace turns on tracing for nsec seconds and writes
// GoTrace turns on tracing for nsec seconds and writes
// trace data to file.
func (h *HandlerT) Trace(file string, nsec uint) error {
if err := h.StartTrace(file); err != nil {
func (h *HandlerT) GoTrace(file string, nsec uint) error {
if err := h.StartGoTrace(file); err != nil {
return err
}
time.Sleep(time.Duration(nsec) * time.Second)
h.StopTrace()
h.StopGoTrace()
return nil
}

View File

@@ -89,7 +89,7 @@ func Setup(ctx *cli.Context) error {
runtime.MemProfileRate = ctx.GlobalInt(memprofilerateFlag.Name)
Handler.SetBlockProfileRate(ctx.GlobalInt(blockprofilerateFlag.Name))
if traceFile := ctx.GlobalString(traceFlag.Name); traceFile != "" {
if err := Handler.StartTrace(traceFile); err != nil {
if err := Handler.StartGoTrace(traceFile); err != nil {
return err
}
}
@@ -114,5 +114,5 @@ func Setup(ctx *cli.Context) error {
// respective file.
func Exit() {
Handler.StopCPUProfile()
Handler.StopTrace()
Handler.StopGoTrace()
}

View File

@@ -27,8 +27,8 @@ import (
"github.com/ethereum/go-ethereum/logger/glog"
)
// StartTrace turns on tracing, writing to the given file.
func (h *HandlerT) StartTrace(file string) error {
// StartGoTrace turns on tracing, writing to the given file.
func (h *HandlerT) StartGoTrace(file string) error {
h.mu.Lock()
defer h.mu.Unlock()
if h.traceW != nil {
@@ -49,7 +49,7 @@ func (h *HandlerT) StartTrace(file string) error {
}
// StopTrace stops an ongoing trace.
func (h *HandlerT) StopTrace() error {
func (h *HandlerT) StopGoTrace() error {
h.mu.Lock()
defer h.mu.Unlock()
trace.Stop()

View File

@@ -22,10 +22,10 @@ package debug
import "errors"
func (*HandlerT) StartTrace(string) error {
func (*HandlerT) StartGoTrace(string) error {
return errors.New("tracing is not supported on Go < 1.5")
}
func (*HandlerT) StopTrace() error {
func (*HandlerT) StopGoTrace() error {
return errors.New("tracing is not supported on Go < 1.5")
}

View File

@@ -366,18 +366,18 @@ web3._extend({
params: 0
}),
new web3._extend.Method({
name: 'trace',
call: 'debug_trace',
name: 'goTrace',
call: 'debug_goTrace',
params: 2
}),
new web3._extend.Method({
name: 'startTrace',
call: 'debug_startTrace',
name: 'startGoTrace',
call: 'debug_startGoTrace',
params: 1
}),
new web3._extend.Method({
name: 'stopTrace',
call: 'debug_stopTrace',
name: 'stopGoTrace',
call: 'debug_stopGoTrace',
params: 0
}),
new web3._extend.Method({

View File

@@ -3911,7 +3911,12 @@ var outputSyncingFormatter = function(result) {
result.startingBlock = utils.toDecimal(result.startingBlock);
result.currentBlock = utils.toDecimal(result.currentBlock);
result.highestBlock = utils.toDecimal(result.highestBlock);
if (result.knownStates !== undefined) {
result.knownStates = utils.toDecimal(result.knownStates);
}
if (result.pulledStates !== undefined) {
result.pulledStates = utils.toDecimal(result.pulledStates);
}
return result;
};

View File

@@ -94,10 +94,13 @@ type worker struct {
mu sync.Mutex
// update loop
mux *event.TypeMux
events event.Subscription
wg sync.WaitGroup
agents map[Agent]struct{}
recv chan *Result
mux *event.TypeMux
quit chan struct{}
pow pow.PoW
eth core.Backend
@@ -138,13 +141,13 @@ func newWorker(config *core.ChainConfig, coinbase common.Address, eth core.Backe
possibleUncles: make(map[common.Hash]*types.Block),
coinbase: coinbase,
txQueue: make(map[common.Hash]*types.Transaction),
quit: make(chan struct{}),
agents: make(map[Agent]struct{}),
fullValidation: false,
}
worker.events = worker.mux.Subscribe(core.ChainHeadEvent{}, core.ChainSideEvent{}, core.TxPreEvent{})
go worker.update()
go worker.wait()
go worker.wait()
worker.commitNewWork()
return worker
@@ -184,9 +187,10 @@ func (self *worker) start() {
}
func (self *worker) stop() {
self.wg.Wait()
self.mu.Lock()
defer self.mu.Unlock()
if atomic.LoadInt32(&self.mining) == 1 {
// Stop all agents.
for agent := range self.agents {
@@ -217,36 +221,22 @@ func (self *worker) unregister(agent Agent) {
}
func (self *worker) update() {
eventSub := self.mux.Subscribe(core.ChainHeadEvent{}, core.ChainSideEvent{}, core.TxPreEvent{})
defer eventSub.Unsubscribe()
eventCh := eventSub.Chan()
for {
select {
case event, ok := <-eventCh:
if !ok {
// Event subscription closed, set the channel to nil to stop spinning
eventCh = nil
continue
for event := range self.events.Chan() {
// A real event arrived, process interesting content
switch ev := event.Data.(type) {
case core.ChainHeadEvent:
self.commitNewWork()
case core.ChainSideEvent:
self.uncleMu.Lock()
self.possibleUncles[ev.Block.Hash()] = ev.Block
self.uncleMu.Unlock()
case core.TxPreEvent:
// Apply transaction to the pending state if we're not mining
if atomic.LoadInt32(&self.mining) == 0 {
self.currentMu.Lock()
self.current.commitTransactions(self.mux, types.Transactions{ev.Tx}, self.gasPrice, self.chain)
self.currentMu.Unlock()
}
// A real event arrived, process interesting content
switch ev := event.Data.(type) {
case core.ChainHeadEvent:
self.commitNewWork()
case core.ChainSideEvent:
self.uncleMu.Lock()
self.possibleUncles[ev.Block.Hash()] = ev.Block
self.uncleMu.Unlock()
case core.TxPreEvent:
// Apply transaction to the pending state if we're not mining
if atomic.LoadInt32(&self.mining) == 0 {
self.currentMu.Lock()
self.current.commitTransactions(self.mux, types.Transactions{ev.Tx}, self.gasPrice, self.chain)
self.currentMu.Unlock()
}
}
case <-self.quit:
return
}
}
}

View File

@@ -68,7 +68,11 @@ func (api *PrivateAdminAPI) StartRPC(host *string, port *rpc.HexNumber, cors *st
}
if host == nil {
host = &api.node.httpHost
h := common.DefaultHTTPHost
if api.node.httpHost != "" {
h = api.node.httpHost
}
host = &h
}
if port == nil {
port = rpc.NewHexNumber(api.node.httpPort)
@@ -113,7 +117,11 @@ func (api *PrivateAdminAPI) StartWS(host *string, port *rpc.HexNumber, allowedOr
}
if host == nil {
host = &api.node.wsHost
h := common.DefaultWSHost
if api.node.wsHost != "" {
h = api.node.wsHost
}
host = &h
}
if port == nil {
port = rpc.NewHexNumber(api.node.wsPort)

View File

@@ -61,22 +61,22 @@ func wsHandshakeValidator(allowedOrigins []string) func(*websocket.Config, *http
allowAllOrigins = true
}
if origin != "" {
origins.Add(origin)
origins.Add(strings.ToLower(origin))
}
}
// allow localhost if no allowedOrigins are specified
// allow localhost if no allowedOrigins are specified.
if len(origins.List()) == 0 {
origins.Add("http://localhost")
if hostname, err := os.Hostname(); err == nil {
origins.Add("http://" + hostname)
origins.Add("http://" + strings.ToLower(hostname))
}
}
glog.V(logger.Debug).Infof("Allowed origin(s) for WS RPC interface %v\n", origins.List())
f := func(cfg *websocket.Config, req *http.Request) error {
origin := req.Header.Get("Origin")
origin := strings.ToLower(req.Header.Get("Origin"))
if allowAllOrigins || origins.Has(origin) {
return nil
}