eth: fix megacheck warnings

commit 8f06b7980d
parent 971079822e
Author: Egon Elbre
Date:   2017-08-06 08:54:25 +03:00

7 changed files with 17 additions and 40 deletions

eth/api.go
@@ -465,26 +465,6 @@ func (api *PrivateDebugAPI) traceBlock(block *types.Block, logConfig *vm.LogConf
 	return true, structLogger.StructLogs(), nil
 }
 
-// callmsg is the message type used for call transitions.
-type callmsg struct {
-	addr          common.Address
-	to            *common.Address
-	gas, gasPrice *big.Int
-	value         *big.Int
-	data          []byte
-}
-
-// accessor boilerplate to implement core.Message
-func (m callmsg) From() (common.Address, error)         { return m.addr, nil }
-func (m callmsg) FromFrontier() (common.Address, error) { return m.addr, nil }
-func (m callmsg) Nonce() uint64                         { return 0 }
-func (m callmsg) CheckNonce() bool                      { return false }
-func (m callmsg) To() *common.Address                   { return m.to }
-func (m callmsg) GasPrice() *big.Int                    { return m.gasPrice }
-func (m callmsg) Gas() *big.Int                         { return m.gas }
-func (m callmsg) Value() *big.Int                       { return m.value }
-func (m callmsg) Data() []byte                          { return m.data }
-
 // formatError formats a Go error into either an empty string or the data content
 // of the error itself.
 func formatError(err error) string {
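The warning behind this first hunk is megacheck's unused-code check: after earlier refactoring, nothing in package eth referenced callmsg or its core.Message accessor boilerplate, so the whole block was dead code. A minimal sketch of the same class of finding, with purely illustrative names (this is not go-ethereum code):

package demo

// callParams mirrors the callmsg situation: the type compiles fine, but no
// code in the package ever constructs or references it, so an unused-code
// checker (megacheck then, staticcheck's U1000 today) reports the type and
// every one of its methods as dead and safe to delete.
type callParams struct {
	payload []byte
}

func (c callParams) Data() []byte { return c.payload }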

eth/downloader/downloader_test.go
@@ -403,8 +403,7 @@ func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Ha
 	dl.lock.Lock()
 	defer dl.lock.Unlock()
 
-	var err error
-	err = dl.downloader.RegisterPeer(id, version, &downloadTesterPeer{dl, id, delay})
+	var err = dl.downloader.RegisterPeer(id, version, &downloadTesterPeer{dl, id, delay})
 	if err == nil {
 		// Assign the owned hashes, headers and blocks to the peer (deep copy)
 		dl.peerHashes[id] = make([]common.Hash, len(hashes))
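The hunk above fixes a gosimple finding (S1021 in today's staticcheck): a var declaration immediately followed by an assignment should be merged into one statement. A trivial standalone sketch, with made-up names:

package main

import "errors"

// register stands in for dl.downloader.RegisterPeer.
func register() error { return errors.New("already registered") }

func main() {
	// Flagged form:
	//   var err error
	//   err = register()
	// Merged form, as in the hunk above:
	var err = register()
	if err != nil {
		println(err.Error())
	}
}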
@@ -1381,7 +1380,7 @@ func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
 	go func() {
 		defer pending.Done()
 		if err := tester.sync("peer-half", nil, mode); err != nil {
-			t.Fatalf("failed to synchronise blocks: %v", err)
+			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
 		}
 	}()
 	<-starting
@@ -1398,7 +1397,7 @@ func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
 	go func() {
 		defer pending.Done()
 		if err := tester.sync("peer-full", nil, mode); err != nil {
-			t.Fatalf("failed to synchronise blocks: %v", err)
+			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
 		}
 	}()
 	<-starting
@@ -1454,7 +1453,7 @@ func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
 	go func() {
 		defer pending.Done()
 		if err := tester.sync("fork A", nil, mode); err != nil {
-			t.Fatalf("failed to synchronise blocks: %v", err)
+			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
 		}
 	}()
 	<-starting
@@ -1474,7 +1473,7 @@ func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
 	go func() {
 		defer pending.Done()
 		if err := tester.sync("fork B", nil, mode); err != nil {
-			t.Fatalf("failed to synchronise blocks: %v", err)
+			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
 		}
 	}()
 	<-starting
@@ -1535,7 +1534,7 @@ func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
 	go func() {
 		defer pending.Done()
 		if err := tester.sync("faulty", nil, mode); err == nil {
-			t.Fatalf("succeeded faulty synchronisation")
+			panic("succeeded faulty synchronisation")
 		}
 	}()
 	<-starting
@@ -1552,7 +1551,7 @@ func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
 	go func() {
 		defer pending.Done()
 		if err := tester.sync("valid", nil, mode); err != nil {
-			t.Fatalf("failed to synchronise blocks: %v", err)
+			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
 		}
 	}()
 	<-starting
@@ -1613,7 +1612,7 @@ func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
 	go func() {
 		defer pending.Done()
 		if err := tester.sync("attack", nil, mode); err == nil {
-			t.Fatalf("succeeded attacker synchronisation")
+			panic("succeeded attacker synchronisation")
		}
 	}()
 	<-starting
@@ -1630,7 +1629,7 @@ func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
 	go func() {
 		defer pending.Done()
 		if err := tester.sync("valid", nil, mode); err != nil {
-			t.Fatalf("failed to synchronise blocks: %v", err)
+			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
 		}
 	}()
 	<-starting
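All eight hunks above make the same fix: t.Fatalf may only be called from the goroutine running the test, because FailNow stops the test via runtime.Goexit, which terminates just the calling goroutine (staticcheck tracks this today as SA2002). From a spawned goroutine it can leave the test running, or even passing, after the failure; panicking instead crashes the whole test binary so the failure is never silent. A self-contained sketch of the pattern, with illustrative names:

package demo

import (
	"fmt"
	"sync"
	"testing"
)

// doSync stands in for tester.sync; imagine it can fail.
func doSync() error { return nil }

func TestSyncConcurrently(t *testing.T) {
	var pending sync.WaitGroup
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := doSync(); err != nil {
			// t.Fatalf here would Goexit only this goroutine and the
			// test itself could keep going; panic fails the binary loudly.
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	pending.Wait()
}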

eth/filters/api.go
@@ -54,7 +54,6 @@ type PublicFilterAPI struct {
 	backend   Backend
 	useMipMap bool
 	mux       *event.TypeMux
-	quit      chan struct{}
 	chainDb   ethdb.Database
 	events    *EventSystem
 	filtersMu sync.Mutex

eth/filters/filter.go
@@ -20,7 +20,6 @@ import (
 	"context"
 	"math"
 	"math/big"
-	"time"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core"
@@ -42,8 +41,6 @@ type Filter struct {
 	backend Backend
 
 	useMipMap bool
-	created   time.Time
-
 	db         ethdb.Database
 	begin, end int64
 	addresses  []common.Address

eth/filters/filter_system.go
@@ -74,7 +74,6 @@ type subscription struct {
 // subscription which match the subscription criteria.
 type EventSystem struct {
 	mux       *event.TypeMux
-	sub       *event.TypeMuxSubscription
 	backend   Backend
 	lightMode bool
 	lastHead  *types.Header
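The three filters hunks above are the same unused-code class as the callmsg removal earlier: the quit channel on PublicFilterAPI, the created timestamp on Filter (together with its now-orphaned "time" import), and the sub subscription on EventSystem were declared but never used anywhere in the package, so megacheck reports each field as dead.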

eth/filters/filter_system_test.go
@@ -18,6 +18,7 @@ package filters
 import (
 	"context"
+	"fmt"
 	"math/big"
 	"reflect"
 	"testing"
@@ -439,15 +440,15 @@ func TestPendingLogsSubscription(t *testing.T) {
 			}
 
 			if len(fetched) != len(tt.expected) {
-				t.Fatalf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched))
+				panic(fmt.Sprintf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched)))
 			}
 
 			for l := range fetched {
 				if fetched[l].Removed {
-					t.Errorf("expected log not to be removed for log %d in case %d", l, i)
+					panic(fmt.Sprintf("expected log not to be removed for log %d in case %d", l, i))
 				}
 				if !reflect.DeepEqual(fetched[l], tt.expected[l]) {
-					t.Errorf("invalid log on index %d for case %d", l, i)
+					panic(fmt.Sprintf("invalid log on index %d for case %d", l, i))
 				}
 			}
 		}()
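Same goroutine rule as in the downloader tests: these assertions run inside a go func(), so t.Fatalf and t.Errorf give way to panic, and "fmt" is imported for the Sprintf formatting that the testing calls previously did internally.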

eth/sync.go
@@ -138,7 +138,9 @@ func (pm *ProtocolManager) syncer() {
 	defer pm.downloader.Terminate()
 
 	// Wait for different events to fire synchronisation operations
-	forceSync := time.Tick(forceSyncCycle)
+	forceSync := time.NewTicker(forceSyncCycle)
+	defer forceSync.Stop()
+
 	for {
 		select {
 		case <-pm.newPeerCh:
@@ -148,7 +150,7 @@ func (pm *ProtocolManager) syncer() {
 			}
 			go pm.synchronise(pm.peers.BestPeer())
 
-		case <-forceSync:
+		case <-forceSync.C:
 			// Force a sync even if not enough peers are present
 			go pm.synchronise(pm.peers.BestPeer())
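The final hunk fixes a leak warning: time.Tick returns a channel whose underlying Ticker can never be stopped, so megacheck flags it in any function that can return (SA1015 in today's staticcheck). time.NewTicker with a deferred Stop releases the ticker when syncer exits. A standalone sketch with made-up names:

package main

import (
	"fmt"
	"time"
)

// run loops until quit closes. Using time.Tick here would leak the ticker
// once run returns; NewTicker with a deferred Stop does not.
func run(quit <-chan struct{}) {
	forceSync := time.NewTicker(100 * time.Millisecond)
	defer forceSync.Stop() // release the ticker when run returns

	for {
		select {
		case <-forceSync.C:
			fmt.Println("forced sync tick")
		case <-quit:
			return
		}
	}
}

func main() {
	quit := make(chan struct{})
	go run(quit)
	time.Sleep(250 * time.Millisecond)
	close(quit)
}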