// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
2015-06-16 14:02:43 +03:00
|
|
|
package fetcher
|
|
|
|
|
|
|
|
import (
|
|
|
|
"errors"
|
|
|
|
"math/big"
|
2015-06-17 17:08:32 +03:00
|
|
|
"sync"
|
2015-06-16 14:02:43 +03:00
|
|
|
"sync/atomic"
|
|
|
|
"testing"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/ethereum/go-ethereum/common"
|
2015-06-27 03:57:53 +03:00
|
|
|
"github.com/ethereum/go-ethereum/core"
|
2015-06-16 14:02:43 +03:00
|
|
|
"github.com/ethereum/go-ethereum/core/types"
|
2015-08-14 21:25:41 +03:00
|
|
|
"github.com/ethereum/go-ethereum/crypto"
|
2015-06-27 03:57:53 +03:00
|
|
|
"github.com/ethereum/go-ethereum/ethdb"
|
2015-08-03 03:46:34 +03:00
|
|
|
"github.com/ethereum/go-ethereum/params"
|
2015-06-16 14:02:43 +03:00
|
|
|
)
|
|
|
|
|
|
|
|
var (
	// Shared test fixtures: an in-memory database seeded with a funded genesis
	// block, plus a standalone block that belongs to no chain.
	testdb, _    = ethdb.NewMemDatabase()
	testKey, _   = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
	testAddress  = crypto.PubkeyToAddress(testKey.PublicKey)
	genesis      = core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))
	unknownBlock = types.NewBlock(&types.Header{GasLimit: params.GenesisGasLimit}, nil, nil, nil)
)
|
|
|
|
|
2015-06-27 03:57:53 +03:00
|
|
|
// makeChain creates a chain of n blocks starting at and including parent.
// The returned hash chain is ordered head->parent. In addition, every 3rd block
// contains a transaction and every 5th an uncle to allow testing correct block
// reassembly.
func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common.Hash]*types.Block) {
	blocks, _ := core.GenerateChain(params.TestChainConfig, parent, testdb, n, func(i int, block *core.BlockGen) {
		// The seed doubles as the coinbase so chains made with different seeds
		// produce different block hashes.
		block.SetCoinbase(common.Address{seed})

		// If the block number is a multiple of 3, send a bonus transaction to the miner
		if parent == genesis && i%3 == 0 {
			tx, err := types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(testKey)
			if err != nil {
				panic(err)
			}
			block.AddTx(tx)
		}
		// If the block number is a multiple of 5, add a bonus uncle to the block
		if i%5 == 0 {
			block.AddUncle(&types.Header{ParentHash: block.PrevBlock(i - 1).Hash(), Number: big.NewInt(int64(i - 1))})
		}
	})
	// Assemble the head-first hash chain and the hash->block index; both include
	// the parent itself as the final (already known) entry.
	hashes := make([]common.Hash, n+1)
	hashes[len(hashes)-1] = parent.Hash()
	blockm := make(map[common.Hash]*types.Block, n+1)
	blockm[parent.Hash()] = parent
	for i, b := range blocks {
		hashes[len(hashes)-i-2] = b.Hash()
		blockm[b.Hash()] = b
	}
	return hashes, blockm
}
|
|
|
|
|
|
|
|
// fetcherTester is a test simulator for mocking out local block chain.
type fetcherTester struct {
	fetcher *Fetcher // Fetcher instance under test

	hashes []common.Hash                // Hash chain belonging to the tester
	blocks map[common.Hash]*types.Block // Blocks belonging to the tester
	drops  map[string]bool              // Map of peers dropped by the fetcher

	lock sync.RWMutex // Guards hashes, blocks and drops against concurrent fetcher callbacks
}
|
|
|
|
|
|
|
|
// newTester creates a new fetcher test mocker, pre-seeded with the genesis
// block and wired into a freshly started fetcher.
func newTester() *fetcherTester {
	tester := &fetcherTester{
		hashes: []common.Hash{genesis.Hash()},
		blocks: map[common.Hash]*types.Block{genesis.Hash(): genesis},
		drops:  make(map[string]bool),
	}
	// Register all the tester's mock callbacks with the fetcher and start it up
	tester.fetcher = New(tester.getBlock, tester.verifyBlock, tester.broadcastBlock, tester.chainHeight, tester.insertChain, tester.dropPeer)
	tester.fetcher.Start()

	return tester
}
|
|
|
|
|
2015-06-18 18:00:19 +03:00
|
|
|
// getBlock retrieves a block from the tester's block chain, or nil if unknown.
func (f *fetcherTester) getBlock(hash common.Hash) *types.Block {
	f.lock.RLock()
	defer f.lock.RUnlock()

	return f.blocks[hash]
}
|
|
|
|
|
|
|
|
// verifyBlock is a nop placeholder for the block header verification: every
// block is accepted unconditionally.
func (f *fetcherTester) verifyBlock(block *types.Block, parent *types.Block) error {
	return nil
}
|
|
|
|
|
2015-06-17 18:25:23 +03:00
|
|
|
// broadcastBlock is a nop placeholder for the block broadcasting.
func (f *fetcherTester) broadcastBlock(block *types.Block, propagate bool) {
}
|
|
|
|
|
2015-06-16 17:39:04 +03:00
|
|
|
// chainHeight retrieves the current height (block number) of the chain.
func (f *fetcherTester) chainHeight() uint64 {
	f.lock.RLock()
	defer f.lock.RUnlock()

	// The last entry of the hash chain is always the current head
	return f.blocks[f.hashes[len(f.hashes)-1]].NumberU64()
}
|
|
|
|
|
2015-06-17 18:25:23 +03:00
|
|
|
// insertChain injects a batch of new blocks into the simulated chain, stopping
// at the first block with an unknown parent (error) or a stale height (no-op).
func (f *fetcherTester) insertChain(blocks types.Blocks) (int, error) {
	f.lock.Lock()
	defer f.lock.Unlock()

	for i, block := range blocks {
		// Make sure the parent is known
		if _, ok := f.blocks[block.ParentHash()]; !ok {
			return i, errors.New("unknown parent")
		}
		// Discard any new blocks if the same height already exists
		if block.NumberU64() <= f.blocks[f.hashes[len(f.hashes)-1]].NumberU64() {
			return i, nil
		}
		// Otherwise build our current chain
		f.hashes = append(f.hashes, block.Hash())
		f.blocks[block.Hash()] = block
	}
	return 0, nil
}
|
|
|
|
|
2015-08-14 21:25:41 +03:00
|
|
|
// dropPeer is an emulator for the peer removal, simply accumulating the various
// peers dropped by the fetcher.
func (f *fetcherTester) dropPeer(peer string) {
	f.lock.Lock()
	defer f.lock.Unlock()

	f.drops[peer] = true
}
|
|
|
|
|
2015-08-14 21:25:41 +03:00
|
|
|
// makeHeaderFetcher retrieves a block header fetcher associated with a simulated
// peer. The drift offsets the delivery timestamp handed to the fetcher.
func (f *fetcherTester) makeHeaderFetcher(blocks map[common.Hash]*types.Block, drift time.Duration) headerRequesterFn {
	// Snapshot the block map so later caller-side mutations don't leak into the peer
	closure := make(map[common.Hash]*types.Block)
	for hash, block := range blocks {
		closure[hash] = block
	}
	// Create a function that returns a header from the closure
	return func(hash common.Hash) error {
		// Gather the blocks to return
		headers := make([]*types.Header, 0, 1)
		if block, ok := closure[hash]; ok {
			headers = append(headers, block.Header())
		}
		// Return on a new thread
		go f.fetcher.FilterHeaders(headers, time.Now().Add(drift))

		return nil
	}
}
|
|
|
|
|
|
|
|
// makeBodyFetcher retrieves a block body fetcher associated with a simulated
// peer. The drift offsets the delivery timestamp handed to the fetcher.
func (f *fetcherTester) makeBodyFetcher(blocks map[common.Hash]*types.Block, drift time.Duration) bodyRequesterFn {
	// Snapshot the block map so later caller-side mutations don't leak into the peer
	closure := make(map[common.Hash]*types.Block)
	for hash, block := range blocks {
		closure[hash] = block
	}
	// Create a function that returns blocks from the closure
	return func(hashes []common.Hash) error {
		// Gather the block bodies to return
		transactions := make([][]*types.Transaction, 0, len(hashes))
		uncles := make([][]*types.Header, 0, len(hashes))

		for _, hash := range hashes {
			if block, ok := closure[hash]; ok {
				transactions = append(transactions, block.Transactions())
				uncles = append(uncles, block.Uncles())
			}
		}
		// Return on a new thread
		go f.fetcher.FilterBodies(transactions, uncles, time.Now().Add(drift))

		return nil
	}
}
|
|
|
|
|
2015-08-14 21:25:41 +03:00
|
|
|
// verifyFetchingEvent verifies that one single event arrives on a fetching
// channel (or that none does, if arrive is false).
func verifyFetchingEvent(t *testing.T, fetching chan []common.Hash, arrive bool) {
	if arrive {
		select {
		case <-fetching:
		case <-time.After(time.Second):
			t.Fatalf("fetching timeout")
		}
	} else {
		select {
		case <-fetching:
			t.Fatalf("fetching invoked")
		case <-time.After(10 * time.Millisecond):
		}
	}
}
|
|
|
|
|
|
|
|
// verifyCompletingEvent verifies that one single event arrives on a completing
// channel (or that none does, if arrive is false).
func verifyCompletingEvent(t *testing.T, completing chan []common.Hash, arrive bool) {
	if arrive {
		select {
		case <-completing:
		case <-time.After(time.Second):
			t.Fatalf("completing timeout")
		}
	} else {
		select {
		case <-completing:
			t.Fatalf("completing invoked")
		case <-time.After(10 * time.Millisecond):
		}
	}
}
|
|
|
|
|
2015-06-22 18:28:38 +03:00
|
|
|
// verifyImportEvent verifies that one single event arrives on an import channel
// (or that none does, if arrive is false).
func verifyImportEvent(t *testing.T, imported chan *types.Block, arrive bool) {
	if arrive {
		select {
		case <-imported:
		case <-time.After(time.Second):
			t.Fatalf("import timeout")
		}
	} else {
		select {
		case <-imported:
			t.Fatalf("import invoked")
		case <-time.After(10 * time.Millisecond):
		}
	}
}
|
|
|
|
|
|
|
|
// verifyImportCount verifies that exactly count number of events arrive on an
// import hook channel, and that no extra ones follow.
func verifyImportCount(t *testing.T, imported chan *types.Block, count int) {
	for i := 0; i < count; i++ {
		select {
		case <-imported:
		case <-time.After(time.Second):
			t.Fatalf("block %d: import timeout", i+1)
		}
	}
	verifyImportDone(t, imported)
}
|
|
|
|
|
|
|
|
// verifyImportDone verifies that no more events are arriving on an import channel.
func verifyImportDone(t *testing.T, imported chan *types.Block) {
	select {
	case <-imported:
		t.Fatalf("extra block imported")
	case <-time.After(50 * time.Millisecond):
	}
}
|
|
|
|
|
2015-06-16 14:02:43 +03:00
|
|
|
// Tests that a fetcher accepts block announcements and initiates retrievals for
// them, successfully importing into the local chain.
func TestSequentialAnnouncements62(t *testing.T) { testSequentialAnnouncements(t, 62) }
func TestSequentialAnnouncements63(t *testing.T) { testSequentialAnnouncements(t, 63) }
func TestSequentialAnnouncements64(t *testing.T) { testSequentialAnnouncements(t, 64) }

func testSequentialAnnouncements(t *testing.T, protocol int) {
	// Create a chain of blocks to import
	targetBlocks := 4 * hashLimit
	hashes, blocks := makeChain(targetBlocks, 0, genesis)

	tester := newTester()
	headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
	bodyFetcher := tester.makeBodyFetcher(blocks, 0)

	// Iteratively announce blocks until all are imported
	imported := make(chan *types.Block)
	tester.fetcher.importedHook = func(block *types.Block) { imported <- block }

	// Announce oldest-first (hashes is head-first, skipping the known parent)
	for i := len(hashes) - 2; i >= 0; i-- {
		tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
		verifyImportEvent(t, imported, true)
	}
	verifyImportDone(t, imported)
}
|
|
|
|
|
|
|
|
// Tests that if blocks are announced by multiple peers (or even the same buggy
// peer), they will only get downloaded at most once.
func TestConcurrentAnnouncements62(t *testing.T) { testConcurrentAnnouncements(t, 62) }
func TestConcurrentAnnouncements63(t *testing.T) { testConcurrentAnnouncements(t, 63) }
func TestConcurrentAnnouncements64(t *testing.T) { testConcurrentAnnouncements(t, 64) }

func testConcurrentAnnouncements(t *testing.T, protocol int) {
	// Create a chain of blocks to import
	targetBlocks := 4 * hashLimit
	hashes, blocks := makeChain(targetBlocks, 0, genesis)

	// Assemble a tester with a built in counter for the requests
	tester := newTester()
	headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
	bodyFetcher := tester.makeBodyFetcher(blocks, 0)

	// Wrap the header fetcher to count every retrieval the fetcher triggers
	counter := uint32(0)
	headerWrapper := func(hash common.Hash) error {
		atomic.AddUint32(&counter, 1)
		return headerFetcher(hash)
	}
	// Iteratively announce blocks until all are imported
	imported := make(chan *types.Block)
	tester.fetcher.importedHook = func(block *types.Block) { imported <- block }

	// Announce each block from two peers, with slightly drifted arrival times
	for i := len(hashes) - 2; i >= 0; i-- {
		tester.fetcher.Notify("first", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerWrapper, bodyFetcher)
		tester.fetcher.Notify("second", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout+time.Millisecond), headerWrapper, bodyFetcher)
		tester.fetcher.Notify("second", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout-time.Millisecond), headerWrapper, bodyFetcher)
		verifyImportEvent(t, imported, true)
	}
	verifyImportDone(t, imported)

	// Make sure no blocks were retrieved twice
	if int(counter) != targetBlocks {
		t.Fatalf("retrieval count mismatch: have %v, want %v", counter, targetBlocks)
	}
}
|
|
|
|
|
|
|
|
// Tests that announcements arriving while a previous is being fetched still
// results in a valid import.
func TestOverlappingAnnouncements62(t *testing.T) { testOverlappingAnnouncements(t, 62) }
func TestOverlappingAnnouncements63(t *testing.T) { testOverlappingAnnouncements(t, 63) }
func TestOverlappingAnnouncements64(t *testing.T) { testOverlappingAnnouncements(t, 64) }

func testOverlappingAnnouncements(t *testing.T, protocol int) {
	// Create a chain of blocks to import
	targetBlocks := 4 * hashLimit
	hashes, blocks := makeChain(targetBlocks, 0, genesis)

	tester := newTester()
	headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
	bodyFetcher := tester.makeBodyFetcher(blocks, 0)

	// Iteratively announce blocks, but overlap them continuously
	overlap := 16
	imported := make(chan *types.Block, len(hashes)-1)
	// Pre-fill the channel with `overlap` sentinels so the loop below always
	// stays that many announcements ahead of the actual imports.
	for i := 0; i < overlap; i++ {
		imported <- nil
	}
	tester.fetcher.importedHook = func(block *types.Block) { imported <- block }

	for i := len(hashes) - 2; i >= 0; i-- {
		tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
		select {
		case <-imported:
		case <-time.After(time.Second):
			t.Fatalf("block %d: import timeout", len(hashes)-i)
		}
	}
	// Wait for all the imports to complete and check count
	verifyImportCount(t, imported, overlap)
}
|
2015-06-16 15:35:15 +03:00
|
|
|
|
|
|
|
// Tests that announces already being retrieved will not be duplicated.
func TestPendingDeduplication62(t *testing.T) { testPendingDeduplication(t, 62) }
func TestPendingDeduplication63(t *testing.T) { testPendingDeduplication(t, 63) }
func TestPendingDeduplication64(t *testing.T) { testPendingDeduplication(t, 64) }

func testPendingDeduplication(t *testing.T, protocol int) {
	// Create a hash and corresponding block
	hashes, blocks := makeChain(1, 0, genesis)

	// Assemble a tester with a built in counter and delayed fetcher
	tester := newTester()
	headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
	bodyFetcher := tester.makeBodyFetcher(blocks, 0)

	delay := 50 * time.Millisecond
	counter := uint32(0)
	headerWrapper := func(hash common.Hash) error {
		atomic.AddUint32(&counter, 1)

		// Simulate a long running fetch: deliver the header only after `delay`
		go func() {
			time.Sleep(delay)
			headerFetcher(hash)
		}()
		return nil
	}
	// Announce the same block many times until it's fetched (wait for any pending ops)
	for tester.getBlock(hashes[0]) == nil {
		tester.fetcher.Notify("repeater", hashes[0], 1, time.Now().Add(-arriveTimeout), headerWrapper, bodyFetcher)
		time.Sleep(time.Millisecond)
	}
	// Allow any straggler deliveries to land before counting
	time.Sleep(delay)

	// Check that all blocks were imported and none fetched twice
	if imported := len(tester.blocks); imported != 2 {
		t.Fatalf("synchronised block mismatch: have %v, want %v", imported, 2)
	}
	if int(counter) != 1 {
		t.Fatalf("retrieval count mismatch: have %v, want %v", counter, 1)
	}
}
|
2015-06-16 17:39:04 +03:00
|
|
|
|
|
|
|
// Tests that announcements retrieved in a random order are cached and eventually
// imported when all the gaps are filled in.
func TestRandomArrivalImport62(t *testing.T) { testRandomArrivalImport(t, 62) }
func TestRandomArrivalImport63(t *testing.T) { testRandomArrivalImport(t, 63) }
func TestRandomArrivalImport64(t *testing.T) { testRandomArrivalImport(t, 64) }

func testRandomArrivalImport(t *testing.T, protocol int) {
	// Create a chain of blocks to import, and choose one to delay
	targetBlocks := maxQueueDist
	hashes, blocks := makeChain(targetBlocks, 0, genesis)
	skip := targetBlocks / 2

	tester := newTester()
	headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
	bodyFetcher := tester.makeBodyFetcher(blocks, 0)

	// Iteratively announce blocks, skipping one entry
	imported := make(chan *types.Block, len(hashes)-1)
	tester.fetcher.importedHook = func(block *types.Block) { imported <- block }

	for i := len(hashes) - 1; i >= 0; i-- {
		if i != skip {
			tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
			time.Sleep(time.Millisecond)
		}
	}
	// Finally announce the skipped entry and check full import
	tester.fetcher.Notify("valid", hashes[skip], uint64(len(hashes)-skip-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
	verifyImportCount(t, imported, len(hashes)-1)
}
|
2015-06-16 18:14:52 +03:00
|
|
|
|
|
|
|
// Tests that direct block enqueues (due to block propagation vs. hash announce)
// are correctly scheduled, filling any import queue gaps.
func TestQueueGapFill62(t *testing.T) { testQueueGapFill(t, 62) }
func TestQueueGapFill63(t *testing.T) { testQueueGapFill(t, 63) }
func TestQueueGapFill64(t *testing.T) { testQueueGapFill(t, 64) }

func testQueueGapFill(t *testing.T, protocol int) {
	// Create a chain of blocks to import, and choose one to not announce at all
	targetBlocks := maxQueueDist
	hashes, blocks := makeChain(targetBlocks, 0, genesis)
	skip := targetBlocks / 2

	tester := newTester()
	headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
	bodyFetcher := tester.makeBodyFetcher(blocks, 0)

	// Iteratively announce blocks, skipping one entry
	imported := make(chan *types.Block, len(hashes)-1)
	tester.fetcher.importedHook = func(block *types.Block) { imported <- block }

	for i := len(hashes) - 1; i >= 0; i-- {
		if i != skip {
			tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
			time.Sleep(time.Millisecond)
		}
	}
	// Fill the missing block directly as if propagated
	tester.fetcher.Enqueue("valid", blocks[hashes[skip]])
	verifyImportCount(t, imported, len(hashes)-1)
}
|
2015-06-16 18:43:58 +03:00
|
|
|
|
|
|
|
// Tests that blocks arriving from various sources (multiple propagations, hash
// announces, etc) do not get scheduled for import multiple times.
func TestImportDeduplication62(t *testing.T) { testImportDeduplication(t, 62) }
func TestImportDeduplication63(t *testing.T) { testImportDeduplication(t, 63) }
func TestImportDeduplication64(t *testing.T) { testImportDeduplication(t, 64) }

func testImportDeduplication(t *testing.T, protocol int) {
	// Create two blocks to import (one for duplication, the other for stalling)
	hashes, blocks := makeChain(2, 0, genesis)

	// Create the tester and wrap the importer with a counter
	tester := newTester()
	headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
	bodyFetcher := tester.makeBodyFetcher(blocks, 0)

	counter := uint32(0)
	tester.fetcher.insertChain = func(blocks types.Blocks) (int, error) {
		atomic.AddUint32(&counter, uint32(len(blocks)))
		return tester.insertChain(blocks)
	}
	// Instrument the fetching and imported events
	fetching := make(chan []common.Hash)
	imported := make(chan *types.Block, len(hashes)-1)
	tester.fetcher.fetchingHook = func(hashes []common.Hash) { fetching <- hashes }
	tester.fetcher.importedHook = func(block *types.Block) { imported <- block }

	// Announce the duplicating block, wait for retrieval, and also propagate directly
	tester.fetcher.Notify("valid", hashes[0], 1, time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
	<-fetching

	tester.fetcher.Enqueue("valid", blocks[hashes[0]])
	tester.fetcher.Enqueue("valid", blocks[hashes[0]])
	tester.fetcher.Enqueue("valid", blocks[hashes[0]])

	// Fill the missing block directly as if propagated, and check import uniqueness
	tester.fetcher.Enqueue("valid", blocks[hashes[1]])
	verifyImportCount(t, imported, 2)

	// Only two insertChain invocations should have happened despite the duplicates
	if counter != 2 {
		t.Fatalf("import invocation count mismatch: have %v, want %v", counter, 2)
	}
}
|
2015-06-16 23:18:01 +03:00
|
|
|
|
|
|
|
// Tests that blocks with numbers much lower or higher than our current head get
// discarded to prevent wasting resources on useless blocks from faulty peers.
func TestDistantPropagationDiscarding(t *testing.T) {
	// Create a long chain to import and define the discard boundaries
	hashes, blocks := makeChain(3*maxQueueDist, 0, genesis)
	head := hashes[len(hashes)/2]

	// Note: hashes is ordered head->parent, so a LOWER index means a HIGHER
	// block number; `low`/`high` name the block number relative to the head.
	low, high := len(hashes)/2+maxUncleDist+1, len(hashes)/2-maxQueueDist-1

	// Create a tester and simulate a head block being the middle of the above chain
	tester := newTester()

	tester.lock.Lock()
	tester.hashes = []common.Hash{head}
	tester.blocks = map[common.Hash]*types.Block{head: blocks[head]}
	tester.lock.Unlock()

	// Ensure that a block with a lower number than the threshold is discarded
	tester.fetcher.Enqueue("lower", blocks[hashes[low]])
	time.Sleep(10 * time.Millisecond)
	if !tester.fetcher.queue.Empty() {
		t.Fatalf("fetcher queued stale block")
	}
	// Ensure that a block with a higher number than the threshold is discarded
	tester.fetcher.Enqueue("higher", blocks[hashes[high]])
	time.Sleep(10 * time.Millisecond)
	if !tester.fetcher.queue.Empty() {
		t.Fatalf("fetcher queued future block")
	}
}
|
2015-06-22 14:07:08 +03:00
|
|
|
|
2015-08-14 21:25:41 +03:00
|
|
|
// Tests that announcements with numbers much lower or higher than our current
// head get discarded to prevent wasting resources on useless blocks from faulty
// peers.
func TestDistantAnnouncementDiscarding62(t *testing.T) { testDistantAnnouncementDiscarding(t, 62) }
func TestDistantAnnouncementDiscarding63(t *testing.T) { testDistantAnnouncementDiscarding(t, 63) }
func TestDistantAnnouncementDiscarding64(t *testing.T) { testDistantAnnouncementDiscarding(t, 64) }

func testDistantAnnouncementDiscarding(t *testing.T, protocol int) {
	// Create a long chain to import and define the discard boundaries
	hashes, blocks := makeChain(3*maxQueueDist, 0, genesis)
	head := hashes[len(hashes)/2]

	// Note: hashes is ordered head->parent, so a LOWER index means a HIGHER
	// block number; `low`/`high` name the block number relative to the head.
	low, high := len(hashes)/2+maxUncleDist+1, len(hashes)/2-maxQueueDist-1

	// Create a tester and simulate a head block being the middle of the above chain
	tester := newTester()

	tester.lock.Lock()
	tester.hashes = []common.Hash{head}
	tester.blocks = map[common.Hash]*types.Block{head: blocks[head]}
	tester.lock.Unlock()

	headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
	bodyFetcher := tester.makeBodyFetcher(blocks, 0)

	fetching := make(chan struct{}, 2)
	tester.fetcher.fetchingHook = func(hashes []common.Hash) { fetching <- struct{}{} }

	// Ensure that a block with a lower number than the threshold is discarded
	tester.fetcher.Notify("lower", hashes[low], blocks[hashes[low]].NumberU64(), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
	select {
	case <-time.After(50 * time.Millisecond):
	case <-fetching:
		t.Fatalf("fetcher requested stale header")
	}
	// Ensure that a block with a higher number than the threshold is discarded
	tester.fetcher.Notify("higher", hashes[high], blocks[hashes[high]].NumberU64(), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
	select {
	case <-time.After(50 * time.Millisecond):
	case <-fetching:
		t.Fatalf("fetcher requested future header")
	}
}
|
|
|
|
|
|
|
|
// Tests that peers announcing blocks with invalid numbers (i.e. not matching
// the headers provided afterwards) get dropped as malicious.
func TestInvalidNumberAnnouncement62(t *testing.T) { testInvalidNumberAnnouncement(t, 62) }
func TestInvalidNumberAnnouncement63(t *testing.T) { testInvalidNumberAnnouncement(t, 63) }
func TestInvalidNumberAnnouncement64(t *testing.T) { testInvalidNumberAnnouncement(t, 64) }

func testInvalidNumberAnnouncement(t *testing.T, protocol int) {
	// Create a single block to import and check numbers against
	hashes, blocks := makeChain(1, 0, genesis)

	tester := newTester()
	headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
	bodyFetcher := tester.makeBodyFetcher(blocks, 0)

	imported := make(chan *types.Block)
	tester.fetcher.importedHook = func(block *types.Block) { imported <- block }

	// Announce a block with a bad number, check for immediate drop
	tester.fetcher.Notify("bad", hashes[0], 2, time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
	verifyImportEvent(t, imported, false)

	tester.lock.RLock()
	dropped := tester.drops["bad"]
	tester.lock.RUnlock()

	if !dropped {
		t.Fatalf("peer with invalid numbered announcement not dropped")
	}
	// Make sure a good announcement passes without a drop
	tester.fetcher.Notify("good", hashes[0], 1, time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
	verifyImportEvent(t, imported, true)

	tester.lock.RLock()
	dropped = tester.drops["good"]
	tester.lock.RUnlock()

	if dropped {
		t.Fatalf("peer with valid numbered announcement dropped")
	}
	verifyImportDone(t, imported)
}
|
|
|
|
|
|
|
|
// Tests that if a block is empty (i.e. header only), no body request should be
|
|
|
|
// made, and instead the header should be assembled into a whole block in itself.
|
|
|
|
func TestEmptyBlockShortCircuit62(t *testing.T) { testEmptyBlockShortCircuit(t, 62) }
|
|
|
|
func TestEmptyBlockShortCircuit63(t *testing.T) { testEmptyBlockShortCircuit(t, 63) }
|
|
|
|
func TestEmptyBlockShortCircuit64(t *testing.T) { testEmptyBlockShortCircuit(t, 64) }
|
|
|
|
|
|
|
|
func testEmptyBlockShortCircuit(t *testing.T, protocol int) {
|
|
|
|
// Create a chain of blocks to import
|
|
|
|
hashes, blocks := makeChain(32, 0, genesis)
|
|
|
|
|
|
|
|
tester := newTester()
|
|
|
|
headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
|
|
|
|
bodyFetcher := tester.makeBodyFetcher(blocks, 0)
|
|
|
|
|
|
|
|
// Add a monitoring hook for all internal events
|
|
|
|
fetching := make(chan []common.Hash)
|
|
|
|
tester.fetcher.fetchingHook = func(hashes []common.Hash) { fetching <- hashes }
|
|
|
|
|
|
|
|
completing := make(chan []common.Hash)
|
|
|
|
tester.fetcher.completingHook = func(hashes []common.Hash) { completing <- hashes }
|
|
|
|
|
|
|
|
imported := make(chan *types.Block)
|
|
|
|
tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
|
|
|
|
|
|
|
|
// Iteratively announce blocks until all are imported
|
|
|
|
for i := len(hashes) - 2; i >= 0; i-- {
|
2016-07-21 12:36:38 +03:00
|
|
|
tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
|
2015-08-14 21:25:41 +03:00
|
|
|
|
|
|
|
// All announces should fetch the header
|
|
|
|
verifyFetchingEvent(t, fetching, true)
|
|
|
|
|
|
|
|
// Only blocks with data contents should request bodies
|
|
|
|
verifyCompletingEvent(t, completing, len(blocks[hashes[i]].Transactions()) > 0 || len(blocks[hashes[i]].Uncles()) > 0)
|
|
|
|
|
|
|
|
// Irrelevant of the construct, import should succeed
|
|
|
|
verifyImportEvent(t, imported, true)
|
|
|
|
}
|
|
|
|
verifyImportDone(t, imported)
|
|
|
|
}
|
|
|
|
|
2015-06-22 14:07:08 +03:00
|
|
|
// Tests that a peer is unable to use unbounded memory with sending infinite
|
|
|
|
// block announcements to a node, but that even in the face of such an attack,
|
|
|
|
// the fetcher remains operational.
|
2015-08-14 21:25:41 +03:00
|
|
|
func TestHashMemoryExhaustionAttack62(t *testing.T) { testHashMemoryExhaustionAttack(t, 62) }
|
|
|
|
func TestHashMemoryExhaustionAttack63(t *testing.T) { testHashMemoryExhaustionAttack(t, 63) }
|
|
|
|
func TestHashMemoryExhaustionAttack64(t *testing.T) { testHashMemoryExhaustionAttack(t, 64) }
|
|
|
|
|
|
|
|
func testHashMemoryExhaustionAttack(t *testing.T, protocol int) {
|
2015-06-22 18:08:28 +03:00
|
|
|
// Create a tester with instrumented import hooks
|
2015-06-22 14:07:08 +03:00
|
|
|
tester := newTester()
|
|
|
|
|
2015-10-13 12:04:25 +03:00
|
|
|
imported, announces := make(chan *types.Block), int32(0)
|
2015-06-22 18:08:28 +03:00
|
|
|
tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
|
2015-10-13 12:04:25 +03:00
|
|
|
tester.fetcher.announceChangeHook = func(hash common.Hash, added bool) {
|
|
|
|
if added {
|
|
|
|
atomic.AddInt32(&announces, 1)
|
|
|
|
} else {
|
|
|
|
atomic.AddInt32(&announces, -1)
|
|
|
|
}
|
|
|
|
}
|
2015-06-22 14:07:08 +03:00
|
|
|
// Create a valid chain and an infinite junk chain
|
2015-06-27 03:57:53 +03:00
|
|
|
targetBlocks := hashLimit + 2*maxQueueDist
|
|
|
|
hashes, blocks := makeChain(targetBlocks, 0, genesis)
|
2015-08-14 21:25:41 +03:00
|
|
|
validHeaderFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
|
|
|
|
validBodyFetcher := tester.makeBodyFetcher(blocks, 0)
|
2015-06-22 14:07:08 +03:00
|
|
|
|
2015-06-27 03:57:53 +03:00
|
|
|
attack, _ := makeChain(targetBlocks, 0, unknownBlock)
|
2015-08-14 21:25:41 +03:00
|
|
|
attackerHeaderFetcher := tester.makeHeaderFetcher(nil, -gatherSlack)
|
|
|
|
attackerBodyFetcher := tester.makeBodyFetcher(nil, 0)
|
2015-06-22 14:07:08 +03:00
|
|
|
|
|
|
|
// Feed the tester a huge hashset from the attacker, and a limited from the valid peer
|
|
|
|
for i := 0; i < len(attack); i++ {
|
|
|
|
if i < maxQueueDist {
|
2016-07-21 12:36:38 +03:00
|
|
|
tester.fetcher.Notify("valid", hashes[len(hashes)-2-i], uint64(i+1), time.Now(), validHeaderFetcher, validBodyFetcher)
|
2015-06-22 14:07:08 +03:00
|
|
|
}
|
2016-07-21 12:36:38 +03:00
|
|
|
tester.fetcher.Notify("attacker", attack[i], 1 /* don't distance drop */, time.Now(), attackerHeaderFetcher, attackerBodyFetcher)
|
2015-06-22 14:07:08 +03:00
|
|
|
}
|
2015-10-13 12:04:25 +03:00
|
|
|
if count := atomic.LoadInt32(&announces); count != hashLimit+maxQueueDist {
|
|
|
|
t.Fatalf("queued announce count mismatch: have %d, want %d", count, hashLimit+maxQueueDist)
|
2015-06-22 14:07:08 +03:00
|
|
|
}
|
2015-06-22 18:08:28 +03:00
|
|
|
// Wait for fetches to complete
|
2015-06-22 18:28:38 +03:00
|
|
|
verifyImportCount(t, imported, maxQueueDist)
|
|
|
|
|
2015-06-22 14:07:08 +03:00
|
|
|
// Feed the remaining valid hashes to ensure DOS protection state remains clean
|
2015-06-22 18:08:28 +03:00
|
|
|
for i := len(hashes) - maxQueueDist - 2; i >= 0; i-- {
|
2016-07-21 12:36:38 +03:00
|
|
|
tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), validHeaderFetcher, validBodyFetcher)
|
2015-08-14 21:25:41 +03:00
|
|
|
verifyImportEvent(t, imported, true)
|
2015-06-22 14:07:08 +03:00
|
|
|
}
|
2015-06-22 18:28:38 +03:00
|
|
|
verifyImportDone(t, imported)
|
2015-06-22 14:07:08 +03:00
|
|
|
}
|
2015-06-22 16:49:47 +03:00
|
|
|
|
|
|
|
// Tests that blocks sent to the fetcher (either through propagation or via hash
|
|
|
|
// announces and retrievals) don't pile up indefinitely, exhausting available
|
|
|
|
// system memory.
|
|
|
|
func TestBlockMemoryExhaustionAttack(t *testing.T) {
|
2015-06-22 18:08:28 +03:00
|
|
|
// Create a tester with instrumented import hooks
|
2015-06-22 16:49:47 +03:00
|
|
|
tester := newTester()
|
|
|
|
|
2015-10-13 12:04:25 +03:00
|
|
|
imported, enqueued := make(chan *types.Block), int32(0)
|
2015-06-22 18:08:28 +03:00
|
|
|
tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
|
2015-10-13 12:04:25 +03:00
|
|
|
tester.fetcher.queueChangeHook = func(hash common.Hash, added bool) {
|
|
|
|
if added {
|
|
|
|
atomic.AddInt32(&enqueued, 1)
|
|
|
|
} else {
|
|
|
|
atomic.AddInt32(&enqueued, -1)
|
|
|
|
}
|
|
|
|
}
|
2015-06-22 16:49:47 +03:00
|
|
|
// Create a valid chain and a batch of dangling (but in range) blocks
|
2015-06-27 03:57:53 +03:00
|
|
|
targetBlocks := hashLimit + 2*maxQueueDist
|
|
|
|
hashes, blocks := makeChain(targetBlocks, 0, genesis)
|
2015-06-22 16:49:47 +03:00
|
|
|
attack := make(map[common.Hash]*types.Block)
|
2015-06-27 03:57:53 +03:00
|
|
|
for i := byte(0); len(attack) < blockLimit+2*maxQueueDist; i++ {
|
|
|
|
hashes, blocks := makeChain(maxQueueDist-1, i, unknownBlock)
|
2015-06-22 16:49:47 +03:00
|
|
|
for _, hash := range hashes[:maxQueueDist-2] {
|
|
|
|
attack[hash] = blocks[hash]
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Try to feed all the attacker blocks make sure only a limited batch is accepted
|
|
|
|
for _, block := range attack {
|
|
|
|
tester.fetcher.Enqueue("attacker", block)
|
|
|
|
}
|
2015-06-27 03:57:53 +03:00
|
|
|
time.Sleep(200 * time.Millisecond)
|
2015-10-13 12:04:25 +03:00
|
|
|
if queued := atomic.LoadInt32(&enqueued); queued != blockLimit {
|
2015-06-22 16:49:47 +03:00
|
|
|
t.Fatalf("queued block count mismatch: have %d, want %d", queued, blockLimit)
|
|
|
|
}
|
|
|
|
// Queue up a batch of valid blocks, and check that a new peer is allowed to do so
|
|
|
|
for i := 0; i < maxQueueDist-1; i++ {
|
|
|
|
tester.fetcher.Enqueue("valid", blocks[hashes[len(hashes)-3-i]])
|
|
|
|
}
|
|
|
|
time.Sleep(100 * time.Millisecond)
|
2015-10-13 12:04:25 +03:00
|
|
|
if queued := atomic.LoadInt32(&enqueued); queued != blockLimit+maxQueueDist-1 {
|
2015-06-22 16:49:47 +03:00
|
|
|
t.Fatalf("queued block count mismatch: have %d, want %d", queued, blockLimit+maxQueueDist-1)
|
|
|
|
}
|
|
|
|
// Insert the missing piece (and sanity check the import)
|
|
|
|
tester.fetcher.Enqueue("valid", blocks[hashes[len(hashes)-2]])
|
2015-06-22 18:28:38 +03:00
|
|
|
verifyImportCount(t, imported, maxQueueDist)
|
|
|
|
|
2015-06-22 16:49:47 +03:00
|
|
|
// Insert the remaining blocks in chunks to ensure clean DOS protection
|
|
|
|
for i := maxQueueDist; i < len(hashes)-1; i++ {
|
|
|
|
tester.fetcher.Enqueue("valid", blocks[hashes[len(hashes)-2-i]])
|
2015-08-14 21:25:41 +03:00
|
|
|
verifyImportEvent(t, imported, true)
|
2015-06-22 16:49:47 +03:00
|
|
|
}
|
2015-06-22 18:28:38 +03:00
|
|
|
verifyImportDone(t, imported)
|
2015-06-22 16:49:47 +03:00
|
|
|
}
|