// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package eth

import (
	"fmt"
	"math"
	"math/big"
	"math/rand"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/eth/downloader"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/params"
)

// Tests that block headers can be retrieved from a remote chain based on user queries.
func TestGetBlockHeaders63(t *testing.T) { testGetBlockHeaders(t, 63) }
func TestGetBlockHeaders64(t *testing.T) { testGetBlockHeaders(t, 64) }

func testGetBlockHeaders(t *testing.T, protocol int) {
	pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, downloader.MaxHashFetch+15, nil, nil)
	peer, _ := newTestPeer("peer", protocol, pm, true)
	defer peer.close()

	// Create a "random" unknown hash for testing
	var unknown common.Hash
	for i := range unknown {
		unknown[i] = byte(i)
	}
	// Create a batch of tests for various scenarios
	limit := uint64(downloader.MaxHeaderFetch)
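	// Note: a getBlockHeadersData query names an origin header by hash or number and
	// asks for up to Amount headers, with Skip headers omitted between consecutive
	// results (so Skip: 3 yields origin, origin+4, origin+8, ...) and Reverse walking
	// towards the genesis block. This summary is inferred from the expectations
	// below; the eth protocol wire definitions remain the authoritative source.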
	tests := []struct {
		query  *getBlockHeadersData // The query to execute for header retrieval
		expect []common.Hash        // The hashes of the blocks whose headers are expected
	}{
		// A single random block should be retrievable by hash and number too
		{
			&getBlockHeadersData{Origin: hashOrNumber{Hash: pm.blockchain.GetBlockByNumber(limit / 2).Hash()}, Amount: 1},
			[]common.Hash{pm.blockchain.GetBlockByNumber(limit / 2).Hash()},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 1},
			[]common.Hash{pm.blockchain.GetBlockByNumber(limit / 2).Hash()},
		},
		// Multiple headers should be retrievable in both directions
		{
			&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3},
			[]common.Hash{
				pm.blockchain.GetBlockByNumber(limit / 2).Hash(),
				pm.blockchain.GetBlockByNumber(limit/2 + 1).Hash(),
				pm.blockchain.GetBlockByNumber(limit/2 + 2).Hash(),
			},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true},
			[]common.Hash{
				pm.blockchain.GetBlockByNumber(limit / 2).Hash(),
				pm.blockchain.GetBlockByNumber(limit/2 - 1).Hash(),
				pm.blockchain.GetBlockByNumber(limit/2 - 2).Hash(),
			},
		},
		// Multiple headers with skip lists should be retrievable
		{
			&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3},
			[]common.Hash{
				pm.blockchain.GetBlockByNumber(limit / 2).Hash(),
				pm.blockchain.GetBlockByNumber(limit/2 + 4).Hash(),
				pm.blockchain.GetBlockByNumber(limit/2 + 8).Hash(),
			},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true},
			[]common.Hash{
				pm.blockchain.GetBlockByNumber(limit / 2).Hash(),
				pm.blockchain.GetBlockByNumber(limit/2 - 4).Hash(),
				pm.blockchain.GetBlockByNumber(limit/2 - 8).Hash(),
			},
		},
		// The chain endpoints should be retrievable
		{
			&getBlockHeadersData{Origin: hashOrNumber{Number: 0}, Amount: 1},
			[]common.Hash{pm.blockchain.GetBlockByNumber(0).Hash()},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: pm.blockchain.CurrentBlock().NumberU64()}, Amount: 1},
			[]common.Hash{pm.blockchain.CurrentBlock().Hash()},
		},
		// Ensure protocol limits are honored
		{
			&getBlockHeadersData{Origin: hashOrNumber{Number: pm.blockchain.CurrentBlock().NumberU64() - 1}, Amount: limit + 10, Reverse: true},
			pm.blockchain.GetBlockHashesFromHash(pm.blockchain.CurrentBlock().Hash(), limit),
		},
		// Check that requesting more than available is handled gracefully
		{
			&getBlockHeadersData{Origin: hashOrNumber{Number: pm.blockchain.CurrentBlock().NumberU64() - 4}, Skip: 3, Amount: 3},
			[]common.Hash{
				pm.blockchain.GetBlockByNumber(pm.blockchain.CurrentBlock().NumberU64() - 4).Hash(),
				pm.blockchain.GetBlockByNumber(pm.blockchain.CurrentBlock().NumberU64()).Hash(),
			},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true},
			[]common.Hash{
				pm.blockchain.GetBlockByNumber(4).Hash(),
				pm.blockchain.GetBlockByNumber(0).Hash(),
			},
		},
		// Check that requesting more than available is handled gracefully, even if mid-skip
		{
			&getBlockHeadersData{Origin: hashOrNumber{Number: pm.blockchain.CurrentBlock().NumberU64() - 4}, Skip: 2, Amount: 3},
			[]common.Hash{
				pm.blockchain.GetBlockByNumber(pm.blockchain.CurrentBlock().NumberU64() - 4).Hash(),
				pm.blockchain.GetBlockByNumber(pm.blockchain.CurrentBlock().NumberU64() - 1).Hash(),
			},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true},
			[]common.Hash{
				pm.blockchain.GetBlockByNumber(4).Hash(),
				pm.blockchain.GetBlockByNumber(1).Hash(),
			},
		},
		// Check a corner case where requesting more can iterate past the endpoints
		{
			&getBlockHeadersData{Origin: hashOrNumber{Number: 2}, Amount: 5, Reverse: true},
			[]common.Hash{
				pm.blockchain.GetBlockByNumber(2).Hash(),
				pm.blockchain.GetBlockByNumber(1).Hash(),
				pm.blockchain.GetBlockByNumber(0).Hash(),
			},
		},
		// Check a corner case where skipping overflow loops back into the chain start
		{
			&getBlockHeadersData{Origin: hashOrNumber{Hash: pm.blockchain.GetBlockByNumber(3).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64 - 1},
			[]common.Hash{
				pm.blockchain.GetBlockByNumber(3).Hash(),
			},
		},
		// Check a corner case where skipping overflow loops back to the same header
		{
			&getBlockHeadersData{Origin: hashOrNumber{Hash: pm.blockchain.GetBlockByNumber(1).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64},
			[]common.Hash{
				pm.blockchain.GetBlockByNumber(1).Hash(),
			},
		},
		// Check that non-existing headers aren't returned
		{
			&getBlockHeadersData{Origin: hashOrNumber{Hash: unknown}, Amount: 1},
			[]common.Hash{},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: pm.blockchain.CurrentBlock().NumberU64() + 1}, Amount: 1},
			[]common.Hash{},
		},
	}
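	// Note: the raw codes used below are assumed to correspond to the eth/63 wire
	// messages GetBlockHeadersMsg (0x03) and BlockHeadersMsg (0x04): the query is
	// sent on the former and the collected headers are expected on the latter.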
	// Run each of the tests and verify the results against the chain
	for i, tt := range tests {
		// Collect the headers to expect in the response
		headers := []*types.Header{}
		for _, hash := range tt.expect {
			headers = append(headers, pm.blockchain.GetBlockByHash(hash).Header())
		}
		// Send the hash request and verify the response
		p2p.Send(peer.app, 0x03, tt.query)
		if err := p2p.ExpectMsg(peer.app, 0x04, headers); err != nil {
			t.Errorf("test %d: headers mismatch: %v", i, err)
		}
		// If the test used number origins, repeat with hashes as well
		if tt.query.Origin.Hash == (common.Hash{}) {
			if origin := pm.blockchain.GetBlockByNumber(tt.query.Origin.Number); origin != nil {
				tt.query.Origin.Hash, tt.query.Origin.Number = origin.Hash(), 0

				p2p.Send(peer.app, 0x03, tt.query)
				if err := p2p.ExpectMsg(peer.app, 0x04, headers); err != nil {
					t.Errorf("test %d: headers mismatch: %v", i, err)
				}
			}
		}
	}
}

// Tests that block contents can be retrieved from a remote chain based on their hashes.
func TestGetBlockBodies63(t *testing.T) { testGetBlockBodies(t, 63) }
func TestGetBlockBodies64(t *testing.T) { testGetBlockBodies(t, 64) }

func testGetBlockBodies(t *testing.T, protocol int) {
	pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, downloader.MaxBlockFetch+15, nil, nil)
	peer, _ := newTestPeer("peer", protocol, pm, true)
	defer peer.close()

	// Create a batch of tests for various scenarios
	limit := downloader.MaxBlockFetch
	tests := []struct {
		random    int           // Number of blocks to fetch randomly from the chain
		explicit  []common.Hash // Explicitly requested blocks
		available []bool        // Availability of explicitly requested blocks
		expected  int           // Total number of existing blocks to expect
	}{
		{1, nil, nil, 1},             // A single random block should be retrievable
		{10, nil, nil, 10},           // Multiple random blocks should be retrievable
		{limit, nil, nil, limit},     // The maximum possible blocks should be retrievable
		{limit + 1, nil, nil, limit}, // No more than the possible block count should be returned
		{0, []common.Hash{pm.blockchain.Genesis().Hash()}, []bool{true}, 1},      // The genesis block should be retrievable
		{0, []common.Hash{pm.blockchain.CurrentBlock().Hash()}, []bool{true}, 1}, // The chain's head block should be retrievable
		{0, []common.Hash{{}}, []bool{false}, 0},                                 // A non-existent block should not be returned

		// Existing and non-existing blocks interleaved should not cause problems
		{0, []common.Hash{
			{},
			pm.blockchain.GetBlockByNumber(1).Hash(),
			{},
			pm.blockchain.GetBlockByNumber(10).Hash(),
			{},
			pm.blockchain.GetBlockByNumber(100).Hash(),
			{},
		}, []bool{false, true, false, true, false, true, false}, 3},
	}
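	// Note: bodies are requested below with the raw code 0x05 and expected on 0x06,
	// which are assumed to map to GetBlockBodiesMsg and BlockBodiesMsg in eth/63.
	// Unknown hashes are simply skipped in the response, which is why only
	// `expected` bodies are collected for each test case.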
	// Run each of the tests and verify the results against the chain
	for i, tt := range tests {
		// Collect the hashes to request, and the response to expect
		hashes, seen := []common.Hash{}, make(map[int64]bool)
		bodies := []*blockBody{}

		for j := 0; j < tt.random; j++ {
			for {
				num := rand.Int63n(int64(pm.blockchain.CurrentBlock().NumberU64()))
				if !seen[num] {
					seen[num] = true

					block := pm.blockchain.GetBlockByNumber(uint64(num))
					hashes = append(hashes, block.Hash())
					if len(bodies) < tt.expected {
						bodies = append(bodies, &blockBody{Transactions: block.Transactions(), Uncles: block.Uncles()})
					}
					break
				}
			}
		}
		for j, hash := range tt.explicit {
			hashes = append(hashes, hash)
			if tt.available[j] && len(bodies) < tt.expected {
				block := pm.blockchain.GetBlockByHash(hash)
				bodies = append(bodies, &blockBody{Transactions: block.Transactions(), Uncles: block.Uncles()})
			}
		}
		// Send the hash request and verify the response
		p2p.Send(peer.app, 0x05, hashes)
		if err := p2p.ExpectMsg(peer.app, 0x06, bodies); err != nil {
			t.Errorf("test %d: bodies mismatch: %v", i, err)
		}
	}
}

// Tests that the node state database can be retrieved based on hashes.
func TestGetNodeData63(t *testing.T) { testGetNodeData(t, 63) }
func TestGetNodeData64(t *testing.T) { testGetNodeData(t, 64) }

func testGetNodeData(t *testing.T, protocol int) {
	// Define three accounts to simulate transactions with
	acc1Key, _ := crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
	acc2Key, _ := crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
	acc1Addr := crypto.PubkeyToAddress(acc1Key.PublicKey)
	acc2Addr := crypto.PubkeyToAddress(acc2Key.PublicKey)

	signer := types.HomesteadSigner{}
	// Create a chain generator with some simple transactions (blatantly stolen from @fjl/chain_markets_test)
	generator := func(i int, block *core.BlockGen) {
		switch i {
		case 0:
			// In block 1, the test bank sends account #1 some ether.
			tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testBank), acc1Addr, big.NewInt(10000), params.TxGas, nil, nil), signer, testBankKey)
			block.AddTx(tx)
		case 1:
			// In block 2, the test bank sends some more ether to account #1.
			// acc1Addr passes it on to account #2.
			tx1, _ := types.SignTx(types.NewTransaction(block.TxNonce(testBank), acc1Addr, big.NewInt(1000), params.TxGas, nil, nil), signer, testBankKey)
			tx2, _ := types.SignTx(types.NewTransaction(block.TxNonce(acc1Addr), acc2Addr, big.NewInt(1000), params.TxGas, nil, nil), signer, acc1Key)
			block.AddTx(tx1)
			block.AddTx(tx2)
		case 2:
			// Block 3 is empty but was mined by account #2.
			block.SetCoinbase(acc2Addr)
			block.SetExtra([]byte("yeehaw"))
		case 3:
			// Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data).
			b2 := block.PrevBlock(1).Header()
			b2.Extra = []byte("foo")
			block.AddUncle(b2)
			b3 := block.PrevBlock(2).Header()
			b3.Extra = []byte("foo")
			block.AddUncle(b3)
		}
	}
	// Assemble the test environment
	pm, db := newTestProtocolManagerMust(t, downloader.FullSync, 4, generator, nil)
	peer, _ := newTestPeer("peer", protocol, pm, true)
	defer peer.close()

	// For now, fetch the entire chain db
	hashes := []common.Hash{}

	it := db.NewIterator()
	for it.Next() {
		if key := it.Key(); len(key) == common.HashLength {
			hashes = append(hashes, common.BytesToHash(key))
		}
	}
	it.Release()
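
	// Note: 0x0d and 0x0e below are assumed to be the eth/63 GetNodeDataMsg and
	// NodeDataMsg codes. Every database key of exactly common.HashLength bytes is
	// treated as a hash-keyed entry (such as a trie node), so requesting all of
	// them should let the full state be reconstructed from the response.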
	p2p.Send(peer.app, 0x0d, hashes)
	msg, err := peer.app.ReadMsg()
	if err != nil {
		t.Fatalf("failed to read node data response: %v", err)
	}
	if msg.Code != 0x0e {
		t.Fatalf("response packet code mismatch: have %x, want %x", msg.Code, 0x0e)
	}
	var data [][]byte
	if err := msg.Decode(&data); err != nil {
		t.Fatalf("failed to decode response node data: %v", err)
	}
	// Verify that all hashes correspond to the requested data, and reconstruct a state tree
	for i, want := range hashes {
		if hash := crypto.Keccak256Hash(data[i]); hash != want {
			t.Errorf("data hash mismatch: have %x, want %x", hash, want)
		}
	}
	statedb := rawdb.NewMemoryDatabase()
	for i := 0; i < len(data); i++ {
		statedb.Put(hashes[i].Bytes(), data[i])
	}
	accounts := []common.Address{testBank, acc1Addr, acc2Addr}
	for i := uint64(0); i <= pm.blockchain.CurrentBlock().NumberU64(); i++ {
		trie, _ := state.New(pm.blockchain.GetBlockByNumber(i).Root(), state.NewDatabase(statedb))

		for j, acc := range accounts {
			// Compare the reconstructed state against the canonical state at the same block
			state, _ := pm.blockchain.StateAt(pm.blockchain.GetBlockByNumber(i).Root())
			bw := state.GetBalance(acc)
			bh := trie.GetBalance(acc)

			if (bw != nil && bh == nil) || (bw == nil && bh != nil) {
				t.Errorf("test %d, account %d: balance mismatch: have %v, want %v", i, j, bh, bw)
			}
			if bw != nil && bh != nil && bw.Cmp(bh) != 0 {
				t.Errorf("test %d, account %d: balance mismatch: have %v, want %v", i, j, bh, bw)
			}
		}
	}
}

// Tests that the transaction receipts can be retrieved based on hashes.
func TestGetReceipt63(t *testing.T) { testGetReceipt(t, 63) }
func TestGetReceipt64(t *testing.T) { testGetReceipt(t, 64) }

func testGetReceipt(t *testing.T, protocol int) {
	// Define three accounts to simulate transactions with
	acc1Key, _ := crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
	acc2Key, _ := crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
	acc1Addr := crypto.PubkeyToAddress(acc1Key.PublicKey)
	acc2Addr := crypto.PubkeyToAddress(acc2Key.PublicKey)

	signer := types.HomesteadSigner{}
	// Create a chain generator with some simple transactions (blatantly stolen from @fjl/chain_markets_test)
	generator := func(i int, block *core.BlockGen) {
		switch i {
		case 0:
			// In block 1, the test bank sends account #1 some ether.
			tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testBank), acc1Addr, big.NewInt(10000), params.TxGas, nil, nil), signer, testBankKey)
			block.AddTx(tx)
		case 1:
			// In block 2, the test bank sends some more ether to account #1.
			// acc1Addr passes it on to account #2.
			tx1, _ := types.SignTx(types.NewTransaction(block.TxNonce(testBank), acc1Addr, big.NewInt(1000), params.TxGas, nil, nil), signer, testBankKey)
			tx2, _ := types.SignTx(types.NewTransaction(block.TxNonce(acc1Addr), acc2Addr, big.NewInt(1000), params.TxGas, nil, nil), signer, acc1Key)
			block.AddTx(tx1)
			block.AddTx(tx2)
		case 2:
			// Block 3 is empty but was mined by account #2.
			block.SetCoinbase(acc2Addr)
			block.SetExtra([]byte("yeehaw"))
		case 3:
			// Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data).
			b2 := block.PrevBlock(1).Header()
			b2.Extra = []byte("foo")
			block.AddUncle(b2)
			b3 := block.PrevBlock(2).Header()
			b3.Extra = []byte("foo")
			block.AddUncle(b3)
		}
	}
	// Assemble the test environment
	pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, 4, generator, nil)
	peer, _ := newTestPeer("peer", protocol, pm, true)
	defer peer.close()

	// Collect the hashes to request, and the response to expect
	hashes, receipts := []common.Hash{}, []types.Receipts{}
	for i := uint64(0); i <= pm.blockchain.CurrentBlock().NumberU64(); i++ {
		block := pm.blockchain.GetBlockByNumber(i)

		hashes = append(hashes, block.Hash())
		receipts = append(receipts, pm.blockchain.GetReceiptsByHash(block.Hash()))
	}
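	// Note: 0x0f and 0x10 below are assumed to be the eth/63 GetReceiptsMsg and
	// ReceiptsMsg codes; receipts are expected back grouped per requested block.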
	// Send the hash request and verify the response
	p2p.Send(peer.app, 0x0f, hashes)
	if err := p2p.ExpectMsg(peer.app, 0x10, receipts); err != nil {
		t.Errorf("receipts mismatch: %v", err)
	}
}

// Tests that after the eth protocol handshake, clients perform a mutual checkpoint
// challenge to validate each other's chains. Hash mismatches, or missing ones
// during a fast sync, should lead to the peer getting dropped.
func TestCheckpointChallenge(t *testing.T) {
	tests := []struct {
		syncmode   downloader.SyncMode
		checkpoint bool
		timeout    bool
		empty      bool
		match      bool
		drop       bool
	}{
		// If checkpointing is not enabled locally, don't challenge and don't drop
		{downloader.FullSync, false, false, false, false, false},
		{downloader.FastSync, false, false, false, false, false},

		// If checkpointing is enabled locally and remote response is empty, only drop during fast sync
		{downloader.FullSync, true, false, true, false, false},
		{downloader.FastSync, true, false, true, false, true}, // Special case, fast sync, unsynced peer

		// If checkpointing is enabled locally and remote response mismatches, always drop
		{downloader.FullSync, true, false, false, false, true},
		{downloader.FastSync, true, false, false, false, true},

		// If checkpointing is enabled locally and remote response matches, never drop
		{downloader.FullSync, true, false, false, true, false},
		{downloader.FastSync, true, false, false, true, false},

		// If checkpointing is enabled locally and remote times out, always drop
		{downloader.FullSync, true, true, false, true, true},
		{downloader.FastSync, true, true, false, true, true},
	}
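	// Note on the mechanism being exercised: when a trusted checkpoint is configured,
	// the local node is expected to send a freshly connected peer a GetBlockHeaders
	// request for the checkpoint's section head and to drop that peer if the answer
	// times out or does not match the checkpointed header. This is a summary of the
	// behaviour asserted by testCheckpointChallenge below.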
	for _, tt := range tests {
		t.Run(fmt.Sprintf("sync %v checkpoint %v timeout %v empty %v match %v", tt.syncmode, tt.checkpoint, tt.timeout, tt.empty, tt.match), func(t *testing.T) {
			testCheckpointChallenge(t, tt.syncmode, tt.checkpoint, tt.timeout, tt.empty, tt.match, tt.drop)
		})
	}
}

func testCheckpointChallenge(t *testing.T, syncmode downloader.SyncMode, checkpoint bool, timeout bool, empty bool, match bool, drop bool) {
	// Reduce the checkpoint handshake challenge timeout
	defer func(old time.Duration) { syncChallengeTimeout = old }(syncChallengeTimeout)
	syncChallengeTimeout = 250 * time.Millisecond

	// Initialize a chain and generate a fake CHT if checkpointing is enabled
	var (
		db     = rawdb.NewMemoryDatabase()
		config = new(params.ChainConfig)
	)
	(&core.Genesis{Config: config}).MustCommit(db) // Commit genesis block
	// If checkpointing is enabled, create and inject a fake CHT and the corresponding
	// challenge response.
	var response *types.Header
	var cht *params.TrustedCheckpoint
	if checkpoint {
		index := uint64(rand.Intn(500))
		number := (index+1)*params.CHTFrequency - 1
		response = &types.Header{Number: big.NewInt(int64(number)), Extra: []byte("valid")}

		cht = &params.TrustedCheckpoint{
			SectionIndex: index,
			SectionHead:  response.Hash(),
		}
	}
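	// Note: params.CHTFrequency is the number of headers per CHT section, so
	// (index+1)*params.CHTFrequency - 1 is presumably the last header covered by
	// section `index`, i.e. the header the sync challenge asks the peer for.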
	// Create a checkpoint aware protocol manager
	blockchain, err := core.NewBlockChain(db, nil, config, ethash.NewFaker(), vm.Config{}, nil)
	if err != nil {
		t.Fatalf("failed to create new blockchain: %v", err)
	}
	pm, err := NewProtocolManager(config, cht, syncmode, DefaultConfig.NetworkId, new(event.TypeMux), new(testTxPool), ethash.NewFaker(), blockchain, db, 1, nil)
	if err != nil {
		t.Fatalf("failed to start test protocol manager: %v", err)
	}
	pm.Start(1000)
	defer pm.Stop()

	// Connect a new peer and check that we receive the checkpoint challenge
	peer, _ := newTestPeer("peer", eth63, pm, true)
	defer peer.close()

	if checkpoint {
		challenge := &getBlockHeadersData{
			Origin:  hashOrNumber{Number: response.Number.Uint64()},
			Amount:  1,
			Skip:    0,
			Reverse: false,
		}
		if err := p2p.ExpectMsg(peer.app, GetBlockHeadersMsg, challenge); err != nil {
			t.Fatalf("challenge mismatch: %v", err)
		}
		// Create a block to reply to the challenge if no timeout is simulated
		if !timeout {
			if empty {
				if err := p2p.Send(peer.app, BlockHeadersMsg, []*types.Header{}); err != nil {
					t.Fatalf("failed to answer challenge: %v", err)
				}
			} else if match {
				if err := p2p.Send(peer.app, BlockHeadersMsg, []*types.Header{response}); err != nil {
					t.Fatalf("failed to answer challenge: %v", err)
				}
			} else {
				if err := p2p.Send(peer.app, BlockHeadersMsg, []*types.Header{{Number: response.Number}}); err != nil {
					t.Fatalf("failed to answer challenge: %v", err)
				}
			}
		}
	}
	// Wait until the test timeout passes to ensure proper cleanup
	time.Sleep(syncChallengeTimeout + 100*time.Millisecond)

	// Verify that the remote peer is maintained or dropped
	if drop {
		if peers := pm.peers.Len(); peers != 0 {
			t.Fatalf("peer count mismatch: have %d, want %d", peers, 0)
		}
	} else {
		if peers := pm.peers.Len(); peers != 1 {
			t.Fatalf("peer count mismatch: have %d, want %d", peers, 1)
		}
	}
}

func TestBroadcastBlock(t *testing.T) {
	var tests = []struct {
		totalPeers        int
		broadcastExpected int
	}{
		{1, 1},
		{2, 2},
		{3, 3},
		{4, 4},
		{5, 4},
		{9, 4},
		{12, 4},
		{16, 4},
		{26, 5},
		{100, 10},
	}
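	// Note: the expected fan-out above appears to follow roughly the square root of
	// the peer count with a small lower bound, i.e. something like
	// min(totalPeers, max(4, sqrt(totalPeers))); the exact rule is defined by the
	// block propagation logic in the protocol manager, not by this test.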
	for _, test := range tests {
		testBroadcastBlock(t, test.totalPeers, test.broadcastExpected)
	}
}

func testBroadcastBlock(t *testing.T, totalPeers, broadcastExpected int) {
	var (
		evmux   = new(event.TypeMux)
		pow     = ethash.NewFaker()
		db      = rawdb.NewMemoryDatabase()
		config  = &params.ChainConfig{}
		gspec   = &core.Genesis{Config: config}
		genesis = gspec.MustCommit(db)
	)
	blockchain, err := core.NewBlockChain(db, nil, config, pow, vm.Config{}, nil)
	if err != nil {
		t.Fatalf("failed to create new blockchain: %v", err)
	}
	pm, err := NewProtocolManager(config, nil, downloader.FullSync, DefaultConfig.NetworkId, evmux, new(testTxPool), pow, blockchain, db, 1, nil)
	if err != nil {
		t.Fatalf("failed to start test protocol manager: %v", err)
	}
	pm.Start(1000)
	defer pm.Stop()
	var peers []*testPeer
	for i := 0; i < totalPeers; i++ {
		peer, _ := newTestPeer(fmt.Sprintf("peer %d", i), eth63, pm, true)
		defer peer.close()
		peers = append(peers, peer)
	}
	chain, _ := core.GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, 1, func(i int, gen *core.BlockGen) {})
	pm.BroadcastBlock(chain[0], true /*propagate*/)

	errCh := make(chan error, totalPeers)
	doneCh := make(chan struct{}, totalPeers)
	for _, peer := range peers {
		go func(p *testPeer) {
			if err := p2p.ExpectMsg(p.app, NewBlockMsg, &newBlockData{Block: chain[0], TD: big.NewInt(131136)}); err != nil {
				errCh <- err
			} else {
				doneCh <- struct{}{}
			}
		}(peer)
	}
	timeout := time.After(time.Second)
	var receivedCount int
outer:
	for {
		select {
		case err = <-errCh:
			break outer
		case <-doneCh:
			receivedCount++
			if receivedCount == totalPeers {
				break outer
			}
		case <-timeout:
			break outer
		}
	}
	for _, peer := range peers {
		peer.app.Close()
	}
	if err != nil {
		t.Errorf("error matching block by peer: %v", err)
	}
	if receivedCount != broadcastExpected {
		t.Errorf("block broadcast to %d peers, expected %d", receivedCount, broadcastExpected)
	}
}