// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package eth

import (
	"fmt"
	"math/big"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/forkid"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/eth/downloader"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
)

func init() {
	// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(false))))
}

var testAccount, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")

// Tests that handshake failures are detected and reported correctly.
func TestStatusMsgErrors63(t *testing.T) {
	pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, 0, nil, nil)
	var (
		genesis = pm.blockchain.Genesis()
		head    = pm.blockchain.CurrentHeader()
		td      = pm.blockchain.GetTd(head.Hash(), head.Number.Uint64())
	)
	defer pm.Stop()

	tests := []struct {
		code      uint64
		data      interface{}
		wantError error
	}{
		{
			code: TransactionMsg, data: []interface{}{},
			wantError: errResp(ErrNoStatusMsg, "first msg has code 2 (!= 0)"),
		},
		{
			code: StatusMsg, data: statusData63{10, DefaultConfig.NetworkId, td, head.Hash(), genesis.Hash()},
			wantError: errResp(ErrProtocolVersionMismatch, "10 (!= %d)", 63),
		},
		{
			code: StatusMsg, data: statusData63{63, 999, td, head.Hash(), genesis.Hash()},
			wantError: errResp(ErrNetworkIDMismatch, "999 (!= %d)", DefaultConfig.NetworkId),
		},
		{
			code: StatusMsg, data: statusData63{63, DefaultConfig.NetworkId, td, head.Hash(), common.Hash{3}},
			wantError: errResp(ErrGenesisMismatch, "0300000000000000 (!= %x)", genesis.Hash().Bytes()[:8]),
		},
	}
	for i, test := range tests {
		p, errc := newTestPeer("peer", 63, pm, false)
		// The send call might hang until reset because
		// the protocol might not read the payload.
		go p2p.Send(p.app, test.code, test.data)

		select {
		case err := <-errc:
			if err == nil {
				t.Errorf("test %d: protocol returned nil error, want %q", i, test.wantError)
			} else if err.Error() != test.wantError.Error() {
				t.Errorf("test %d: wrong error: got %q, want %q", i, err, test.wantError)
			}
		case <-time.After(2 * time.Second):
			t.Errorf("protocol did not shut down within 2 seconds")
		}
		p.close()
	}
}

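// Tests that eth/64 handshake failures, including fork ID mismatches, are
// detected and reported correctly.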
func TestStatusMsgErrors64(t *testing.T) {
	pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, 0, nil, nil)
	var (
		genesis = pm.blockchain.Genesis()
		head    = pm.blockchain.CurrentHeader()
		td      = pm.blockchain.GetTd(head.Hash(), head.Number.Uint64())
		forkID  = forkid.NewID(pm.blockchain)
	)
	defer pm.Stop()

	tests := []struct {
		code      uint64
		data      interface{}
		wantError error
	}{
		{
			code: TransactionMsg, data: []interface{}{},
			wantError: errResp(ErrNoStatusMsg, "first msg has code 2 (!= 0)"),
		},
		{
			code: StatusMsg, data: statusData{10, DefaultConfig.NetworkId, td, head.Hash(), genesis.Hash(), forkID},
			wantError: errResp(ErrProtocolVersionMismatch, "10 (!= %d)", 64),
		},
		{
			code: StatusMsg, data: statusData{64, 999, td, head.Hash(), genesis.Hash(), forkID},
			wantError: errResp(ErrNetworkIDMismatch, "999 (!= %d)", DefaultConfig.NetworkId),
		},
		{
			code: StatusMsg, data: statusData{64, DefaultConfig.NetworkId, td, head.Hash(), common.Hash{3}, forkID},
			wantError: errResp(ErrGenesisMismatch, "0300000000000000000000000000000000000000000000000000000000000000 (!= %x)", genesis.Hash()),
		},
		{
			code: StatusMsg, data: statusData{64, DefaultConfig.NetworkId, td, head.Hash(), genesis.Hash(), forkid.ID{Hash: [4]byte{0x00, 0x01, 0x02, 0x03}}},
			wantError: errResp(ErrForkIDRejected, forkid.ErrLocalIncompatibleOrStale.Error()),
		},
	}
	for i, test := range tests {
		p, errc := newTestPeer("peer", 64, pm, false)
		// The send call might hang until reset because
		// the protocol might not read the payload.
		go p2p.Send(p.app, test.code, test.data)

		select {
		case err := <-errc:
			if err == nil {
				t.Errorf("test %d: protocol returned nil error, want %q", i, test.wantError)
			} else if err.Error() != test.wantError.Error() {
				t.Errorf("test %d: wrong error: got %q, want %q", i, err, test.wantError)
			}
		case <-time.After(2 * time.Second):
			t.Errorf("protocol did not shut down within 2 seconds")
		}
		p.close()
	}
}

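// Tests that peers whose chains diverge only at a scheduled future fork can
// still connect, but get rejected once their fork IDs become incompatible.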
func TestForkIDSplit(t *testing.T) {
	var (
		engine = ethash.NewFaker()

		configNoFork  = &params.ChainConfig{HomesteadBlock: big.NewInt(1)}
		configProFork = &params.ChainConfig{
			HomesteadBlock: big.NewInt(1),
			EIP150Block:    big.NewInt(2),
			EIP155Block:    big.NewInt(2),
			EIP158Block:    big.NewInt(2),
			ByzantiumBlock: big.NewInt(3),
		}
		dbNoFork  = rawdb.NewMemoryDatabase()
		dbProFork = rawdb.NewMemoryDatabase()

		gspecNoFork  = &core.Genesis{Config: configNoFork}
		gspecProFork = &core.Genesis{Config: configProFork}

		genesisNoFork  = gspecNoFork.MustCommit(dbNoFork)
		genesisProFork = gspecProFork.MustCommit(dbProFork)

		chainNoFork, _  = core.NewBlockChain(dbNoFork, nil, configNoFork, engine, vm.Config{}, nil)
		chainProFork, _ = core.NewBlockChain(dbProFork, nil, configProFork, engine, vm.Config{}, nil)

		blocksNoFork, _  = core.GenerateChain(configNoFork, genesisNoFork, engine, dbNoFork, 2, nil)
		blocksProFork, _ = core.GenerateChain(configProFork, genesisProFork, engine, dbProFork, 2, nil)

		ethNoFork, _  = NewProtocolManager(configNoFork, nil, downloader.FullSync, 1, new(event.TypeMux), &testTxPool{pool: make(map[common.Hash]*types.Transaction)}, engine, chainNoFork, dbNoFork, 1, nil)
		ethProFork, _ = NewProtocolManager(configProFork, nil, downloader.FullSync, 1, new(event.TypeMux), &testTxPool{pool: make(map[common.Hash]*types.Transaction)}, engine, chainProFork, dbProFork, 1, nil)
	)
	ethNoFork.Start(1000)
	ethProFork.Start(1000)

	// Both nodes should allow the other to connect (same genesis, next fork is the same)
	p2pNoFork, p2pProFork := p2p.MsgPipe()
	peerNoFork := newPeer(64, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork, nil)
	peerProFork := newPeer(64, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork, nil)

	errc := make(chan error, 2)
	go func() { errc <- ethNoFork.handle(peerProFork) }()
	go func() { errc <- ethProFork.handle(peerNoFork) }()

	select {
	case err := <-errc:
		t.Fatalf("frontier nofork <-> profork failed: %v", err)
	case <-time.After(250 * time.Millisecond):
		p2pNoFork.Close()
		p2pProFork.Close()
	}
	// Progress into Homestead. Forks match, so we don't care what the future holds
	chainNoFork.InsertChain(blocksNoFork[:1])
	chainProFork.InsertChain(blocksProFork[:1])

	p2pNoFork, p2pProFork = p2p.MsgPipe()
	peerNoFork = newPeer(64, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork, nil)
	peerProFork = newPeer(64, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork, nil)

	errc = make(chan error, 2)
	go func() { errc <- ethNoFork.handle(peerProFork) }()
	go func() { errc <- ethProFork.handle(peerNoFork) }()

	select {
	case err := <-errc:
		t.Fatalf("homestead nofork <-> profork failed: %v", err)
	case <-time.After(250 * time.Millisecond):
		p2pNoFork.Close()
		p2pProFork.Close()
	}
	// Progress into Spurious. Forks mismatch, signalling differing chains, reject
	chainNoFork.InsertChain(blocksNoFork[1:2])
	chainProFork.InsertChain(blocksProFork[1:2])

	p2pNoFork, p2pProFork = p2p.MsgPipe()
	peerNoFork = newPeer(64, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork, nil)
	peerProFork = newPeer(64, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork, nil)

	errc = make(chan error, 2)
	go func() { errc <- ethNoFork.handle(peerProFork) }()
	go func() { errc <- ethProFork.handle(peerNoFork) }()

	select {
	case err := <-errc:
		if want := errResp(ErrForkIDRejected, forkid.ErrLocalIncompatibleOrStale.Error()); err.Error() != want.Error() {
			t.Fatalf("fork ID rejection error mismatch: have %v, want %v", err, want)
		}
	case <-time.After(250 * time.Millisecond):
		t.Fatalf("split peers not rejected")
	}
}

// This test checks that received transactions are added to the local pool.
func TestRecvTransactions63(t *testing.T) { testRecvTransactions(t, 63) }
func TestRecvTransactions64(t *testing.T) { testRecvTransactions(t, 64) }
func TestRecvTransactions65(t *testing.T) { testRecvTransactions(t, 65) }

func testRecvTransactions(t *testing.T, protocol int) {
	txAdded := make(chan []*types.Transaction)
	pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, 0, nil, txAdded)
	pm.acceptTxs = 1 // mark synced to accept transactions
	p, _ := newTestPeer("peer", protocol, pm, true)
	defer pm.Stop()
	defer p.close()

	tx := newTestTransaction(testAccount, 0, 0)
	if err := p2p.Send(p.app, TransactionMsg, []interface{}{tx}); err != nil {
		t.Fatalf("send error: %v", err)
	}
	select {
	case added := <-txAdded:
		if len(added) != 1 {
			t.Errorf("wrong number of added transactions: got %d, want 1", len(added))
		} else if added[0].Hash() != tx.Hash() {
			t.Errorf("added wrong tx hash: got %v, want %v", added[0].Hash(), tx.Hash())
		}
	case <-time.After(2 * time.Second):
		t.Errorf("no NewTxsEvent received within 2 seconds")
	}
}

// This test checks that pending transactions are sent.
func TestSendTransactions63(t *testing.T) { testSendTransactions(t, 63) }
func TestSendTransactions64(t *testing.T) { testSendTransactions(t, 64) }
func TestSendTransactions65(t *testing.T) { testSendTransactions(t, 65) }

func testSendTransactions(t *testing.T, protocol int) {
	pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, 0, nil, nil)
	defer pm.Stop()

	// Fill the pool with big transactions (use a subscription to wait until all
	// the transactions are announced to avoid spurious events causing extra
	// broadcasts).
	const txsize = txsyncPackSize / 10
	alltxs := make([]*types.Transaction, 100)
	for nonce := range alltxs {
		alltxs[nonce] = newTestTransaction(testAccount, uint64(nonce), txsize)
	}
	pm.txpool.AddRemotes(alltxs)
	time.Sleep(100 * time.Millisecond) // Wait until the new tx event gets out of the system (lame)

	// Connect several peers. They should all receive the pending transactions.
	var wg sync.WaitGroup
	checktxs := func(p *testPeer) {
		defer wg.Done()
		defer p.close()
		seen := make(map[common.Hash]bool)
		for _, tx := range alltxs {
			seen[tx.Hash()] = false
		}
		for n := 0; n < len(alltxs) && !t.Failed(); {
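			// Depending on the protocol version, transactions arrive either as full
			// objects (eth/63 and eth/64) or as announced hashes only (eth/65).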
			var forAllHashes func(callback func(hash common.Hash))
			switch protocol {
			case 63:
				fallthrough
			case 64:
				msg, err := p.app.ReadMsg()
				if err != nil {
					t.Errorf("%v: read error: %v", p.Peer, err)
					continue
				} else if msg.Code != TransactionMsg {
					t.Errorf("%v: got code %d, want TransactionMsg", p.Peer, msg.Code)
					continue
				}
				var txs []*types.Transaction
				if err := msg.Decode(&txs); err != nil {
					t.Errorf("%v: %v", p.Peer, err)
					continue
				}
				forAllHashes = func(callback func(hash common.Hash)) {
					for _, tx := range txs {
						callback(tx.Hash())
					}
				}
			case 65:
				msg, err := p.app.ReadMsg()
				if err != nil {
					t.Errorf("%v: read error: %v", p.Peer, err)
					continue
				} else if msg.Code != NewPooledTransactionHashesMsg {
					t.Errorf("%v: got code %d, want NewPooledTransactionHashesMsg", p.Peer, msg.Code)
					continue
				}
				var hashes []common.Hash
				if err := msg.Decode(&hashes); err != nil {
					t.Errorf("%v: %v", p.Peer, err)
					continue
				}
				forAllHashes = func(callback func(hash common.Hash)) {
					for _, h := range hashes {
						callback(h)
					}
				}
			}
			forAllHashes(func(hash common.Hash) {
				seentx, want := seen[hash]
				if seentx {
					t.Errorf("%v: got tx more than once: %x", p.Peer, hash)
				}
				if !want {
					t.Errorf("%v: got unexpected tx: %x", p.Peer, hash)
				}
				seen[hash] = true
				n++
			})
		}
	}
	for i := 0; i < 3; i++ {
		p, _ := newTestPeer(fmt.Sprintf("peer #%d", i), protocol, pm, true)
		wg.Add(1)
		go checktxs(p)
	}
	wg.Wait()
}

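// Tests that transactions make it from one peer's pool to another's, either by
// full broadcast (propagation) or by hash announcements only.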
func TestTransactionPropagation(t *testing.T)  { testSyncTransaction(t, true) }
func TestTransactionAnnouncement(t *testing.T) { testSyncTransaction(t, false) }

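// testSyncTransaction connects a sending and a fetching protocol manager over a
// message pipe, syncs them up and checks that all transactions injected into the
// sender's pool are eventually retrieved by the fetcher.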
func testSyncTransaction(t *testing.T, propagation bool) {
	// Create a protocol manager for transaction fetcher and sender
	pmFetcher, _ := newTestProtocolManagerMust(t, downloader.FastSync, 0, nil, nil)
	defer pmFetcher.Stop()
	pmSender, _ := newTestProtocolManagerMust(t, downloader.FastSync, 1024, nil, nil)
	pmSender.broadcastTxAnnouncesOnly = !propagation
	defer pmSender.Stop()

	// Sync up the two peers
	io1, io2 := p2p.MsgPipe()

	go pmSender.handle(pmSender.newPeer(65, p2p.NewPeer(enode.ID{}, "sender", nil), io2, pmSender.txpool.Get))
	go pmFetcher.handle(pmFetcher.newPeer(65, p2p.NewPeer(enode.ID{}, "fetcher", nil), io1, pmFetcher.txpool.Get))

	time.Sleep(250 * time.Millisecond)
	pmFetcher.doSync(peerToSyncOp(downloader.FullSync, pmFetcher.peers.BestPeer()))
	atomic.StoreUint32(&pmFetcher.acceptTxs, 1)

	newTxs := make(chan core.NewTxsEvent, 1024)
	sub := pmFetcher.txpool.SubscribeNewTxsEvent(newTxs)
	defer sub.Unsubscribe()

	// Fill the pool with new transactions
	alltxs := make([]*types.Transaction, 1024)
	for nonce := range alltxs {
		alltxs[nonce] = newTestTransaction(testAccount, uint64(nonce), 0)
	}
	pmSender.txpool.AddRemotes(alltxs)

	var got int
loop:
	for {
		select {
		case ev := <-newTxs:
			got += len(ev.Txs)
			if got == 1024 {
				break loop
			}
		case <-time.NewTimer(time.Second).C:
			t.Fatal("Failed to retrieve all transactions")
		}
	}
}

// Tests that the custom union field encoder and decoder works correctly.
func TestGetBlockHeadersDataEncodeDecode(t *testing.T) {
	// Create a "random" hash for testing
	var hash common.Hash
	for i := range hash {
		hash[i] = byte(i)
	}
	// Assemble some table driven tests
	tests := []struct {
		packet *getBlockHeadersData
		fail   bool
	}{
		// Providing the origin as either a hash or a number should both work
		{fail: false, packet: &getBlockHeadersData{Origin: hashOrNumber{Number: 314}}},
		{fail: false, packet: &getBlockHeadersData{Origin: hashOrNumber{Hash: hash}}},

		// Providing arbitrary query field should also work
		{fail: false, packet: &getBlockHeadersData{Origin: hashOrNumber{Number: 314}, Amount: 314, Skip: 1, Reverse: true}},
		{fail: false, packet: &getBlockHeadersData{Origin: hashOrNumber{Hash: hash}, Amount: 314, Skip: 1, Reverse: true}},

		// Providing both the origin hash and origin number must fail
		{fail: true, packet: &getBlockHeadersData{Origin: hashOrNumber{Hash: hash, Number: 314}}},
	}
	// Iterate over each of the tests and try to encode and then decode
	for i, tt := range tests {
		bytes, err := rlp.EncodeToBytes(tt.packet)
		if err != nil && !tt.fail {
			t.Fatalf("test %d: failed to encode packet: %v", i, err)
		} else if err == nil && tt.fail {
			t.Fatalf("test %d: encode should have failed", i)
		}
		if !tt.fail {
			packet := new(getBlockHeadersData)
			if err := rlp.DecodeBytes(bytes, packet); err != nil {
				t.Fatalf("test %d: failed to decode packet: %v", i, err)
			}
			if packet.Origin.Hash != tt.packet.Origin.Hash || packet.Origin.Number != tt.packet.Origin.Number || packet.Amount != tt.packet.Amount ||
				packet.Skip != tt.packet.Skip || packet.Reverse != tt.packet.Reverse {
				t.Fatalf("test %d: encode decode mismatch: have %+v, want %+v", i, packet, tt.packet)
			}
		}
	}
}