2015-07-07 03:54:22 +03:00
|
|
|
// Copyright 2015 The go-ethereum Authors
|
2015-07-22 19:48:40 +03:00
|
|
|
// This file is part of the go-ethereum library.
|
2015-07-07 03:54:22 +03:00
|
|
|
//
|
2015-07-23 19:35:11 +03:00
|
|
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
2015-07-07 03:54:22 +03:00
|
|
|
// it under the terms of the GNU Lesser General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
//
|
2015-07-22 19:48:40 +03:00
|
|
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
2015-07-07 03:54:22 +03:00
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
2015-07-22 19:48:40 +03:00
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
2015-07-07 03:54:22 +03:00
|
|
|
// GNU Lesser General Public License for more details.
|
|
|
|
//
|
|
|
|
// You should have received a copy of the GNU Lesser General Public License
|
2015-07-22 19:48:40 +03:00
|
|
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
2015-07-07 03:54:22 +03:00
|
|
|
|
2015-04-12 13:38:25 +03:00
|
|
|
package downloader
|
|
|
|
|
|
|
|
import (
|
2015-06-11 18:13:13 +03:00
|
|
|
"fmt"
|
2015-04-12 13:38:25 +03:00
|
|
|
"math/big"
|
2021-11-26 14:26:03 +03:00
|
|
|
"os"
|
2018-11-12 16:18:56 +03:00
|
|
|
"strings"
|
2015-09-09 19:02:54 +03:00
|
|
|
"sync"
|
2015-06-12 13:35:29 +03:00
|
|
|
"sync/atomic"
|
2015-04-12 13:38:25 +03:00
|
|
|
"testing"
|
|
|
|
"time"
|
|
|
|
|
2019-04-16 13:20:38 +03:00
|
|
|
"github.com/ethereum/go-ethereum"
|
2015-04-12 13:38:25 +03:00
|
|
|
"github.com/ethereum/go-ethereum/common"
|
2021-11-26 14:26:03 +03:00
|
|
|
"github.com/ethereum/go-ethereum/consensus/ethash"
|
|
|
|
"github.com/ethereum/go-ethereum/core"
|
2018-09-24 15:57:49 +03:00
|
|
|
"github.com/ethereum/go-ethereum/core/rawdb"
|
2015-04-12 13:38:25 +03:00
|
|
|
"github.com/ethereum/go-ethereum/core/types"
|
2021-11-26 14:26:03 +03:00
|
|
|
"github.com/ethereum/go-ethereum/core/vm"
|
2021-04-08 18:06:03 +03:00
|
|
|
"github.com/ethereum/go-ethereum/eth/protocols/eth"
|
2021-11-26 14:26:03 +03:00
|
|
|
"github.com/ethereum/go-ethereum/eth/protocols/snap"
|
2015-05-15 13:26:34 +03:00
|
|
|
"github.com/ethereum/go-ethereum/event"
|
2021-11-26 14:26:03 +03:00
|
|
|
"github.com/ethereum/go-ethereum/log"
|
|
|
|
"github.com/ethereum/go-ethereum/params"
|
|
|
|
"github.com/ethereum/go-ethereum/rlp"
|
2015-10-05 19:37:56 +03:00
|
|
|
"github.com/ethereum/go-ethereum/trie"
|
2015-04-12 13:38:25 +03:00
|
|
|
)
|
|
|
|
|
2016-10-31 14:55:12 +03:00
|
|
|
// downloadTester is a test simulator for mocking out local block chain.
type downloadTester struct {
	freezer string           // Path of the temporary ancient-store directory, removed on terminate
	chain   *core.BlockChain // Local chain that the downloader imports into

	downloader *Downloader // Downloader instance under test

	peers map[string]*downloadTesterPeer // Registered mock remote peers, keyed by peer id

	lock sync.RWMutex // Protects the peers map against concurrent registration/removal
}
|
|
|
|
|
|
|
|
// newTester creates a new downloader test mocker.
//
// It is a convenience wrapper around newTesterWithNotification with no
// success callback installed.
func newTester(t *testing.T) *downloadTester {
	return newTesterWithNotification(t, nil)
}
|
|
|
|
|
|
|
|
// newTesterWithNotification creates a new downloader test mocker, backed by a
// fresh in-memory database with a temporary freezer, and an optional callback
// to fire on completion.
//
// NOTE(review): the success parameter is not referenced anywhere in this body —
// confirm whether it should be forwarded into New alongside dropPeer.
func newTesterWithNotification(t *testing.T, success func()) *downloadTester {
	// Back the tester by a memory database with an on-disk freezer in a
	// test-scoped temporary directory.
	freezer := t.TempDir()
	db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false, false, false, false)
	if err != nil {
		panic(err)
	}
	t.Cleanup(func() {
		db.Close()
	})
	// Spin up a pre-funded local chain to sync into
	gspec := &core.Genesis{
		Config:  params.TestChainConfig,
		Alloc:   core.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}},
		BaseFee: big.NewInt(params.InitialBaseFee),
	}
	chain, err := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
	if err != nil {
		panic(err)
	}
	tester := &downloadTester{
		freezer: freezer,
		chain:   chain,
		peers:   make(map[string]*downloadTesterPeer),
	}
	tester.downloader = New(db, new(event.TypeMux), tester.chain, nil, tester.dropPeer)
	return tester
}
|
|
|
|
|
2016-06-01 18:07:25 +03:00
|
|
|
// terminate aborts any operations on the embedded downloader and releases all
// held resources.
func (dl *downloadTester) terminate() {
	dl.downloader.Terminate()
	dl.chain.Stop()

	// Best-effort removal; t.TempDir directories are also cleaned by the test
	// framework, so a failure here is harmless (error deliberately ignored).
	os.RemoveAll(dl.freezer)
}
|
|
|
|
|
2015-06-11 20:22:40 +03:00
|
|
|
// sync starts synchronizing with a remote peer, blocking until it completes.
// If td is nil, the peer's own advertised total difficulty is used. After the
// cycle it verifies that the downloader actually shut down, panicking (caught
// by the test harness) if it is still accepting packets.
func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
	head := dl.peers[id].chain.CurrentBlock()
	if td == nil {
		// If no particular TD was requested, load from the peer's blockchain
		td = dl.peers[id].chain.GetTd(head.Hash(), head.Number.Uint64())
	}
	// Synchronise with the chosen peer and ensure proper cleanup afterwards
	err := dl.downloader.synchronise(id, head.Hash(), td, nil, mode, false, nil)
	select {
	case <-dl.downloader.cancelCh:
		// Ok, downloader fully cancelled after sync cycle
	default:
		// Downloader is still accepting packets, can block a peer up
		panic("downloader active post sync cycle") // panic will be caught by tester
	}
	return err
}
|
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
// newPeer registers a new block download source into the downloader, backing
// it with a standalone blockchain built from the given blocks. The peer is
// registered with both the eth downloader and the snap syncer; registration
// failures panic since they indicate a broken test setup.
func (dl *downloadTester) newPeer(id string, version uint, blocks []*types.Block) *downloadTesterPeer {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	peer := &downloadTesterPeer{
		dl:              dl,
		id:              id,
		chain:           newTestBlockchain(blocks),
		withholdHeaders: make(map[common.Hash]struct{}),
	}
	dl.peers[id] = peer

	if err := dl.downloader.RegisterPeer(id, version, peer); err != nil {
		panic(err)
	}
	if err := dl.downloader.SnapSyncer.Register(peer); err != nil {
		panic(err)
	}
	return peer
}
|
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
// dropPeer simulates a hard peer removal from the connection pool, unhooking
// the peer from both the snap syncer and the downloader. The lock is held for
// the whole removal (via defer) to keep the map and registrations consistent.
func (dl *downloadTester) dropPeer(id string) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	delete(dl.peers, id)
	dl.downloader.SnapSyncer.Unregister(id)
	dl.downloader.UnregisterPeer(id)
}
|
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
// downloadTesterPeer is a mock remote peer backed by its own blockchain,
// serving header/body/receipt and snap queries through the live handler code.
type downloadTesterPeer struct {
	dl    *downloadTester  // Tester that owns this peer, for callback delivery
	id    string           // Unique identifier of the peer
	chain *core.BlockChain // Standalone chain this peer serves data from

	// Header hashes that this peer will maliciously drop from its responses,
	// used to simulate withholding attacks.
	withholdHeaders map[common.Hash]struct{}
}
|
|
|
|
|
2023-02-09 17:01:44 +03:00
|
|
|
// MarkLagging is a no-op in the test peer; it exists only to satisfy the
// peer interface expected by the downloader.
func (dlp *downloadTesterPeer) MarkLagging() {
}
|
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
// Head constructs a function to retrieve a peer's current head hash
|
|
|
|
// and total difficulty.
|
|
|
|
func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
|
|
|
|
head := dlp.chain.CurrentBlock()
|
2023-03-02 09:29:15 +03:00
|
|
|
return head.Hash(), dlp.chain.GetTd(head.Hash(), head.Number.Uint64())
|
2021-11-26 14:26:03 +03:00
|
|
|
}
|
2015-09-09 19:02:54 +03:00
|
|
|
|
core, eth: improve delivery speed on header requests (#23105)
This PR reduces the amount of work we do when answering header queries, e.g. when a peer
is syncing from us.
For some items, e.g block bodies, when we read the rlp-data from database, we plug it
directly into the response package. We didn't do that for headers, but instead read
headers-rlp, decode to types.Header, and re-encode to rlp. This PR changes that to keep it
in RLP-form as much as possible. When a node is syncing from us, it typically requests 192
contiguous headers. On master it has the following effect:
- For headers not in ancient: 2 db lookups. One for translating hash->number (even though
the request is by number), and another for reading by hash (this latter one is sometimes
cached).
- For headers in ancient: 1 file lookup/syscall for translating hash->number (even though
the request is by number), and another for reading the header itself. After this, it
also performes a hashing of the header, to ensure that the hash is what it expected. In
this PR, I instead move the logic for "give me a sequence of blocks" into the lower
layers, where the database can determine how and what to read from leveldb and/or
ancients.
There are basically four types of requests; three of them are improved this way. The
fourth, by hash going backwards, is more tricky to optimize. However, since we know that
the gap is 0, we can look up by the parentHash, and stlil shave off all the number->hash
lookups.
The gapped collection can be optimized similarly, as a follow-up, at least in three out of
four cases.
Co-authored-by: Felix Lange <fjl@twurst.com>
2021-12-07 19:50:58 +03:00
|
|
|
func unmarshalRlpHeaders(rlpdata []rlp.RawValue) []*types.Header {
|
|
|
|
var headers = make([]*types.Header, len(rlpdata))
|
|
|
|
for i, data := range rlpdata {
|
|
|
|
var h types.Header
|
|
|
|
if err := rlp.DecodeBytes(data, &h); err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
headers[i] = &h
|
|
|
|
}
|
|
|
|
return headers
|
|
|
|
}
|
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
// RequestHeadersByHash constructs a GetBlockHeaders function based on a hashed
// origin; associated with a particular peer in the download tester. The returned
// function can be used to retrieve batches of headers from the particular peer.
func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
	// Service the header query via the live handler code
	rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, &eth.GetBlockHeadersPacket{
		Origin: eth.HashOrNumber{
			Hash: origin,
		},
		Amount:  uint64(amount),
		Skip:    uint64(skip),
		Reverse: reverse,
	}, nil)
	headers := unmarshalRlpHeaders(rlpHeaders)
	// If a malicious peer is simulated withholding headers, delete them
	for hash := range dlp.withholdHeaders {
		for i, header := range headers {
			if header.Hash() == hash {
				headers = append(headers[:i], headers[i+1:]...)
				break
			}
		}
	}
	// Precompute the hashes to ship as response metadata
	hashes := make([]common.Hash, len(headers))
	for i, header := range headers {
		hashes[i] = header.Hash()
	}
	// Deliver the headers to the downloader
	req := &eth.Request{
		Peer: dlp.id,
	}
	res := &eth.Response{
		Req:  req,
		Res:  (*eth.BlockHeadersPacket)(&headers),
		Meta: hashes,
		Time: 1,
		Done: make(chan error, 1), // Ignore the returned status
	}
	// Deliver asynchronously so this call never blocks on the sink
	go func() {
		sink <- res
	}()
	return req, nil
}
|
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
// RequestHeadersByNumber constructs a GetBlockHeaders function based on a numbered
// origin; associated with a particular peer in the download tester. The returned
// function can be used to retrieve batches of headers from the particular peer.
func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
	// Service the header query via the live handler code
	rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, &eth.GetBlockHeadersPacket{
		Origin: eth.HashOrNumber{
			Number: origin,
		},
		Amount:  uint64(amount),
		Skip:    uint64(skip),
		Reverse: reverse,
	}, nil)
	headers := unmarshalRlpHeaders(rlpHeaders)
	// If a malicious peer is simulated withholding headers, delete them
	for hash := range dlp.withholdHeaders {
		for i, header := range headers {
			if header.Hash() == hash {
				headers = append(headers[:i], headers[i+1:]...)
				break
			}
		}
	}
	// Precompute the hashes to ship as response metadata
	hashes := make([]common.Hash, len(headers))
	for i, header := range headers {
		hashes[i] = header.Hash()
	}
	// Deliver the headers to the downloader
	req := &eth.Request{
		Peer: dlp.id,
	}
	res := &eth.Response{
		Req:  req,
		Res:  (*eth.BlockHeadersPacket)(&headers),
		Meta: hashes,
		Time: 1,
		Done: make(chan error, 1), // Ignore the returned status
	}
	// Deliver asynchronously so this call never blocks on the sink
	go func() {
		sink <- res
	}()
	return req, nil
}
|
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
// RequestBodies constructs a getBlockBodies method associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of block bodies from the particularly requested peer.
func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *eth.Response) (*eth.Request, error) {
	blobs := eth.ServiceGetBlockBodiesQuery(dlp.chain, hashes)

	// Decode the RLP blobs back into structured bodies (decode errors are
	// deliberately ignored — the blobs come from our own handler code).
	bodies := make([]*eth.BlockBody, len(blobs))
	for i, blob := range blobs {
		bodies[i] = new(eth.BlockBody)
		rlp.DecodeBytes(blob, bodies[i])
	}
	// Derive the per-body content hashes shipped as response metadata.
	// NOTE(review): withdrawalHashes entries are never assigned and remain the
	// zero hash — confirm whether withdrawal roots should be derived here for
	// post-Shanghai bodies.
	var (
		txsHashes        = make([]common.Hash, len(bodies))
		uncleHashes      = make([]common.Hash, len(bodies))
		withdrawalHashes = make([]common.Hash, len(bodies))
	)
	hasher := trie.NewStackTrie(nil)
	for i, body := range bodies {
		txsHashes[i] = types.DeriveSha(types.Transactions(body.Transactions), hasher)
		uncleHashes[i] = types.CalcUncleHash(body.Uncles)
	}
	req := &eth.Request{
		Peer: dlp.id,
	}
	res := &eth.Response{
		Req:  req,
		Res:  (*eth.BlockBodiesPacket)(&bodies),
		Meta: [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes},
		Time: 1,
		Done: make(chan error, 1), // Ignore the returned status
	}
	// Deliver asynchronously so this call never blocks on the sink
	go func() {
		sink <- res
	}()
	return req, nil
}
|
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
// RequestReceipts constructs a getReceipts method associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of block receipts from the particularly requested peer.
func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash, sink chan *eth.Response) (*eth.Request, error) {
	blobs := eth.ServiceGetReceiptsQuery(dlp.chain, hashes)

	// Decode the RLP blobs back into receipt lists (decode errors deliberately
	// ignored — the blobs come from our own handler code).
	receipts := make([][]*types.Receipt, len(blobs))
	for i, blob := range blobs {
		rlp.DecodeBytes(blob, &receipts[i])
	}
	// Derive the receipt-trie roots shipped as response metadata; hashes is
	// intentionally reused (the request hashes are no longer needed).
	hasher := trie.NewStackTrie(nil)
	hashes = make([]common.Hash, len(receipts))
	for i, receipt := range receipts {
		hashes[i] = types.DeriveSha(types.Receipts(receipt), hasher)
	}
	req := &eth.Request{
		Peer: dlp.id,
	}
	res := &eth.Response{
		Req:  req,
		Res:  (*eth.ReceiptsPacket)(&receipts),
		Meta: hashes,
		Time: 1,
		Done: make(chan error, 1), // Ignore the returned status
	}
	// Deliver asynchronously so this call never blocks on the sink
	go func() {
		sink <- res
	}()
	return req, nil
}
|
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
// ID retrieves the peer's unique identifier.
func (dlp *downloadTesterPeer) ID() string {
	return dlp.id
}
|
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
// RequestAccountRange fetches a batch of accounts rooted in a specific account
// trie, starting with the origin. The response is delivered asynchronously to
// the snap syncer's OnAccounts callback.
func (dlp *downloadTesterPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
	// Create the request and service it
	req := &snap.GetAccountRangePacket{
		ID:     id,
		Root:   root,
		Origin: origin,
		Limit:  limit,
		Bytes:  bytes,
	}
	slimaccs, proofs := snap.ServiceGetAccountRangeQuery(dlp.chain, req)

	// We need to convert to non-slim format, delegate to the packet code
	res := &snap.AccountRangePacket{
		ID:       id,
		Accounts: slimaccs,
		Proof:    proofs,
	}
	hashes, accounts, _ := res.Unpack()

	go dlp.dl.downloader.SnapSyncer.OnAccounts(dlp, id, hashes, accounts, proofs)
	return nil
}
|
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
// RequestStorageRanges fetches a batch of storage slots belonging to one or
// more accounts. If slots from only one account is requested, an origin marker
// may also be used to retrieve from there. The response is delivered
// asynchronously to the snap syncer's OnStorage callback.
func (dlp *downloadTesterPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
	// Create the request and service it
	req := &snap.GetStorageRangesPacket{
		ID:       id,
		Accounts: accounts,
		Root:     root,
		Origin:   origin,
		Limit:    limit,
		Bytes:    bytes,
	}
	storage, proofs := snap.ServiceGetStorageRangesQuery(dlp.chain, req)

	// We need to convert to demultiplex, delegate to the packet code
	res := &snap.StorageRangesPacket{
		ID:    id,
		Slots: storage,
		Proof: proofs,
	}
	hashes, slots := res.Unpack()

	go dlp.dl.downloader.SnapSyncer.OnStorage(dlp, id, hashes, slots, proofs)
	return nil
}
|
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
// RequestByteCodes fetches a batch of bytecodes by hash. The response is
// delivered asynchronously to the snap syncer's OnByteCodes callback.
func (dlp *downloadTesterPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
	req := &snap.GetByteCodesPacket{
		ID:     id,
		Hashes: hashes,
		Bytes:  bytes,
	}
	codes := snap.ServiceGetByteCodesQuery(dlp.chain, req)
	go dlp.dl.downloader.SnapSyncer.OnByteCodes(dlp, id, codes)
	return nil
}
|
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
// RequestTrieNodes fetches a batch of account or storage trie nodes rooted in
// a specific state trie. The response is delivered asynchronously to the snap
// syncer's OnTrieNodes callback.
func (dlp *downloadTesterPeer) RequestTrieNodes(id uint64, root common.Hash, paths []snap.TrieNodePathSet, bytes uint64) error {
	req := &snap.GetTrieNodesPacket{
		ID:    id,
		Root:  root,
		Paths: paths,
		Bytes: bytes,
	}
	nodes, _ := snap.ServiceGetTrieNodesQuery(dlp.chain, req, time.Now())
	go dlp.dl.downloader.SnapSyncer.OnTrieNodes(dlp, id, nodes)
	return nil
}
|
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
// Log retrieves the peer's own contextual logger, tagged with the peer id.
func (dlp *downloadTesterPeer) Log() log.Logger {
	return log.New("peer", dlp.id)
}
|
|
|
|
|
2015-09-28 19:27:31 +03:00
|
|
|
// assertOwnChain checks if the local chain contains the correct number of items
// of the various chain components (headers, blocks and receipt-bearing snap
// blocks), failing the test on any mismatch.
func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
	// Mark this method as a helper to report errors at callsite, not in here
	t.Helper()

	headers, blocks, receipts := length, length, length
	if tester.downloader.getMode() == LightSync {
		// In light sync only headers are expected to grow; block and receipt
		// counts stay at 1
		blocks, receipts = 1, 1
	}
	// Counts are number+1 since block numbers start at 0 (genesis included)
	if hs := int(tester.chain.CurrentHeader().Number.Uint64()) + 1; hs != headers {
		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
	}
	if bs := int(tester.chain.CurrentBlock().Number.Uint64()) + 1; bs != blocks {
		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
	}
	if rs := int(tester.chain.CurrentSnapBlock().Number.Uint64()) + 1; rs != receipts {
		t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
	}
}
|
|
|
|
|
2021-04-08 18:06:03 +03:00
|
|
|
// Tests that simple synchronisation against a remote peer works correctly,
// once per supported protocol version and sync mode combination.
func TestCanonicalSynchronisation66Full(t *testing.T) { testCanonSync(t, eth.ETH66, FullSync) }
func TestCanonicalSynchronisation66Snap(t *testing.T) { testCanonSync(t, eth.ETH66, SnapSync) }
func TestCanonicalSynchronisation66Light(t *testing.T) { testCanonSync(t, eth.ETH66, LightSync) }
func TestCanonicalSynchronisation67Full(t *testing.T) { testCanonSync(t, eth.ETH67, FullSync) }
func TestCanonicalSynchronisation67Snap(t *testing.T) { testCanonSync(t, eth.ETH67, SnapSync) }
func TestCanonicalSynchronisation67Light(t *testing.T) { testCanonSync(t, eth.ETH67, LightSync) }
|
2015-09-28 19:27:31 +03:00
|
|
|
|
2021-02-18 19:54:29 +03:00
|
|
|
func testCanonSync(t *testing.T, protocol uint, mode SyncMode) {
|
2022-04-08 16:44:55 +03:00
|
|
|
tester := newTester(t)
|
2016-06-01 18:07:25 +03:00
|
|
|
defer tester.terminate()
|
|
|
|
|
2016-10-31 14:55:12 +03:00
|
|
|
// Create a small enough block chain to download
|
2020-09-02 12:01:46 +03:00
|
|
|
chain := testChainBase.shorten(blockCacheMaxItems - 15)
|
2021-11-26 14:26:03 +03:00
|
|
|
tester.newPeer("peer", protocol, chain.blocks[1:])
|
2015-05-10 01:34:07 +03:00
|
|
|
|
2015-09-28 19:27:31 +03:00
|
|
|
// Synchronise with the peer and make sure all relevant data was retrieved
|
2015-10-13 12:04:25 +03:00
|
|
|
if err := tester.sync("peer", nil, mode); err != nil {
|
2015-06-30 19:05:06 +03:00
|
|
|
t.Fatalf("failed to synchronise blocks: %v", err)
|
2015-05-29 19:47:00 +03:00
|
|
|
}
|
2021-11-26 14:26:03 +03:00
|
|
|
assertOwnChain(t, tester, len(chain.blocks))
|
2015-05-10 01:34:07 +03:00
|
|
|
}
|
|
|
|
|
2015-06-30 19:05:06 +03:00
|
|
|
// Tests that if a large batch of blocks are being downloaded, it is throttled
// until the cached blocks are retrieved.
// NOTE(review): no LightSync variants here — presumably light sync has no
// block cache to throttle; confirm against the downloader implementation.
func TestThrottling66Full(t *testing.T) { testThrottling(t, eth.ETH66, FullSync) }
func TestThrottling66Snap(t *testing.T) { testThrottling(t, eth.ETH66, SnapSync) }
func TestThrottling67Full(t *testing.T) { testThrottling(t, eth.ETH67, FullSync) }
func TestThrottling67Snap(t *testing.T) { testThrottling(t, eth.ETH67, SnapSync) }
|
2021-02-18 19:54:29 +03:00
|
|
|
|
2020-12-14 12:27:15 +03:00
|
|
|
func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
|
2022-04-08 16:44:55 +03:00
|
|
|
tester := newTester(t)
|
2021-11-26 14:26:03 +03:00
|
|
|
defer tester.terminate()
|
2016-06-01 18:07:25 +03:00
|
|
|
|
2016-10-31 14:55:12 +03:00
|
|
|
// Create a long block chain to download and the tester
|
2021-11-26 14:26:03 +03:00
|
|
|
targetBlocks := len(testChainBase.blocks) - 1
|
|
|
|
tester.newPeer("peer", protocol, testChainBase.blocks[1:])
|
2015-05-07 14:40:50 +03:00
|
|
|
|
2015-06-12 13:35:29 +03:00
|
|
|
// Wrap the importer to allow stepping
|
2023-04-03 22:48:10 +03:00
|
|
|
var blocked atomic.Uint32
|
|
|
|
proceed := make(chan struct{})
|
2023-09-07 11:39:29 +03:00
|
|
|
tester.downloader.chainInsertHook = func(results []*fetchResult, _ chan struct{}) {
|
2023-04-03 22:48:10 +03:00
|
|
|
blocked.Store(uint32(len(results)))
|
2015-08-14 21:25:41 +03:00
|
|
|
<-proceed
|
2015-06-12 13:35:29 +03:00
|
|
|
}
|
2015-05-29 19:47:00 +03:00
|
|
|
// Start a synchronisation concurrently
|
miner, test: fix potential goroutine leak (#21989)
In miner/worker.go, there are two goroutine using channel w.newWorkCh: newWorkerLoop() sends to this channel, and mainLoop() receives from this channel. Only the receive operation is in a select.
However, w.exitCh may be closed by another goroutine. This is fine for the receive since receive is in select, but if the send operation is blocking, then it will block forever. This commit puts the send in a select, so it won't block even if w.exitCh is closed.
Similarly, there are two goroutines using channel errc: the parent that runs the test receives from it, and the child created at line 573 sends to it. If the parent goroutine exits too early by calling t.Fatalf() at line 614, then the child goroutine will be blocked at line 574 forever. This commit adds 1 buffer to errc. Now send will not block, and receive is not influenced because receive still needs to wait for the send.
2020-12-11 12:29:42 +03:00
|
|
|
errc := make(chan error, 1)
|
2015-05-29 19:47:00 +03:00
|
|
|
go func() {
|
2015-10-13 12:04:25 +03:00
|
|
|
errc <- tester.sync("peer", nil, mode)
|
2015-05-29 19:47:00 +03:00
|
|
|
}()
|
|
|
|
// Iteratively take some blocks, always checking the retrieval count
|
2015-09-09 19:02:54 +03:00
|
|
|
for {
|
|
|
|
// Check the retrieval count synchronously (! reason for this ugly block)
|
|
|
|
tester.lock.RLock()
|
2023-03-02 09:29:15 +03:00
|
|
|
retrieved := int(tester.chain.CurrentSnapBlock().Number.Uint64()) + 1
|
2015-09-09 19:02:54 +03:00
|
|
|
tester.lock.RUnlock()
|
|
|
|
if retrieved >= targetBlocks+1 {
|
|
|
|
break
|
|
|
|
}
|
2015-06-12 13:35:29 +03:00
|
|
|
// Wait a bit for sync to throttle itself
|
2015-10-13 12:04:25 +03:00
|
|
|
var cached, frozen int
|
2015-12-30 14:06:09 +02:00
|
|
|
for start := time.Now(); time.Since(start) < 3*time.Second; {
|
2015-06-07 18:46:32 +03:00
|
|
|
time.Sleep(25 * time.Millisecond)
|
2015-06-12 13:35:29 +03:00
|
|
|
|
2015-11-13 18:08:15 +02:00
|
|
|
tester.lock.Lock()
|
2021-01-09 19:29:19 +03:00
|
|
|
tester.downloader.queue.lock.Lock()
|
|
|
|
tester.downloader.queue.resultCache.lock.Lock()
|
2020-07-24 10:46:26 +03:00
|
|
|
{
|
|
|
|
cached = tester.downloader.queue.resultCache.countCompleted()
|
2023-04-03 22:48:10 +03:00
|
|
|
frozen = int(blocked.Load())
|
2023-03-02 09:29:15 +03:00
|
|
|
retrieved = int(tester.chain.CurrentSnapBlock().Number.Uint64()) + 1
|
2015-09-28 19:27:31 +03:00
|
|
|
}
|
2021-01-09 19:29:19 +03:00
|
|
|
tester.downloader.queue.resultCache.lock.Unlock()
|
|
|
|
tester.downloader.queue.lock.Unlock()
|
2015-11-13 18:08:15 +02:00
|
|
|
tester.lock.Unlock()
|
2015-09-09 19:02:54 +03:00
|
|
|
|
2020-09-02 12:01:46 +03:00
|
|
|
if cached == blockCacheMaxItems ||
|
|
|
|
cached == blockCacheMaxItems-reorgProtHeaderDelay ||
|
2020-07-24 10:46:26 +03:00
|
|
|
retrieved+cached+frozen == targetBlocks+1 ||
|
|
|
|
retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {
|
2015-06-07 18:46:32 +03:00
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
2015-06-12 13:35:29 +03:00
|
|
|
// Make sure we filled up the cache, then exhaust it
|
|
|
|
time.Sleep(25 * time.Millisecond) // give it a chance to screw up
|
2015-10-13 12:04:25 +03:00
|
|
|
tester.lock.RLock()
|
2023-03-02 09:29:15 +03:00
|
|
|
retrieved = int(tester.chain.CurrentSnapBlock().Number.Uint64()) + 1
|
2015-10-13 12:04:25 +03:00
|
|
|
tester.lock.RUnlock()
|
2020-09-02 12:01:46 +03:00
|
|
|
if cached != blockCacheMaxItems && cached != blockCacheMaxItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay {
|
|
|
|
t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheMaxItems, retrieved, frozen, targetBlocks+1)
|
2015-05-29 19:47:00 +03:00
|
|
|
}
|
2015-08-14 21:25:41 +03:00
|
|
|
// Permit the blocked blocks to import
|
2023-04-03 22:48:10 +03:00
|
|
|
if blocked.Load() > 0 {
|
|
|
|
blocked.Store(uint32(0))
|
2015-08-14 21:25:41 +03:00
|
|
|
proceed <- struct{}{}
|
2015-05-29 19:47:00 +03:00
|
|
|
}
|
2015-06-12 13:35:29 +03:00
|
|
|
}
|
|
|
|
// Check that we haven't pulled more blocks than available
|
2015-09-28 19:27:31 +03:00
|
|
|
assertOwnChain(t, tester, targetBlocks+1)
|
2015-05-29 19:47:00 +03:00
|
|
|
if err := <-errc; err != nil {
|
|
|
|
t.Fatalf("block synchronization failed: %v", err)
|
2015-05-07 14:40:50 +03:00
|
|
|
}
|
|
|
|
}
|
2015-05-14 15:24:18 +03:00
|
|
|
|
2015-06-30 19:05:06 +03:00
|
|
|
// Tests that simple synchronization against a forked chain works correctly. In
// this test common ancestor lookup should *not* be short circuited, and a full
// binary search should be executed.
func TestForkedSync66Full(t *testing.T) { testForkedSync(t, eth.ETH66, FullSync) }
func TestForkedSync66Snap(t *testing.T) { testForkedSync(t, eth.ETH66, SnapSync) }
func TestForkedSync66Light(t *testing.T) { testForkedSync(t, eth.ETH66, LightSync) }
func TestForkedSync67Full(t *testing.T) { testForkedSync(t, eth.ETH67, FullSync) }
func TestForkedSync67Snap(t *testing.T) { testForkedSync(t, eth.ETH67, SnapSync) }
func TestForkedSync67Light(t *testing.T) { testForkedSync(t, eth.ETH67, LightSync) }
|
2021-02-18 19:54:29 +03:00
|
|
|
|
2020-12-14 12:27:15 +03:00
|
|
|
func testForkedSync(t *testing.T, protocol uint, mode SyncMode) {
|
2022-04-08 16:44:55 +03:00
|
|
|
tester := newTester(t)
|
2016-06-01 18:07:25 +03:00
|
|
|
defer tester.terminate()
|
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + 80)
|
|
|
|
chainB := testChainForkLightB.shorten(len(testChainBase.blocks) + 81)
|
|
|
|
tester.newPeer("fork A", protocol, chainA.blocks[1:])
|
|
|
|
tester.newPeer("fork B", protocol, chainB.blocks[1:])
|
2015-06-30 19:05:06 +03:00
|
|
|
// Synchronise with the peer and make sure all blocks were retrieved
|
2015-10-13 12:04:25 +03:00
|
|
|
if err := tester.sync("fork A", nil, mode); err != nil {
|
2015-06-30 19:05:06 +03:00
|
|
|
t.Fatalf("failed to synchronise blocks: %v", err)
|
|
|
|
}
|
2021-11-26 14:26:03 +03:00
|
|
|
assertOwnChain(t, tester, len(chainA.blocks))
|
2015-09-28 19:27:31 +03:00
|
|
|
|
2015-06-30 19:05:06 +03:00
|
|
|
// Synchronise with the second peer and make sure that fork is pulled too
|
2015-10-13 12:04:25 +03:00
|
|
|
if err := tester.sync("fork B", nil, mode); err != nil {
|
2015-06-30 19:05:06 +03:00
|
|
|
t.Fatalf("failed to synchronise blocks: %v", err)
|
|
|
|
}
|
2021-11-26 14:26:03 +03:00
|
|
|
assertOwnChain(t, tester, len(chainB.blocks))
|
2015-06-30 19:05:06 +03:00
|
|
|
}
|
|
|
|
|
2022-08-19 09:00:21 +03:00
|
|
|
// Tests that synchronising against a much shorter but much heavier fork works
// correctly and is not dropped.
func TestHeavyForkedSync66Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, FullSync) }
func TestHeavyForkedSync66Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, SnapSync) }
func TestHeavyForkedSync66Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, LightSync) }
func TestHeavyForkedSync67Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, FullSync) }
func TestHeavyForkedSync67Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, SnapSync) }
func TestHeavyForkedSync67Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, LightSync) }
|
2021-02-18 19:54:29 +03:00
|
|
|
|
2020-12-14 12:27:15 +03:00
|
|
|
func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
|
2022-04-08 16:44:55 +03:00
|
|
|
tester := newTester(t)
|
2016-06-01 18:07:25 +03:00
|
|
|
defer tester.terminate()
|
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + 80)
|
|
|
|
chainB := testChainForkHeavy.shorten(len(testChainBase.blocks) + 79)
|
|
|
|
tester.newPeer("light", protocol, chainA.blocks[1:])
|
|
|
|
tester.newPeer("heavy", protocol, chainB.blocks[1:])
|
2016-05-13 13:12:13 +03:00
|
|
|
|
|
|
|
// Synchronise with the peer and make sure all blocks were retrieved
|
|
|
|
if err := tester.sync("light", nil, mode); err != nil {
|
|
|
|
t.Fatalf("failed to synchronise blocks: %v", err)
|
|
|
|
}
|
2021-11-26 14:26:03 +03:00
|
|
|
assertOwnChain(t, tester, len(chainA.blocks))
|
2016-05-13 13:12:13 +03:00
|
|
|
|
|
|
|
// Synchronise with the second peer and make sure that fork is pulled too
|
|
|
|
if err := tester.sync("heavy", nil, mode); err != nil {
|
|
|
|
t.Fatalf("failed to synchronise blocks: %v", err)
|
|
|
|
}
|
2021-11-26 14:26:03 +03:00
|
|
|
assertOwnChain(t, tester, len(chainB.blocks))
|
2016-05-13 13:12:13 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// Tests that chain forks are contained within a certain interval of the current
// chain head, ensuring that malicious peers cannot waste resources by feeding
// long dead chains.
func TestBoundedForkedSync66Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, FullSync) }
func TestBoundedForkedSync66Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, SnapSync) }
func TestBoundedForkedSync66Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, LightSync) }
func TestBoundedForkedSync67Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, FullSync) }
func TestBoundedForkedSync67Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, SnapSync) }
func TestBoundedForkedSync67Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, LightSync) }
|
2021-02-18 19:54:29 +03:00
|
|
|
|
2020-12-14 12:27:15 +03:00
|
|
|
func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) {
|
2022-04-08 16:44:55 +03:00
|
|
|
tester := newTester(t)
|
2016-06-01 18:07:25 +03:00
|
|
|
defer tester.terminate()
|
|
|
|
|
2018-11-07 17:07:43 +03:00
|
|
|
chainA := testChainForkLightA
|
|
|
|
chainB := testChainForkLightB
|
2021-11-26 14:26:03 +03:00
|
|
|
tester.newPeer("original", protocol, chainA.blocks[1:])
|
|
|
|
tester.newPeer("rewriter", protocol, chainB.blocks[1:])
|
2016-05-13 13:12:13 +03:00
|
|
|
|
|
|
|
// Synchronise with the peer and make sure all blocks were retrieved
|
|
|
|
if err := tester.sync("original", nil, mode); err != nil {
|
|
|
|
t.Fatalf("failed to synchronise blocks: %v", err)
|
|
|
|
}
|
2021-11-26 14:26:03 +03:00
|
|
|
assertOwnChain(t, tester, len(chainA.blocks))
|
2016-05-13 13:12:13 +03:00
|
|
|
|
|
|
|
// Synchronise with the second peer and ensure that the fork is rejected to being too old
|
|
|
|
if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
|
|
|
|
t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Tests that chain forks are contained within a certain interval of the current
// chain head for short but heavy forks too. These are a bit special because they
// take different ancestor lookup paths.
func TestBoundedHeavyForkedSync66Full(t *testing.T) {
	testBoundedHeavyForkedSync(t, eth.ETH66, FullSync)
}
func TestBoundedHeavyForkedSync66Snap(t *testing.T) {
	testBoundedHeavyForkedSync(t, eth.ETH66, SnapSync)
}
func TestBoundedHeavyForkedSync66Light(t *testing.T) {
	testBoundedHeavyForkedSync(t, eth.ETH66, LightSync)
}
func TestBoundedHeavyForkedSync67Full(t *testing.T) {
	testBoundedHeavyForkedSync(t, eth.ETH67, FullSync)
}
func TestBoundedHeavyForkedSync67Snap(t *testing.T) {
	testBoundedHeavyForkedSync(t, eth.ETH67, SnapSync)
}
func TestBoundedHeavyForkedSync67Light(t *testing.T) {
	testBoundedHeavyForkedSync(t, eth.ETH67, LightSync)
}
|
2021-02-18 19:54:29 +03:00
|
|
|
|
2020-12-14 12:27:15 +03:00
|
|
|
func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
|
2022-04-08 16:44:55 +03:00
|
|
|
tester := newTester(t)
|
2021-11-26 14:26:03 +03:00
|
|
|
defer tester.terminate()
|
2016-06-01 18:07:25 +03:00
|
|
|
|
2016-10-31 14:55:12 +03:00
|
|
|
// Create a long enough forked chain
|
2018-11-07 17:07:43 +03:00
|
|
|
chainA := testChainForkLightA
|
|
|
|
chainB := testChainForkHeavy
|
2021-11-26 14:26:03 +03:00
|
|
|
tester.newPeer("original", protocol, chainA.blocks[1:])
|
2016-05-13 13:12:13 +03:00
|
|
|
|
|
|
|
// Synchronise with the peer and make sure all blocks were retrieved
|
|
|
|
if err := tester.sync("original", nil, mode); err != nil {
|
|
|
|
t.Fatalf("failed to synchronise blocks: %v", err)
|
|
|
|
}
|
2021-11-26 14:26:03 +03:00
|
|
|
assertOwnChain(t, tester, len(chainA.blocks))
|
2016-05-13 13:12:13 +03:00
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
tester.newPeer("heavy-rewriter", protocol, chainB.blocks[1:])
|
2016-05-13 13:12:13 +03:00
|
|
|
// Synchronise with the second peer and ensure that the fork is rejected to being too old
|
|
|
|
if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
|
|
|
|
t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
|
|
|
|
}
|
2015-09-28 19:27:31 +03:00
|
|
|
}
|
2015-06-30 19:05:06 +03:00
|
|
|
|
2015-09-28 19:27:31 +03:00
|
|
|
// Tests that a canceled download wipes all previously accumulated state.
func TestCancel66Full(t *testing.T) { testCancel(t, eth.ETH66, FullSync) }
func TestCancel66Snap(t *testing.T) { testCancel(t, eth.ETH66, SnapSync) }
func TestCancel66Light(t *testing.T) { testCancel(t, eth.ETH66, LightSync) }
func TestCancel67Full(t *testing.T) { testCancel(t, eth.ETH67, FullSync) }
func TestCancel67Snap(t *testing.T) { testCancel(t, eth.ETH67, SnapSync) }
func TestCancel67Light(t *testing.T) { testCancel(t, eth.ETH67, LightSync) }
|
2021-02-18 19:54:29 +03:00
|
|
|
|
2020-12-14 12:27:15 +03:00
|
|
|
func testCancel(t *testing.T, protocol uint, mode SyncMode) {
|
2022-04-08 16:44:55 +03:00
|
|
|
tester := newTester(t)
|
2016-10-31 14:55:12 +03:00
|
|
|
defer tester.terminate()
|
|
|
|
|
2018-11-07 17:07:43 +03:00
|
|
|
chain := testChainBase.shorten(MaxHeaderFetch)
|
2021-11-26 14:26:03 +03:00
|
|
|
tester.newPeer("peer", protocol, chain.blocks[1:])
|
2015-06-30 19:05:06 +03:00
|
|
|
|
|
|
|
// Make sure canceling works with a pristine downloader
|
2017-03-22 03:37:24 +03:00
|
|
|
tester.downloader.Cancel()
|
2015-09-28 19:27:31 +03:00
|
|
|
if !tester.downloader.queue.Idle() {
|
|
|
|
t.Errorf("download queue not idle")
|
2015-06-30 19:05:06 +03:00
|
|
|
}
|
|
|
|
// Synchronise with the peer, but cancel afterwards
|
2015-10-13 12:04:25 +03:00
|
|
|
if err := tester.sync("peer", nil, mode); err != nil {
|
2015-06-30 19:05:06 +03:00
|
|
|
t.Fatalf("failed to synchronise blocks: %v", err)
|
|
|
|
}
|
2017-03-22 03:37:24 +03:00
|
|
|
tester.downloader.Cancel()
|
2015-09-28 19:27:31 +03:00
|
|
|
if !tester.downloader.queue.Idle() {
|
|
|
|
t.Errorf("download queue not idle")
|
2015-06-30 19:05:06 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-06-12 14:36:44 +03:00
|
|
|
// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
func TestMultiSynchronisation66Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, FullSync) }
func TestMultiSynchronisation66Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, SnapSync) }
func TestMultiSynchronisation66Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, LightSync) }
func TestMultiSynchronisation67Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, FullSync) }
func TestMultiSynchronisation67Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, SnapSync) }
func TestMultiSynchronisation67Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, LightSync) }
|
2021-02-18 19:54:29 +03:00
|
|
|
|
2020-12-14 12:27:15 +03:00
|
|
|
func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) {
|
2022-04-08 16:44:55 +03:00
|
|
|
tester := newTester(t)
|
2016-10-31 14:55:12 +03:00
|
|
|
defer tester.terminate()
|
|
|
|
|
2015-06-12 14:36:44 +03:00
|
|
|
// Create various peers with various parts of the chain
|
2015-08-14 21:25:41 +03:00
|
|
|
targetPeers := 8
|
2018-11-07 17:07:43 +03:00
|
|
|
chain := testChainBase.shorten(targetPeers * 100)
|
2016-06-01 18:07:25 +03:00
|
|
|
|
2015-06-12 14:36:44 +03:00
|
|
|
for i := 0; i < targetPeers; i++ {
|
|
|
|
id := fmt.Sprintf("peer #%d", i)
|
2021-11-26 14:26:03 +03:00
|
|
|
tester.newPeer(id, protocol, chain.shorten(len(chain.blocks) / (i + 1)).blocks[1:])
|
2015-06-12 14:36:44 +03:00
|
|
|
}
|
2015-10-13 12:04:25 +03:00
|
|
|
if err := tester.sync("peer #0", nil, mode); err != nil {
|
2015-06-12 14:36:44 +03:00
|
|
|
t.Fatalf("failed to synchronise blocks: %v", err)
|
|
|
|
}
|
2021-11-26 14:26:03 +03:00
|
|
|
assertOwnChain(t, tester, len(chain.blocks))
|
2015-06-12 14:36:44 +03:00
|
|
|
}
|
|
|
|
|
2015-10-02 13:20:41 +03:00
|
|
|
// Tests that synchronisations behave well in multi-version protocol environments
// and not wreak havoc on other nodes in the network.
func TestMultiProtoSynchronisation66Full(t *testing.T) { testMultiProtoSync(t, eth.ETH66, FullSync) }
func TestMultiProtoSynchronisation66Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH66, SnapSync) }
func TestMultiProtoSynchronisation66Light(t *testing.T) { testMultiProtoSync(t, eth.ETH66, LightSync) }
func TestMultiProtoSynchronisation67Full(t *testing.T) { testMultiProtoSync(t, eth.ETH67, FullSync) }
func TestMultiProtoSynchronisation67Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH67, SnapSync) }
func TestMultiProtoSynchronisation67Light(t *testing.T) { testMultiProtoSync(t, eth.ETH67, LightSync) }
|
2021-02-18 19:54:29 +03:00
|
|
|
|
2020-12-14 12:27:15 +03:00
|
|
|
func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
|
2022-04-08 16:44:55 +03:00
|
|
|
tester := newTester(t)
|
2016-10-31 14:55:12 +03:00
|
|
|
defer tester.terminate()
|
|
|
|
|
2015-10-02 13:20:41 +03:00
|
|
|
// Create a small enough block chain to download
|
2020-09-02 12:01:46 +03:00
|
|
|
chain := testChainBase.shorten(blockCacheMaxItems - 15)
|
2015-10-02 13:20:41 +03:00
|
|
|
|
|
|
|
// Create peers of every type
|
2021-11-26 14:26:03 +03:00
|
|
|
tester.newPeer("peer 66", eth.ETH66, chain.blocks[1:])
|
2022-06-15 13:56:47 +03:00
|
|
|
tester.newPeer("peer 67", eth.ETH67, chain.blocks[1:])
|
2015-10-02 13:20:41 +03:00
|
|
|
|
2015-10-13 12:04:25 +03:00
|
|
|
// Synchronise with the requested peer and make sure all blocks were retrieved
|
|
|
|
if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
|
2015-10-02 13:20:41 +03:00
|
|
|
t.Fatalf("failed to synchronise blocks: %v", err)
|
|
|
|
}
|
2021-11-26 14:26:03 +03:00
|
|
|
assertOwnChain(t, tester, len(chain.blocks))
|
2015-09-30 19:23:31 +03:00
|
|
|
|
2015-10-02 13:20:41 +03:00
|
|
|
// Check that no peers have been dropped off
|
2022-06-15 13:56:47 +03:00
|
|
|
for _, version := range []int{66, 67} {
|
2015-10-02 13:20:41 +03:00
|
|
|
peer := fmt.Sprintf("peer %d", version)
|
2018-11-07 17:07:43 +03:00
|
|
|
if _, ok := tester.peers[peer]; !ok {
|
2015-10-02 13:20:41 +03:00
|
|
|
t.Errorf("%s dropped", peer)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-09-28 19:27:31 +03:00
|
|
|
// Tests that if a block is empty (e.g. header only), no body request should be
// made, and instead the header should be assembled into a whole block in itself.
func TestEmptyShortCircuit66Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, FullSync) }
func TestEmptyShortCircuit66Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, SnapSync) }
func TestEmptyShortCircuit66Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, LightSync) }
func TestEmptyShortCircuit67Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, FullSync) }
func TestEmptyShortCircuit67Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, SnapSync) }
func TestEmptyShortCircuit67Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, LightSync) }
|
2021-02-18 19:54:29 +03:00
|
|
|
|
2020-12-14 12:27:15 +03:00
|
|
|
func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) {
|
2022-04-08 16:44:55 +03:00
|
|
|
tester := newTester(t)
|
2016-06-01 18:07:25 +03:00
|
|
|
defer tester.terminate()
|
|
|
|
|
2016-10-31 14:55:12 +03:00
|
|
|
// Create a block chain to download
|
2018-11-07 17:07:43 +03:00
|
|
|
chain := testChainBase
|
2021-11-26 14:26:03 +03:00
|
|
|
tester.newPeer("peer", protocol, chain.blocks[1:])
|
2015-08-14 21:25:41 +03:00
|
|
|
|
|
|
|
// Instrument the downloader to signal body requests
|
2023-04-03 22:48:10 +03:00
|
|
|
var bodiesHave, receiptsHave atomic.Int32
|
2015-08-14 21:25:41 +03:00
|
|
|
tester.downloader.bodyFetchHook = func(headers []*types.Header) {
|
2023-04-03 22:48:10 +03:00
|
|
|
bodiesHave.Add(int32(len(headers)))
|
2015-09-28 19:27:31 +03:00
|
|
|
}
|
|
|
|
tester.downloader.receiptFetchHook = func(headers []*types.Header) {
|
2023-04-03 22:48:10 +03:00
|
|
|
receiptsHave.Add(int32(len(headers)))
|
2015-08-14 21:25:41 +03:00
|
|
|
}
|
|
|
|
// Synchronise with the peer and make sure all blocks were retrieved
|
2015-10-13 12:04:25 +03:00
|
|
|
if err := tester.sync("peer", nil, mode); err != nil {
|
2015-08-14 21:25:41 +03:00
|
|
|
t.Fatalf("failed to synchronise blocks: %v", err)
|
|
|
|
}
|
2021-11-26 14:26:03 +03:00
|
|
|
assertOwnChain(t, tester, len(chain.blocks))
|
2015-09-28 19:27:31 +03:00
|
|
|
|
2015-08-14 21:25:41 +03:00
|
|
|
// Validate the number of block bodies that should have been requested
|
2015-09-28 19:27:31 +03:00
|
|
|
bodiesNeeded, receiptsNeeded := 0, 0
|
2021-11-26 14:26:03 +03:00
|
|
|
for _, block := range chain.blocks[1:] {
|
|
|
|
if mode != LightSync && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
|
2015-09-28 19:27:31 +03:00
|
|
|
bodiesNeeded++
|
2015-08-14 21:25:41 +03:00
|
|
|
}
|
2015-09-30 19:23:31 +03:00
|
|
|
}
|
2021-11-26 14:26:03 +03:00
|
|
|
for _, block := range chain.blocks[1:] {
|
|
|
|
if mode == SnapSync && len(block.Transactions()) > 0 {
|
2015-09-28 19:27:31 +03:00
|
|
|
receiptsNeeded++
|
|
|
|
}
|
|
|
|
}
|
2023-04-03 22:48:10 +03:00
|
|
|
if int(bodiesHave.Load()) != bodiesNeeded {
|
|
|
|
t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave.Load(), bodiesNeeded)
|
2015-08-14 21:25:41 +03:00
|
|
|
}
|
2023-04-03 22:48:10 +03:00
|
|
|
if int(receiptsHave.Load()) != receiptsNeeded {
|
|
|
|
t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave.Load(), receiptsNeeded)
|
2015-08-14 21:25:41 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-09-15 13:33:45 +03:00
|
|
|
// Tests that headers are enqueued continuously, preventing malicious nodes from
// stalling the downloader by feeding gapped header chains.
func TestMissingHeaderAttack66Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, FullSync) }
func TestMissingHeaderAttack66Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, SnapSync) }
func TestMissingHeaderAttack66Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, LightSync) }
func TestMissingHeaderAttack67Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, FullSync) }
func TestMissingHeaderAttack67Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, SnapSync) }
func TestMissingHeaderAttack67Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, LightSync) }
|
2021-02-18 19:54:29 +03:00
|
|
|
|
2020-12-14 12:27:15 +03:00
|
|
|
func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
|
2022-04-08 16:44:55 +03:00
|
|
|
tester := newTester(t)
|
2016-06-01 18:07:25 +03:00
|
|
|
defer tester.terminate()
|
2015-09-15 13:33:45 +03:00
|
|
|
|
2020-09-02 12:01:46 +03:00
|
|
|
chain := testChainBase.shorten(blockCacheMaxItems - 15)
|
2021-11-26 14:26:03 +03:00
|
|
|
|
|
|
|
attacker := tester.newPeer("attack", protocol, chain.blocks[1:])
|
|
|
|
attacker.withholdHeaders[chain.blocks[len(chain.blocks)/2-1].Hash()] = struct{}{}
|
2015-09-15 13:33:45 +03:00
|
|
|
|
2015-10-13 12:04:25 +03:00
|
|
|
if err := tester.sync("attack", nil, mode); err == nil {
|
2015-09-15 13:33:45 +03:00
|
|
|
t.Fatalf("succeeded attacker synchronisation")
|
|
|
|
}
|
|
|
|
// Synchronise with the valid peer and make sure sync succeeds
|
2021-11-26 14:26:03 +03:00
|
|
|
tester.newPeer("valid", protocol, chain.blocks[1:])
|
2015-10-13 12:04:25 +03:00
|
|
|
if err := tester.sync("valid", nil, mode); err != nil {
|
2015-09-15 13:33:45 +03:00
|
|
|
t.Fatalf("failed to synchronise blocks: %v", err)
|
|
|
|
}
|
2021-11-26 14:26:03 +03:00
|
|
|
assertOwnChain(t, tester, len(chain.blocks))
|
2015-09-15 13:33:45 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// Tests that if requested headers are shifted (i.e. first is missing), the queue
// detects the invalid numbering.
func TestShiftedHeaderAttack66Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, FullSync) }
func TestShiftedHeaderAttack66Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, SnapSync) }
func TestShiftedHeaderAttack66Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, LightSync) }
func TestShiftedHeaderAttack67Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, FullSync) }
func TestShiftedHeaderAttack67Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, SnapSync) }
func TestShiftedHeaderAttack67Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, LightSync) }
|
2021-02-18 19:54:29 +03:00
|
|
|
|
2020-12-14 12:27:15 +03:00
|
|
|
func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
|
2022-04-08 16:44:55 +03:00
|
|
|
tester := newTester(t)
|
2016-06-01 18:07:25 +03:00
|
|
|
defer tester.terminate()
|
2015-09-15 13:33:45 +03:00
|
|
|
|
2020-09-02 12:01:46 +03:00
|
|
|
chain := testChainBase.shorten(blockCacheMaxItems - 15)
|
2016-10-31 14:55:12 +03:00
|
|
|
|
2015-09-15 13:33:45 +03:00
|
|
|
// Attempt a full sync with an attacker feeding shifted headers
|
2021-11-26 14:26:03 +03:00
|
|
|
attacker := tester.newPeer("attack", protocol, chain.blocks[1:])
|
|
|
|
attacker.withholdHeaders[chain.blocks[1].Hash()] = struct{}{}
|
|
|
|
|
2015-10-13 12:04:25 +03:00
|
|
|
if err := tester.sync("attack", nil, mode); err == nil {
|
2015-09-15 13:33:45 +03:00
|
|
|
t.Fatalf("succeeded attacker synchronisation")
|
|
|
|
}
|
|
|
|
// Synchronise with the valid peer and make sure sync succeeds
|
2021-11-26 14:26:03 +03:00
|
|
|
tester.newPeer("valid", protocol, chain.blocks[1:])
|
2015-10-13 12:04:25 +03:00
|
|
|
if err := tester.sync("valid", nil, mode); err != nil {
|
2015-10-09 16:21:47 +03:00
|
|
|
t.Fatalf("failed to synchronise blocks: %v", err)
|
|
|
|
}
|
2021-11-26 14:26:03 +03:00
|
|
|
assertOwnChain(t, tester, len(chain.blocks))
|
2015-10-09 16:21:47 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// Tests that upon detecting an invalid header, the recent ones are rolled back
// for various failure scenarios. Afterwards a full sync is attempted to make
// sure no state was corrupted.
// NOTE(review): only SnapSync variants exist — presumably rollback is only
// meaningful when headers are imported ahead of bodies; confirm.
func TestInvalidHeaderRollback66Snap(t *testing.T) { testInvalidHeaderRollback(t, eth.ETH66, SnapSync) }
func TestInvalidHeaderRollback67Snap(t *testing.T) { testInvalidHeaderRollback(t, eth.ETH67, SnapSync) }
|
2015-10-09 16:21:47 +03:00
|
|
|
|
2020-12-14 12:27:15 +03:00
|
|
|
// testInvalidHeaderRollback drives three different malicious peers against the
// downloader — one withholding headers during the header-fill phase, one during
// block import, and one withholding the blocks promised after the pivot — and
// after each failed sync asserts that the header chain was rolled back far
// enough (and, for snap sync, that the pivot block itself was reverted).
// Finally it verifies a clean full sync against an honest peer still works.
func testInvalidHeaderRollback(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	// Create a small enough block chain to download
	targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
	chain := testChainBase.shorten(targetBlocks)

	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
	// This should result in the last fsHeaderSafetyNet headers being rolled back.
	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1

	fastAttacker := tester.newPeer("fast-attack", protocol, chain.blocks[1:])
	fastAttacker.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{}

	if err := tester.sync("fast-attack", nil, mode); err == nil {
		t.Fatalf("succeeded fast attacker synchronisation")
	}
	// After the failure, no more than MaxHeaderFetch headers may remain imported
	if head := tester.chain.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
	}
	// Attempt to sync with an attacker that feeds junk during the block import phase.
	// This should result in both the last fsHeaderSafetyNet number of headers being
	// rolled back, and also the pivot point being reverted to a non-block status.
	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1

	blockAttacker := tester.newPeer("block-attack", protocol, chain.blocks[1:])
	fastAttacker.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{} // Make sure the fast-attacker doesn't fill in
	blockAttacker.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{}

	if err := tester.sync("block-attack", nil, mode); err == nil {
		t.Fatalf("succeeded block attacker synchronisation")
	}
	if head := tester.chain.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
	}
	if mode == SnapSync {
		// In snap sync the pivot block must have been reverted too
		if head := tester.chain.CurrentBlock().Number.Uint64(); head != 0 {
			t.Errorf("fast sync pivot block #%d not rolled back", head)
		}
	}
	// Attempt to sync with an attacker that withholds promised blocks after the
	// fast sync pivot point. This could be a trial to leave the node with a bad
	// but already imported pivot block.
	withholdAttacker := tester.newPeer("withhold-attack", protocol, chain.blocks[1:])

	// Arm the withholding only once the sync has started, then disarm the hook
	// so subsequent syncs in this test run unimpeded.
	tester.downloader.syncInitHook = func(uint64, uint64) {
		for i := missing; i < len(chain.blocks); i++ {
			withholdAttacker.withholdHeaders[chain.blocks[i].Hash()] = struct{}{}
		}
		tester.downloader.syncInitHook = nil
	}
	if err := tester.sync("withhold-attack", nil, mode); err == nil {
		t.Fatalf("succeeded withholding attacker synchronisation")
	}
	if head := tester.chain.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
	}
	if mode == SnapSync {
		if head := tester.chain.CurrentBlock().Number.Uint64(); head != 0 {
			t.Errorf("fast sync pivot block #%d not rolled back", head)
		}
	}
	// Synchronise with the valid peer and make sure sync succeeds. Since the last rollback
	// should also disable fast syncing for this process, verify that we did a fresh full
	// sync. Note, we can't assert anything about the receipts since we won't purge the
	// database of them, hence we can't use assertOwnChain.
	tester.newPeer("valid", protocol, chain.blocks[1:])
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, len(chain.blocks))
}
|
|
|
|
|
2020-05-25 11:21:28 +03:00
|
|
|
// Tests that a peer advertising a high TD doesn't get to stall the downloader
|
2015-07-09 14:40:18 +03:00
|
|
|
// afterwards by not sending any useful hashes.
|
2021-04-08 18:06:03 +03:00
|
|
|
func TestHighTDStarvationAttack66Full(t *testing.T) {
|
|
|
|
testHighTDStarvationAttack(t, eth.ETH66, FullSync)
|
|
|
|
}
|
2021-11-26 14:26:03 +03:00
|
|
|
func TestHighTDStarvationAttack66Snap(t *testing.T) {
|
|
|
|
testHighTDStarvationAttack(t, eth.ETH66, SnapSync)
|
2021-04-08 18:06:03 +03:00
|
|
|
}
|
|
|
|
func TestHighTDStarvationAttack66Light(t *testing.T) {
|
|
|
|
testHighTDStarvationAttack(t, eth.ETH66, LightSync)
|
|
|
|
}
|
2022-06-15 13:56:47 +03:00
|
|
|
func TestHighTDStarvationAttack67Full(t *testing.T) {
|
|
|
|
testHighTDStarvationAttack(t, eth.ETH67, FullSync)
|
|
|
|
}
|
|
|
|
func TestHighTDStarvationAttack67Snap(t *testing.T) {
|
|
|
|
testHighTDStarvationAttack(t, eth.ETH67, SnapSync)
|
|
|
|
}
|
|
|
|
func TestHighTDStarvationAttack67Light(t *testing.T) {
|
|
|
|
testHighTDStarvationAttack(t, eth.ETH67, LightSync)
|
|
|
|
}
|
2021-02-18 19:54:29 +03:00
|
|
|
|
2020-12-14 12:27:15 +03:00
|
|
|
func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) {
|
2022-04-08 16:44:55 +03:00
|
|
|
tester := newTester(t)
|
2021-11-26 14:26:03 +03:00
|
|
|
defer tester.terminate()
|
2015-09-28 19:27:31 +03:00
|
|
|
|
2018-11-07 17:07:43 +03:00
|
|
|
chain := testChainBase.shorten(1)
|
2021-11-26 14:26:03 +03:00
|
|
|
tester.newPeer("attack", protocol, chain.blocks[1:])
|
2023-02-09 17:01:44 +03:00
|
|
|
if err := tester.sync("attack", big.NewInt(1000000), mode); err != errLaggingPeer {
|
|
|
|
t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errLaggingPeer)
|
2015-07-09 14:40:18 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-06-11 18:13:13 +03:00
|
|
|
// Tests that misbehaving peers are disconnected, whilst behaving ones are not.
|
2021-04-08 18:06:03 +03:00
|
|
|
func TestBlockHeaderAttackerDropping66(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH66) }
|
2022-06-15 13:56:47 +03:00
|
|
|
func TestBlockHeaderAttackerDropping67(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH67) }
|
2015-08-14 21:25:41 +03:00
|
|
|
|
2020-12-14 12:27:15 +03:00
|
|
|
// testBlockHeaderAttackerDropping table-drives the peer-dropping policy: for
// every synchronisation error a mocked sync can produce, it checks whether the
// originating peer is (or is not) unregistered from the peer set afterwards.
func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {
	// Define the disconnection requirement for individual hash fetch errors
	tests := []struct {
		result error // error injected as the outcome of the mocked sync
		drop   bool  // whether the peer must be dropped for that error
	}{
		{nil, false},                        // Sync succeeded, all is well
		{errBusy, false},                    // Sync is already in progress, no problem
		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
		{errUnsyncedPeer, true},             // Peer was detected to be unsynced, drop it
		{errNoPeers, false},                 // No peers to download from, soft race, no issue
		{errTimeout, true},                  // No hashes received in due time, drop the peer
		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
	}
	// Run the tests and check disconnection status
	tester := newTester(t)
	defer tester.terminate()
	chain := testChainBase.shorten(1)

	for i, tt := range tests {
		// Register a new peer and ensure its presence
		id := fmt.Sprintf("test %d", i)
		tester.newPeer(id, protocol, chain.blocks[1:])
		if _, ok := tester.peers[id]; !ok {
			t.Fatalf("test %d: registered peer not found", i)
		}
		// Simulate a synchronisation and check the required result
		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }

		tester.downloader.LegacySync(id, tester.chain.Genesis().Hash(), big.NewInt(1000), nil, FullSync)
		// Peer presence after the sync tells us whether it was dropped
		if _, ok := tester.peers[id]; !ok != tt.drop {
			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
		}
	}
}
|
2015-06-12 13:35:29 +03:00
|
|
|
|
2015-10-13 12:04:25 +03:00
|
|
|
// Tests that synchronisation progress (origin block number, current block number
|
|
|
|
// and highest block number) is tracked and updated correctly.
|
2021-04-08 18:06:03 +03:00
|
|
|
func TestSyncProgress66Full(t *testing.T) { testSyncProgress(t, eth.ETH66, FullSync) }
|
2021-11-26 14:26:03 +03:00
|
|
|
func TestSyncProgress66Snap(t *testing.T) { testSyncProgress(t, eth.ETH66, SnapSync) }
|
2021-04-08 18:06:03 +03:00
|
|
|
func TestSyncProgress66Light(t *testing.T) { testSyncProgress(t, eth.ETH66, LightSync) }
|
2022-06-15 13:56:47 +03:00
|
|
|
func TestSyncProgress67Full(t *testing.T) { testSyncProgress(t, eth.ETH67, FullSync) }
|
|
|
|
func TestSyncProgress67Snap(t *testing.T) { testSyncProgress(t, eth.ETH67, SnapSync) }
|
|
|
|
func TestSyncProgress67Light(t *testing.T) { testSyncProgress(t, eth.ETH67, LightSync) }
|
2021-02-18 19:54:29 +03:00
|
|
|
|
2020-12-14 12:27:15 +03:00
|
|
|
// testSyncProgress syncs half the chain, then the whole chain, and checks the
// reported progress (origin/current/highest) at each stage. The syncInitHook
// blocks the sync goroutine on a channel handshake so progress can be probed
// deterministically right after sync initialisation.
func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{}) // signalled by the syncer once init ran
	progress := make(chan struct{}) // released by the test to let sync continue

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Synchronise half the blocks and check initial progress
	tester.newPeer("peer-half", protocol, chain.shorten(len(chain.blocks) / 2).blocks[1:])
	pending := new(sync.WaitGroup)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("peer-half", nil, mode); err != nil {
			// panic instead of t.Fatalf: we're on a non-test goroutine
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(len(chain.blocks)/2 - 1),
	})
	progress <- struct{}{}
	pending.Wait()

	// Synchronise all the blocks and check continuation progress
	tester.newPeer("peer-full", protocol, chain.blocks[1:])
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("peer-full", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
		StartingBlock: uint64(len(chain.blocks)/2 - 1),
		CurrentBlock:  uint64(len(chain.blocks)/2 - 1),
		HighestBlock:  uint64(len(chain.blocks) - 1),
	})

	// Check final progress after successful sync
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		StartingBlock: uint64(len(chain.blocks)/2 - 1),
		CurrentBlock:  uint64(len(chain.blocks) - 1),
		HighestBlock:  uint64(len(chain.blocks) - 1),
	})
}
|
2015-10-13 12:04:25 +03:00
|
|
|
|
2018-11-07 17:07:43 +03:00
|
|
|
func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.SyncProgress) {
|
2018-11-16 14:15:05 +03:00
|
|
|
// Mark this method as a helper to report errors at callsite, not in here
|
2018-11-07 17:07:43 +03:00
|
|
|
t.Helper()
|
2018-11-16 14:15:05 +03:00
|
|
|
|
2018-11-07 17:07:43 +03:00
|
|
|
p := d.Progress()
|
2021-11-26 14:26:03 +03:00
|
|
|
if p.StartingBlock != want.StartingBlock || p.CurrentBlock != want.CurrentBlock || p.HighestBlock != want.HighestBlock {
|
2018-11-07 17:07:43 +03:00
|
|
|
t.Fatalf("%s progress mismatch:\nhave %+v\nwant %+v", stage, p, want)
|
2015-10-13 12:04:25 +03:00
|
|
|
}
|
2015-09-09 19:02:54 +03:00
|
|
|
}
|
|
|
|
|
2015-10-13 12:04:25 +03:00
|
|
|
// Tests that synchronisation progress (origin block number and highest block
|
2015-09-09 19:02:54 +03:00
|
|
|
// number) is tracked and updated correctly in case of a fork (or manual head
|
|
|
|
// revertal).
|
2021-04-08 18:06:03 +03:00
|
|
|
func TestForkedSyncProgress66Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, FullSync) }
|
2021-11-26 14:26:03 +03:00
|
|
|
func TestForkedSyncProgress66Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, SnapSync) }
|
2021-04-08 18:06:03 +03:00
|
|
|
func TestForkedSyncProgress66Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, LightSync) }
|
2022-06-15 13:56:47 +03:00
|
|
|
func TestForkedSyncProgress67Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, FullSync) }
|
|
|
|
func TestForkedSyncProgress67Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, SnapSync) }
|
|
|
|
func TestForkedSyncProgress67Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, LightSync) }
|
2021-02-18 19:54:29 +03:00
|
|
|
|
2020-12-14 12:27:15 +03:00
|
|
|
// testForkedSyncProgress syncs one fork, then the competing fork, verifying
// that the progress origin resets to the common ancestor when switching
// chains. The syncInitHook channel handshake freezes the syncer right after
// initialisation so progress can be probed deterministically.
func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	// Two forks sharing testChainBase as their common prefix
	chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + MaxHeaderFetch)
	chainB := testChainForkLightB.shorten(len(testChainBase.blocks) + MaxHeaderFetch)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Synchronise with one of the forks and check progress
	tester.newPeer("fork A", protocol, chainA.blocks[1:])
	pending := new(sync.WaitGroup)
	pending.Add(1)
	go func() {
		defer pending.Done()
		// panic instead of t.Fatalf: we're on a non-test goroutine
		if err := tester.sync("fork A", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting

	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(len(chainA.blocks) - 1),
	})
	progress <- struct{}{}
	pending.Wait()

	// Simulate a successful sync above the fork
	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight

	// Synchronise with the second fork and check progress resets
	tester.newPeer("fork B", protocol, chainB.blocks[1:])
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("fork B", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	// Origin must have been rewound to the last common block of the two forks
	checkProgress(t, tester.downloader, "forking", ethereum.SyncProgress{
		StartingBlock: uint64(len(testChainBase.blocks)) - 1,
		CurrentBlock:  uint64(len(chainA.blocks) - 1),
		HighestBlock:  uint64(len(chainB.blocks) - 1),
	})

	// Check final progress after successful sync
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		StartingBlock: uint64(len(testChainBase.blocks)) - 1,
		CurrentBlock:  uint64(len(chainB.blocks) - 1),
		HighestBlock:  uint64(len(chainB.blocks) - 1),
	})
}
|
|
|
|
|
2015-10-13 12:04:25 +03:00
|
|
|
// Tests that if synchronisation is aborted due to some failure, then the progress
|
2015-09-09 19:02:54 +03:00
|
|
|
// origin is not updated in the next sync cycle, as it should be considered the
|
|
|
|
// continuation of the previous sync and not a new instance.
|
2021-04-08 18:06:03 +03:00
|
|
|
func TestFailedSyncProgress66Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, FullSync) }
|
2021-11-26 14:26:03 +03:00
|
|
|
func TestFailedSyncProgress66Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, SnapSync) }
|
2021-04-08 18:06:03 +03:00
|
|
|
func TestFailedSyncProgress66Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, LightSync) }
|
2022-06-15 13:56:47 +03:00
|
|
|
func TestFailedSyncProgress67Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, FullSync) }
|
|
|
|
func TestFailedSyncProgress67Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, SnapSync) }
|
|
|
|
func TestFailedSyncProgress67Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, LightSync) }
|
2021-02-18 19:54:29 +03:00
|
|
|
|
2020-12-14 12:27:15 +03:00
|
|
|
// testFailedSyncProgress lets a faulty peer fail mid-sync, records the
// progress at that point, then verifies a retry against a good peer resumes
// from the same origin rather than restarting. The syncInitHook channel
// handshake freezes the syncer after initialisation for deterministic probing.
func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Attempt a full sync with a faulty peer
	missing := len(chain.blocks)/2 - 1

	faulter := tester.newPeer("faulty", protocol, chain.blocks[1:])
	faulter.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{}

	pending := new(sync.WaitGroup)
	pending.Add(1)
	go func() {
		defer pending.Done()
		// panic instead of t.Fatalf: we're on a non-test goroutine
		if err := tester.sync("faulty", nil, mode); err == nil {
			panic("succeeded faulty synchronisation")
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(len(chain.blocks) - 1),
	})
	progress <- struct{}{}
	pending.Wait()
	// Snapshot the progress left behind by the failed sync
	afterFailedSync := tester.downloader.Progress()

	// Synchronise with a good peer and check that the progress origin remains the same
	// after a failure
	tester.newPeer("valid", protocol, chain.blocks[1:])
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("valid", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "completing", afterFailedSync)

	// Check final progress after successful sync
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		CurrentBlock: uint64(len(chain.blocks) - 1),
		HighestBlock: uint64(len(chain.blocks) - 1),
	})
}
|
|
|
|
|
|
|
|
// Tests that if an attacker fakes a chain height, after the attack is detected,
|
2015-10-13 12:04:25 +03:00
|
|
|
// the progress height is successfully reduced at the next sync invocation.
|
2021-04-08 18:06:03 +03:00
|
|
|
func TestFakedSyncProgress66Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, FullSync) }
|
2021-11-26 14:26:03 +03:00
|
|
|
func TestFakedSyncProgress66Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, SnapSync) }
|
2021-04-08 18:06:03 +03:00
|
|
|
func TestFakedSyncProgress66Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, LightSync) }
|
2022-06-15 13:56:47 +03:00
|
|
|
func TestFakedSyncProgress67Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, FullSync) }
|
|
|
|
func TestFakedSyncProgress67Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, SnapSync) }
|
|
|
|
func TestFakedSyncProgress67Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, LightSync) }
|
2021-02-18 19:54:29 +03:00
|
|
|
|
2020-12-14 12:27:15 +03:00
|
|
|
// testFakedSyncProgress lets an attacker advertise more blocks than it can
// deliver, then checks that a lagging peer is rejected without touching the
// recorded progress, and that a valid peer finally corrects the progress
// height. The syncInitHook channel handshake freezes the syncer right after
// initialisation so progress can be probed deterministically.
func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
	tester := newTester(t)
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})
	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Create and sync with an attacker that promises a higher chain than available.
	attacker := tester.newPeer("attack", protocol, chain.blocks[1:])
	numMissing := 5
	// Withhold the headers just below the advertised head so the sync fails
	for i := len(chain.blocks) - 2; i > len(chain.blocks)-numMissing; i-- {
		attacker.withholdHeaders[chain.blocks[i].Hash()] = struct{}{}
	}
	pending := new(sync.WaitGroup)
	pending.Add(1)
	go func() {
		defer pending.Done()
		// panic instead of t.Fatalf: we're on a non-test goroutine
		if err := tester.sync("attack", nil, mode); err == nil {
			panic("succeeded attacker synchronisation")
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(len(chain.blocks) - 1),
	})
	progress <- struct{}{}
	pending.Wait()
	afterFailedSync := tester.downloader.Progress()

	// it is no longer valid to sync to a lagging peer
	laggingChain := chain.shorten(800 / 2)
	tester.newPeer("lagging", protocol, laggingChain.blocks[1:])
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("lagging", nil, mode); err != errLaggingPeer {
			panic(fmt.Sprintf("unexpected lagging synchronisation err:%v", err))
		}
	}()
	// lagging peer will return before syncInitHook, skip <-starting and progress <- struct{}{}
	checkProgress(t, tester.downloader, "lagging", ethereum.SyncProgress{
		CurrentBlock: afterFailedSync.CurrentBlock,
		HighestBlock: uint64(len(chain.blocks) - 1),
	})
	pending.Wait()

	// Synchronise with a good peer and check that the progress height has been increased to
	// the true value.
	validChain := chain.shorten(len(chain.blocks))
	tester.newPeer("valid", protocol, validChain.blocks[1:])
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("valid", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
		CurrentBlock: afterFailedSync.CurrentBlock,
		HighestBlock: uint64(len(validChain.blocks) - 1),
	})
	// Check final progress after successful sync.
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		CurrentBlock: uint64(len(validChain.blocks) - 1),
		HighestBlock: uint64(len(validChain.blocks) - 1),
	})
}
|
2015-11-13 18:08:15 +02:00
|
|
|
|
2018-11-12 16:18:56 +03:00
|
|
|
func TestRemoteHeaderRequestSpan(t *testing.T) {
|
|
|
|
testCases := []struct {
|
|
|
|
remoteHeight uint64
|
|
|
|
localHeight uint64
|
|
|
|
expected []int
|
|
|
|
}{
|
|
|
|
// Remote is way higher. We should ask for the remote head and go backwards
|
|
|
|
{1500, 1000,
|
|
|
|
[]int{1323, 1339, 1355, 1371, 1387, 1403, 1419, 1435, 1451, 1467, 1483, 1499},
|
|
|
|
},
|
|
|
|
{15000, 13006,
|
|
|
|
[]int{14823, 14839, 14855, 14871, 14887, 14903, 14919, 14935, 14951, 14967, 14983, 14999},
|
|
|
|
},
|
2020-10-13 11:58:41 +03:00
|
|
|
// Remote is pretty close to us. We don't have to fetch as many
|
2018-11-12 16:18:56 +03:00
|
|
|
{1200, 1150,
|
|
|
|
[]int{1149, 1154, 1159, 1164, 1169, 1174, 1179, 1184, 1189, 1194, 1199},
|
|
|
|
},
|
|
|
|
// Remote is equal to us (so on a fork with higher td)
|
|
|
|
// We should get the closest couple of ancestors
|
|
|
|
{1500, 1500,
|
|
|
|
[]int{1497, 1499},
|
|
|
|
},
|
|
|
|
// We're higher than the remote! Odd
|
|
|
|
{1000, 1500,
|
|
|
|
[]int{997, 999},
|
|
|
|
},
|
|
|
|
// Check some weird edgecases that it behaves somewhat rationally
|
|
|
|
{0, 1500,
|
|
|
|
[]int{0, 2},
|
|
|
|
},
|
|
|
|
{6000000, 0,
|
|
|
|
[]int{5999823, 5999839, 5999855, 5999871, 5999887, 5999903, 5999919, 5999935, 5999951, 5999967, 5999983, 5999999},
|
|
|
|
},
|
|
|
|
{0, 0,
|
|
|
|
[]int{0, 2},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
reqs := func(from, count, span int) []int {
|
|
|
|
var r []int
|
|
|
|
num := from
|
|
|
|
for len(r) < count {
|
|
|
|
r = append(r, num)
|
|
|
|
num += span + 1
|
|
|
|
}
|
|
|
|
return r
|
|
|
|
}
|
|
|
|
for i, tt := range testCases {
|
|
|
|
from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight)
|
|
|
|
data := reqs(int(from), count, span)
|
|
|
|
|
|
|
|
if max != uint64(data[len(data)-1]) {
|
|
|
|
t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max)
|
|
|
|
}
|
|
|
|
failed := false
|
|
|
|
if len(data) != len(tt.expected) {
|
|
|
|
failed = true
|
|
|
|
t.Errorf("test %d: length wrong, expected %d got %d", i, len(tt.expected), len(data))
|
|
|
|
} else {
|
|
|
|
for j, n := range data {
|
|
|
|
if n != tt.expected[j] {
|
|
|
|
failed = true
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if failed {
|
2022-05-09 13:13:23 +03:00
|
|
|
res := strings.ReplaceAll(fmt.Sprint(data), " ", ",")
|
|
|
|
exp := strings.ReplaceAll(fmt.Sprint(tt.expected), " ", ",")
|
2019-07-17 14:20:24 +03:00
|
|
|
t.Logf("got: %v\n", res)
|
|
|
|
t.Logf("exp: %v\n", exp)
|
2018-11-12 16:18:56 +03:00
|
|
|
t.Errorf("test %d: wrong values", i)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2019-04-16 13:20:38 +03:00
|
|
|
|
2023-09-07 11:39:29 +03:00
|
|
|
/*
|
2019-04-16 13:20:38 +03:00
|
|
|
// Tests that beacon-mode synchronisation (sync driven by a trusted head
// delivered from the consensus layer) completes correctly from various
// local chain heights.
|
2022-04-04 10:10:16 +03:00
|
|
|
func TestBeaconSync66Full(t *testing.T) { testBeaconSync(t, eth.ETH66, FullSync) }
|
|
|
|
func TestBeaconSync66Snap(t *testing.T) { testBeaconSync(t, eth.ETH66, SnapSync) }
|
2019-04-16 13:20:38 +03:00
|
|
|
|
2022-04-04 10:10:16 +03:00
|
|
|
func testBeaconSync(t *testing.T, protocol uint, mode SyncMode) {
|
|
|
|
//log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
|
2019-04-16 13:20:38 +03:00
|
|
|
|
2022-04-04 10:10:16 +03:00
|
|
|
var cases = []struct {
|
|
|
|
name string // The name of testing scenario
|
|
|
|
local int // The length of local chain(canonical chain assumed), 0 means genesis is the head
|
|
|
|
}{
|
|
|
|
{name: "Beacon sync since genesis", local: 0},
|
|
|
|
{name: "Beacon sync with short local chain", local: 1},
|
|
|
|
{name: "Beacon sync with long local chain", local: blockCacheMaxItems - 15 - fsMinFullBlocks/2},
|
|
|
|
{name: "Beacon sync with full local chain", local: blockCacheMaxItems - 15 - 1},
|
|
|
|
}
|
|
|
|
for _, c := range cases {
|
|
|
|
t.Run(c.name, func(t *testing.T) {
|
|
|
|
success := make(chan struct{})
|
2022-04-08 16:44:55 +03:00
|
|
|
tester := newTesterWithNotification(t, func() {
|
2022-04-04 10:10:16 +03:00
|
|
|
close(success)
|
|
|
|
})
|
|
|
|
defer tester.terminate()
|
|
|
|
|
|
|
|
chain := testChainBase.shorten(blockCacheMaxItems - 15)
|
|
|
|
tester.newPeer("peer", protocol, chain.blocks[1:])
|
|
|
|
|
|
|
|
// Build the local chain segment if it's required
|
|
|
|
if c.local > 0 {
|
|
|
|
tester.chain.InsertChain(chain.blocks[1 : c.local+1])
|
|
|
|
}
|
2023-02-23 14:22:41 +03:00
|
|
|
if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil {
|
2022-04-04 10:10:16 +03:00
|
|
|
t.Fatalf("Failed to beacon sync chain %v %v", c.name, err)
|
|
|
|
}
|
|
|
|
select {
|
|
|
|
case <-success:
|
|
|
|
// Ok, downloader fully cancelled after sync cycle
|
2023-03-02 09:29:15 +03:00
|
|
|
if bs := int(tester.chain.CurrentBlock().Number.Uint64()) + 1; bs != len(chain.blocks) {
|
2022-04-04 10:10:16 +03:00
|
|
|
t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, len(chain.blocks))
|
|
|
|
}
|
|
|
|
case <-time.NewTimer(time.Second * 3).C:
|
|
|
|
t.Fatalf("Failed to sync chain in three seconds")
|
|
|
|
}
|
|
|
|
})
|
2019-04-16 13:20:38 +03:00
|
|
|
}
|
|
|
|
}
|
2023-09-07 11:39:29 +03:00
|
|
|
*/
|