2015-07-07 03:54:22 +03:00
|
|
|
// Copyright 2015 The go-ethereum Authors
|
2015-07-22 19:48:40 +03:00
|
|
|
// This file is part of the go-ethereum library.
|
2015-07-07 03:54:22 +03:00
|
|
|
//
|
2015-07-23 19:35:11 +03:00
|
|
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
2015-07-07 03:54:22 +03:00
|
|
|
// it under the terms of the GNU Lesser General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
//
|
2015-07-22 19:48:40 +03:00
|
|
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
2015-07-07 03:54:22 +03:00
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
2015-07-22 19:48:40 +03:00
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
2015-07-07 03:54:22 +03:00
|
|
|
// GNU Lesser General Public License for more details.
|
|
|
|
//
|
|
|
|
// You should have received a copy of the GNU Lesser General Public License
|
2015-07-22 19:48:40 +03:00
|
|
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
2015-07-07 03:54:22 +03:00
|
|
|
|
2015-04-12 13:38:25 +03:00
|
|
|
package downloader
|
|
|
|
|
|
|
|
import (
|
2015-06-11 18:13:13 +03:00
|
|
|
"fmt"
|
2015-04-12 13:38:25 +03:00
|
|
|
"math/big"
|
2015-09-09 19:02:54 +03:00
|
|
|
"sync"
|
2015-06-12 13:35:29 +03:00
|
|
|
"sync/atomic"
|
2015-04-12 13:38:25 +03:00
|
|
|
"testing"
|
|
|
|
"time"
|
|
|
|
|
2019-04-16 13:20:38 +03:00
|
|
|
"github.com/ethereum/go-ethereum"
|
2015-04-12 13:38:25 +03:00
|
|
|
"github.com/ethereum/go-ethereum/common"
|
2021-11-26 14:26:03 +03:00
|
|
|
"github.com/ethereum/go-ethereum/consensus/ethash"
|
|
|
|
"github.com/ethereum/go-ethereum/core"
|
2018-09-24 15:57:49 +03:00
|
|
|
"github.com/ethereum/go-ethereum/core/rawdb"
|
2015-04-12 13:38:25 +03:00
|
|
|
"github.com/ethereum/go-ethereum/core/types"
|
2021-11-26 14:26:03 +03:00
|
|
|
"github.com/ethereum/go-ethereum/core/vm"
|
2021-04-08 18:06:03 +03:00
|
|
|
"github.com/ethereum/go-ethereum/eth/protocols/eth"
|
2021-11-26 14:26:03 +03:00
|
|
|
"github.com/ethereum/go-ethereum/eth/protocols/snap"
|
2015-05-15 13:26:34 +03:00
|
|
|
"github.com/ethereum/go-ethereum/event"
|
2021-11-26 14:26:03 +03:00
|
|
|
"github.com/ethereum/go-ethereum/log"
|
|
|
|
"github.com/ethereum/go-ethereum/params"
|
|
|
|
"github.com/ethereum/go-ethereum/rlp"
|
2015-10-05 19:37:56 +03:00
|
|
|
"github.com/ethereum/go-ethereum/trie"
|
2015-04-12 13:38:25 +03:00
|
|
|
)
|
|
|
|
|
2016-10-31 14:55:12 +03:00
|
|
|
// downloadTester is a test simulator for mocking out local block chain.
|
|
|
|
type downloadTester struct {
|
2021-11-26 14:26:03 +03:00
|
|
|
chain *core.BlockChain
|
2016-10-31 14:55:12 +03:00
|
|
|
downloader *Downloader
|
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
peers map[string]*downloadTesterPeer
|
|
|
|
lock sync.RWMutex
|
2016-10-31 14:55:12 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// newTester creates a new downloader test mocker.
|
2022-04-08 16:44:55 +03:00
|
|
|
func newTester(t *testing.T) *downloadTester {
|
|
|
|
return newTesterWithNotification(t, nil)
|
2022-04-04 10:10:16 +03:00
|
|
|
}
|
|
|
|
|
2024-03-26 23:01:28 +03:00
|
|
|
// newTesterWithNotification creates a new downloader test mocker.
|
2022-04-08 16:44:55 +03:00
|
|
|
func newTesterWithNotification(t *testing.T, success func()) *downloadTester {
|
2024-05-08 09:43:33 +03:00
|
|
|
db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false)
|
2021-11-26 14:26:03 +03:00
|
|
|
if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
2022-04-08 16:44:55 +03:00
|
|
|
t.Cleanup(func() {
|
|
|
|
db.Close()
|
|
|
|
})
|
2022-08-30 19:22:28 +03:00
|
|
|
gspec := &core.Genesis{
|
|
|
|
Config: params.TestChainConfig,
|
2024-02-16 21:05:33 +03:00
|
|
|
Alloc: types.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}},
|
2022-08-09 12:44:39 +03:00
|
|
|
BaseFee: big.NewInt(params.InitialBaseFee),
|
|
|
|
}
|
2024-09-04 16:03:06 +03:00
|
|
|
chain, err := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
|
2021-11-26 14:26:03 +03:00
|
|
|
if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
2016-10-31 14:55:12 +03:00
|
|
|
tester := &downloadTester{
|
2024-05-08 09:43:33 +03:00
|
|
|
chain: chain,
|
|
|
|
peers: make(map[string]*downloadTesterPeer),
|
2021-11-26 14:26:03 +03:00
|
|
|
}
|
2024-05-28 20:52:08 +03:00
|
|
|
tester.downloader = New(db, new(event.TypeMux), tester.chain, tester.dropPeer, success)
|
2016-10-31 14:55:12 +03:00
|
|
|
return tester
|
2016-02-23 12:32:09 +02:00
|
|
|
}
|
|
|
|
|
2016-06-01 18:07:25 +03:00
|
|
|
// terminate aborts any operations on the embedded downloader and releases all
|
|
|
|
// held resources.
|
|
|
|
func (dl *downloadTester) terminate() {
|
|
|
|
dl.downloader.Terminate()
|
2021-11-26 14:26:03 +03:00
|
|
|
dl.chain.Stop()
|
2016-06-01 18:07:25 +03:00
|
|
|
}
|
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
// newPeer registers a new block download source into the downloader.
|
|
|
|
func (dl *downloadTester) newPeer(id string, version uint, blocks []*types.Block) *downloadTesterPeer {
|
|
|
|
dl.lock.Lock()
|
|
|
|
defer dl.lock.Unlock()
|
2018-11-16 14:15:05 +03:00
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
peer := &downloadTesterPeer{
|
2024-04-30 16:46:53 +03:00
|
|
|
dl: dl,
|
|
|
|
id: id,
|
|
|
|
chain: newTestBlockchain(blocks),
|
|
|
|
withholdBodies: make(map[common.Hash]struct{}),
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
2019-04-25 17:59:48 +03:00
|
|
|
}
|
2021-11-26 14:26:03 +03:00
|
|
|
dl.peers[id] = peer
|
2015-09-09 19:02:54 +03:00
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
if err := dl.downloader.RegisterPeer(id, version, peer); err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
if err := dl.downloader.SnapSyncer.Register(peer); err != nil {
|
|
|
|
panic(err)
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
2019-04-25 17:59:48 +03:00
|
|
|
}
|
2021-11-26 14:26:03 +03:00
|
|
|
return peer
|
2015-04-12 13:38:25 +03:00
|
|
|
}
|
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
// dropPeer simulates a hard peer removal from the connection pool.
|
|
|
|
func (dl *downloadTester) dropPeer(id string) {
|
|
|
|
dl.lock.Lock()
|
|
|
|
defer dl.lock.Unlock()
|
2015-09-09 19:02:54 +03:00
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
delete(dl.peers, id)
|
|
|
|
dl.downloader.SnapSyncer.Unregister(id)
|
|
|
|
dl.downloader.UnregisterPeer(id)
|
2015-06-11 17:14:45 +03:00
|
|
|
}
|
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
type downloadTesterPeer struct {
|
2024-04-30 16:46:53 +03:00
|
|
|
dl *downloadTester
|
|
|
|
withholdBodies map[common.Hash]struct{}
|
|
|
|
id string
|
|
|
|
chain *core.BlockChain
|
2015-09-28 19:27:31 +03:00
|
|
|
}
|
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
// Head constructs a function to retrieve a peer's current head hash
|
|
|
|
// and total difficulty.
|
|
|
|
func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
|
|
|
|
head := dlp.chain.CurrentBlock()
|
2023-03-02 09:29:15 +03:00
|
|
|
return head.Hash(), dlp.chain.GetTd(head.Hash(), head.Number.Uint64())
|
2021-11-26 14:26:03 +03:00
|
|
|
}
|
2015-09-09 19:02:54 +03:00
|
|
|
|
core, eth: improve delivery speed on header requests (#23105)
This PR reduces the amount of work we do when answering header queries, e.g. when a peer
is syncing from us.
For some items, e.g block bodies, when we read the rlp-data from database, we plug it
directly into the response package. We didn't do that for headers, but instead read
headers-rlp, decode to types.Header, and re-encode to rlp. This PR changes that to keep it
in RLP-form as much as possible. When a node is syncing from us, it typically requests 192
contiguous headers. On master it has the following effect:
- For headers not in ancient: 2 db lookups. One for translating hash->number (even though
the request is by number), and another for reading by hash (this latter one is sometimes
cached).
- For headers in ancient: 1 file lookup/syscall for translating hash->number (even though
the request is by number), and another for reading the header itself. After this, it
also performes a hashing of the header, to ensure that the hash is what it expected. In
this PR, I instead move the logic for "give me a sequence of blocks" into the lower
layers, where the database can determine how and what to read from leveldb and/or
ancients.
There are basically four types of requests; three of them are improved this way. The
fourth, by hash going backwards, is more tricky to optimize. However, since we know that
the gap is 0, we can look up by the parentHash, and stlil shave off all the number->hash
lookups.
The gapped collection can be optimized similarly, as a follow-up, at least in three out of
four cases.
Co-authored-by: Felix Lange <fjl@twurst.com>
2021-12-07 19:50:58 +03:00
|
|
|
func unmarshalRlpHeaders(rlpdata []rlp.RawValue) []*types.Header {
|
|
|
|
var headers = make([]*types.Header, len(rlpdata))
|
|
|
|
for i, data := range rlpdata {
|
|
|
|
var h types.Header
|
|
|
|
if err := rlp.DecodeBytes(data, &h); err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
headers[i] = &h
|
|
|
|
}
|
|
|
|
return headers
|
|
|
|
}
|
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
// RequestHeadersByHash constructs a GetBlockHeaders function based on a hashed
|
|
|
|
// origin; associated with a particular peer in the download tester. The returned
|
|
|
|
// function can be used to retrieve batches of headers from the particular peer.
|
|
|
|
func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
|
|
|
|
// Service the header query via the live handler code
|
2023-10-03 15:03:19 +03:00
|
|
|
rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, ð.GetBlockHeadersRequest{
|
2021-11-26 14:26:03 +03:00
|
|
|
Origin: eth.HashOrNumber{
|
|
|
|
Hash: origin,
|
|
|
|
},
|
|
|
|
Amount: uint64(amount),
|
|
|
|
Skip: uint64(skip),
|
|
|
|
Reverse: reverse,
|
|
|
|
}, nil)
|
core, eth: improve delivery speed on header requests (#23105)
This PR reduces the amount of work we do when answering header queries, e.g. when a peer
is syncing from us.
For some items, e.g block bodies, when we read the rlp-data from database, we plug it
directly into the response package. We didn't do that for headers, but instead read
headers-rlp, decode to types.Header, and re-encode to rlp. This PR changes that to keep it
in RLP-form as much as possible. When a node is syncing from us, it typically requests 192
contiguous headers. On master it has the following effect:
- For headers not in ancient: 2 db lookups. One for translating hash->number (even though
the request is by number), and another for reading by hash (this latter one is sometimes
cached).
- For headers in ancient: 1 file lookup/syscall for translating hash->number (even though
the request is by number), and another for reading the header itself. After this, it
also performes a hashing of the header, to ensure that the hash is what it expected. In
this PR, I instead move the logic for "give me a sequence of blocks" into the lower
layers, where the database can determine how and what to read from leveldb and/or
ancients.
There are basically four types of requests; three of them are improved this way. The
fourth, by hash going backwards, is more tricky to optimize. However, since we know that
the gap is 0, we can look up by the parentHash, and stlil shave off all the number->hash
lookups.
The gapped collection can be optimized similarly, as a follow-up, at least in three out of
four cases.
Co-authored-by: Felix Lange <fjl@twurst.com>
2021-12-07 19:50:58 +03:00
|
|
|
headers := unmarshalRlpHeaders(rlpHeaders)
|
2021-12-01 21:18:12 +03:00
|
|
|
hashes := make([]common.Hash, len(headers))
|
|
|
|
for i, header := range headers {
|
|
|
|
hashes[i] = header.Hash()
|
|
|
|
}
|
2021-11-26 14:26:03 +03:00
|
|
|
// Deliver the headers to the downloader
|
|
|
|
req := ð.Request{
|
|
|
|
Peer: dlp.id,
|
|
|
|
}
|
|
|
|
res := ð.Response{
|
|
|
|
Req: req,
|
2023-10-03 15:03:19 +03:00
|
|
|
Res: (*eth.BlockHeadersRequest)(&headers),
|
2021-12-01 21:18:12 +03:00
|
|
|
Meta: hashes,
|
2021-11-26 14:26:03 +03:00
|
|
|
Time: 1,
|
|
|
|
Done: make(chan error, 1), // Ignore the returned status
|
|
|
|
}
|
|
|
|
go func() {
|
|
|
|
sink <- res
|
|
|
|
}()
|
|
|
|
return req, nil
|
2015-06-30 19:05:06 +03:00
|
|
|
}
|
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
// RequestHeadersByNumber constructs a GetBlockHeaders function based on a numbered
|
|
|
|
// origin; associated with a particular peer in the download tester. The returned
|
|
|
|
// function can be used to retrieve batches of headers from the particular peer.
|
|
|
|
func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
|
|
|
|
// Service the header query via the live handler code
|
2023-10-03 15:03:19 +03:00
|
|
|
rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, ð.GetBlockHeadersRequest{
|
2021-11-26 14:26:03 +03:00
|
|
|
Origin: eth.HashOrNumber{
|
|
|
|
Number: origin,
|
|
|
|
},
|
|
|
|
Amount: uint64(amount),
|
|
|
|
Skip: uint64(skip),
|
|
|
|
Reverse: reverse,
|
|
|
|
}, nil)
|
core, eth: improve delivery speed on header requests (#23105)
This PR reduces the amount of work we do when answering header queries, e.g. when a peer
is syncing from us.
For some items, e.g block bodies, when we read the rlp-data from database, we plug it
directly into the response package. We didn't do that for headers, but instead read
headers-rlp, decode to types.Header, and re-encode to rlp. This PR changes that to keep it
in RLP-form as much as possible. When a node is syncing from us, it typically requests 192
contiguous headers. On master it has the following effect:
- For headers not in ancient: 2 db lookups. One for translating hash->number (even though
the request is by number), and another for reading by hash (this latter one is sometimes
cached).
- For headers in ancient: 1 file lookup/syscall for translating hash->number (even though
the request is by number), and another for reading the header itself. After this, it
also performes a hashing of the header, to ensure that the hash is what it expected. In
this PR, I instead move the logic for "give me a sequence of blocks" into the lower
layers, where the database can determine how and what to read from leveldb and/or
ancients.
There are basically four types of requests; three of them are improved this way. The
fourth, by hash going backwards, is more tricky to optimize. However, since we know that
the gap is 0, we can look up by the parentHash, and stlil shave off all the number->hash
lookups.
The gapped collection can be optimized similarly, as a follow-up, at least in three out of
four cases.
Co-authored-by: Felix Lange <fjl@twurst.com>
2021-12-07 19:50:58 +03:00
|
|
|
headers := unmarshalRlpHeaders(rlpHeaders)
|
2021-12-01 21:18:12 +03:00
|
|
|
hashes := make([]common.Hash, len(headers))
|
|
|
|
for i, header := range headers {
|
|
|
|
hashes[i] = header.Hash()
|
|
|
|
}
|
2021-11-26 14:26:03 +03:00
|
|
|
// Deliver the headers to the downloader
|
|
|
|
req := ð.Request{
|
|
|
|
Peer: dlp.id,
|
2015-10-05 19:37:56 +03:00
|
|
|
}
|
2021-11-26 14:26:03 +03:00
|
|
|
res := ð.Response{
|
|
|
|
Req: req,
|
2023-10-03 15:03:19 +03:00
|
|
|
Res: (*eth.BlockHeadersRequest)(&headers),
|
2021-12-01 21:18:12 +03:00
|
|
|
Meta: hashes,
|
2021-11-26 14:26:03 +03:00
|
|
|
Time: 1,
|
|
|
|
Done: make(chan error, 1), // Ignore the returned status
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
2019-04-25 17:59:48 +03:00
|
|
|
}
|
2021-11-26 14:26:03 +03:00
|
|
|
go func() {
|
|
|
|
sink <- res
|
|
|
|
}()
|
|
|
|
return req, nil
|
2015-09-07 20:43:01 +03:00
|
|
|
}
|
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
// RequestBodies constructs a getBlockBodies method associated with a particular
|
|
|
|
// peer in the download tester. The returned function can be used to retrieve
|
|
|
|
// batches of block bodies from the particularly requested peer.
|
|
|
|
func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *eth.Response) (*eth.Request, error) {
|
|
|
|
blobs := eth.ServiceGetBlockBodiesQuery(dlp.chain, hashes)
|
2020-06-09 12:39:19 +03:00
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
bodies := make([]*eth.BlockBody, len(blobs))
|
|
|
|
for i, blob := range blobs {
|
|
|
|
bodies[i] = new(eth.BlockBody)
|
|
|
|
rlp.DecodeBytes(blob, bodies[i])
|
2015-09-28 19:27:31 +03:00
|
|
|
}
|
2021-12-01 21:18:12 +03:00
|
|
|
var (
|
2023-01-25 17:32:25 +03:00
|
|
|
txsHashes = make([]common.Hash, len(bodies))
|
|
|
|
uncleHashes = make([]common.Hash, len(bodies))
|
|
|
|
withdrawalHashes = make([]common.Hash, len(bodies))
|
2024-09-04 15:33:51 +03:00
|
|
|
requestsHashes = make([]common.Hash, len(bodies))
|
2021-12-01 21:18:12 +03:00
|
|
|
)
|
|
|
|
hasher := trie.NewStackTrie(nil)
|
|
|
|
for i, body := range bodies {
|
2024-04-30 16:46:53 +03:00
|
|
|
hash := types.DeriveSha(types.Transactions(body.Transactions), hasher)
|
|
|
|
if _, ok := dlp.withholdBodies[hash]; ok {
|
|
|
|
txsHashes = append(txsHashes[:i], txsHashes[i+1:]...)
|
|
|
|
uncleHashes = append(uncleHashes[:i], uncleHashes[i+1:]...)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
txsHashes[i] = hash
|
2021-12-01 21:18:12 +03:00
|
|
|
uncleHashes[i] = types.CalcUncleHash(body.Uncles)
|
|
|
|
}
|
2021-11-26 14:26:03 +03:00
|
|
|
req := ð.Request{
|
|
|
|
Peer: dlp.id,
|
2015-06-12 13:35:29 +03:00
|
|
|
}
|
2021-11-26 14:26:03 +03:00
|
|
|
res := ð.Response{
|
|
|
|
Req: req,
|
2023-10-03 15:03:19 +03:00
|
|
|
Res: (*eth.BlockBodiesResponse)(&bodies),
|
2024-09-04 15:33:51 +03:00
|
|
|
Meta: [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes, requestsHashes},
|
2021-11-26 14:26:03 +03:00
|
|
|
Time: 1,
|
|
|
|
Done: make(chan error, 1), // Ignore the returned status
|
2015-09-28 19:27:31 +03:00
|
|
|
}
|
2021-11-26 14:26:03 +03:00
|
|
|
go func() {
|
|
|
|
sink <- res
|
|
|
|
}()
|
|
|
|
return req, nil
|
2015-09-28 19:27:31 +03:00
|
|
|
}
|
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
// RequestReceipts constructs a getReceipts method associated with a particular
|
|
|
|
// peer in the download tester. The returned function can be used to retrieve
|
|
|
|
// batches of block receipts from the particularly requested peer.
|
|
|
|
func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash, sink chan *eth.Response) (*eth.Request, error) {
|
|
|
|
blobs := eth.ServiceGetReceiptsQuery(dlp.chain, hashes)
|
2015-10-09 16:21:47 +03:00
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
receipts := make([][]*types.Receipt, len(blobs))
|
|
|
|
for i, blob := range blobs {
|
|
|
|
rlp.DecodeBytes(blob, &receipts[i])
|
2020-08-20 13:01:24 +03:00
|
|
|
}
|
2021-12-01 21:18:12 +03:00
|
|
|
hasher := trie.NewStackTrie(nil)
|
|
|
|
hashes = make([]common.Hash, len(receipts))
|
|
|
|
for i, receipt := range receipts {
|
|
|
|
hashes[i] = types.DeriveSha(types.Receipts(receipt), hasher)
|
|
|
|
}
|
2021-11-26 14:26:03 +03:00
|
|
|
req := ð.Request{
|
|
|
|
Peer: dlp.id,
|
2020-08-20 13:01:24 +03:00
|
|
|
}
|
2021-11-26 14:26:03 +03:00
|
|
|
res := ð.Response{
|
|
|
|
Req: req,
|
2023-10-03 15:03:19 +03:00
|
|
|
Res: (*eth.ReceiptsResponse)(&receipts),
|
2021-12-01 21:18:12 +03:00
|
|
|
Meta: hashes,
|
2021-11-26 14:26:03 +03:00
|
|
|
Time: 1,
|
|
|
|
Done: make(chan error, 1), // Ignore the returned status
|
2020-08-20 13:01:24 +03:00
|
|
|
}
|
2021-11-26 14:26:03 +03:00
|
|
|
go func() {
|
|
|
|
sink <- res
|
|
|
|
}()
|
|
|
|
return req, nil
|
2015-10-09 16:21:47 +03:00
|
|
|
}
|
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
// ID retrieves the peer's unique identifier.
|
|
|
|
func (dlp *downloadTesterPeer) ID() string {
|
|
|
|
return dlp.id
|
2015-04-12 13:38:25 +03:00
|
|
|
}
|
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
// RequestAccountRange fetches a batch of accounts rooted in a specific account
|
|
|
|
// trie, starting with the origin.
|
|
|
|
func (dlp *downloadTesterPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
|
|
|
|
// Create the request and service it
|
|
|
|
req := &snap.GetAccountRangePacket{
|
|
|
|
ID: id,
|
|
|
|
Root: root,
|
|
|
|
Origin: origin,
|
|
|
|
Limit: limit,
|
|
|
|
Bytes: bytes,
|
|
|
|
}
|
|
|
|
slimaccs, proofs := snap.ServiceGetAccountRangeQuery(dlp.chain, req)
|
2015-09-09 19:02:54 +03:00
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
// We need to convert to non-slim format, delegate to the packet code
|
|
|
|
res := &snap.AccountRangePacket{
|
|
|
|
ID: id,
|
|
|
|
Accounts: slimaccs,
|
|
|
|
Proof: proofs,
|
|
|
|
}
|
|
|
|
hashes, accounts, _ := res.Unpack()
|
2015-06-11 18:13:13 +03:00
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
go dlp.dl.downloader.SnapSyncer.OnAccounts(dlp, id, hashes, accounts, proofs)
|
2021-04-29 17:33:45 +03:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
// RequestStorageRanges fetches a batch of storage slots belonging to one or
|
2022-08-19 09:00:21 +03:00
|
|
|
// more accounts. If slots from only one account is requested, an origin marker
|
2021-11-26 14:26:03 +03:00
|
|
|
// may also be used to retrieve from there.
|
|
|
|
func (dlp *downloadTesterPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
|
|
|
|
// Create the request and service it
|
|
|
|
req := &snap.GetStorageRangesPacket{
|
|
|
|
ID: id,
|
|
|
|
Accounts: accounts,
|
|
|
|
Root: root,
|
|
|
|
Origin: origin,
|
|
|
|
Limit: limit,
|
|
|
|
Bytes: bytes,
|
|
|
|
}
|
|
|
|
storage, proofs := snap.ServiceGetStorageRangesQuery(dlp.chain, req)
|
2016-07-25 15:14:14 +03:00
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
// We need to convert to demultiplex, delegate to the packet code
|
|
|
|
res := &snap.StorageRangesPacket{
|
|
|
|
ID: id,
|
|
|
|
Slots: storage,
|
|
|
|
Proof: proofs,
|
|
|
|
}
|
|
|
|
hashes, slots := res.Unpack()
|
2015-09-09 19:02:54 +03:00
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
go dlp.dl.downloader.SnapSyncer.OnStorage(dlp, id, hashes, slots, proofs)
|
2017-06-28 15:25:08 +03:00
|
|
|
return nil
|
2015-08-14 21:25:41 +03:00
|
|
|
}
|
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
// RequestByteCodes fetches a batch of bytecodes by hash.
|
|
|
|
func (dlp *downloadTesterPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
|
|
|
|
req := &snap.GetByteCodesPacket{
|
|
|
|
ID: id,
|
|
|
|
Hashes: hashes,
|
|
|
|
Bytes: bytes,
|
|
|
|
}
|
|
|
|
codes := snap.ServiceGetByteCodesQuery(dlp.chain, req)
|
|
|
|
go dlp.dl.downloader.SnapSyncer.OnByteCodes(dlp, id, codes)
|
2017-06-28 15:25:08 +03:00
|
|
|
return nil
|
2015-04-12 13:38:25 +03:00
|
|
|
}
|
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
// RequestTrieNodes fetches a batch of account or storage trie nodes rooted in
|
2022-08-19 09:00:21 +03:00
|
|
|
// a specific state trie.
|
2021-11-26 14:26:03 +03:00
|
|
|
func (dlp *downloadTesterPeer) RequestTrieNodes(id uint64, root common.Hash, paths []snap.TrieNodePathSet, bytes uint64) error {
|
|
|
|
req := &snap.GetTrieNodesPacket{
|
|
|
|
ID: id,
|
|
|
|
Root: root,
|
|
|
|
Paths: paths,
|
|
|
|
Bytes: bytes,
|
|
|
|
}
|
|
|
|
nodes, _ := snap.ServiceGetTrieNodesQuery(dlp.chain, req, time.Now())
|
|
|
|
go dlp.dl.downloader.SnapSyncer.OnTrieNodes(dlp, id, nodes)
|
2017-06-28 15:25:08 +03:00
|
|
|
return nil
|
2015-09-28 19:27:31 +03:00
|
|
|
}
|
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
// Log retrieves the peer's own contextual logger.
|
|
|
|
func (dlp *downloadTesterPeer) Log() log.Logger {
|
|
|
|
return log.New("peer", dlp.id)
|
2015-10-05 19:37:56 +03:00
|
|
|
}
|
|
|
|
|
2015-09-28 19:27:31 +03:00
|
|
|
// assertOwnChain checks if the local chain contains the correct number of items
|
|
|
|
// of the various chain components.
|
|
|
|
func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
|
2018-11-16 14:15:05 +03:00
|
|
|
// Mark this method as a helper to report errors at callsite, not in here
|
|
|
|
t.Helper()
|
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
headers, blocks, receipts := length, length, length
|
|
|
|
if hs := int(tester.chain.CurrentHeader().Number.Uint64()) + 1; hs != headers {
|
2015-09-28 19:27:31 +03:00
|
|
|
t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
|
|
|
|
}
|
2023-03-02 09:29:15 +03:00
|
|
|
if bs := int(tester.chain.CurrentBlock().Number.Uint64()) + 1; bs != blocks {
|
2015-09-28 19:27:31 +03:00
|
|
|
t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
|
|
|
|
}
|
2023-03-02 09:29:15 +03:00
|
|
|
if rs := int(tester.chain.CurrentSnapBlock().Number.Uint64()) + 1; rs != receipts {
|
2018-02-05 19:40:32 +03:00
|
|
|
t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
|
2015-09-28 19:27:31 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-05-28 20:52:08 +03:00
|
|
|
// TestCanonicalSynchronisation68Full runs the canonical sync test over eth/68 in full-sync mode.
func TestCanonicalSynchronisation68Full(t *testing.T) {
	testCanonSync(t, eth.ETH68, FullSync)
}
|
|
|
|
// TestCanonicalSynchronisation68Snap runs the canonical sync test over eth/68 in snap-sync mode.
func TestCanonicalSynchronisation68Snap(t *testing.T) {
	testCanonSync(t, eth.ETH68, SnapSync)
}
|
2015-09-28 19:27:31 +03:00
|
|
|
|
2021-02-18 19:54:29 +03:00
|
|
|
func testCanonSync(t *testing.T, protocol uint, mode SyncMode) {
|
2024-04-30 16:46:53 +03:00
|
|
|
success := make(chan struct{})
|
|
|
|
tester := newTesterWithNotification(t, func() {
|
|
|
|
close(success)
|
|
|
|
})
|
2016-06-01 18:07:25 +03:00
|
|
|
defer tester.terminate()
|
|
|
|
|
2016-10-31 14:55:12 +03:00
|
|
|
// Create a small enough block chain to download
|
2020-09-02 12:01:46 +03:00
|
|
|
chain := testChainBase.shorten(blockCacheMaxItems - 15)
|
2021-11-26 14:26:03 +03:00
|
|
|
tester.newPeer("peer", protocol, chain.blocks[1:])
|
2015-05-10 01:34:07 +03:00
|
|
|
|
2015-09-28 19:27:31 +03:00
|
|
|
// Synchronise with the peer and make sure all relevant data was retrieved
|
2024-04-30 16:46:53 +03:00
|
|
|
if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil {
|
|
|
|
t.Fatalf("failed to beacon-sync chain: %v", err)
|
|
|
|
}
|
|
|
|
select {
|
|
|
|
case <-success:
|
|
|
|
assertOwnChain(t, tester, len(chain.blocks))
|
|
|
|
case <-time.NewTimer(time.Second * 3).C:
|
|
|
|
t.Fatalf("Failed to sync chain in three seconds")
|
2015-05-29 19:47:00 +03:00
|
|
|
}
|
2015-05-10 01:34:07 +03:00
|
|
|
}
|
|
|
|
|
2015-06-30 19:05:06 +03:00
|
|
|
// Tests that if a large batch of blocks are being downloaded, it is throttled
|
|
|
|
// until the cached blocks are retrieved.
|
2023-10-03 15:03:19 +03:00
|
|
|
// TestThrottling68Full runs the throttling test over eth/68 in full-sync mode.
func TestThrottling68Full(t *testing.T) {
	testThrottling(t, eth.ETH68, FullSync)
}
|
|
|
|
// TestThrottling68Snap runs the throttling test over eth/68 in snap-sync mode.
func TestThrottling68Snap(t *testing.T) {
	testThrottling(t, eth.ETH68, SnapSync)
}
|
2021-02-18 19:54:29 +03:00
|
|
|
|
2020-12-14 12:27:15 +03:00
|
|
|
func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
|
2022-04-08 16:44:55 +03:00
|
|
|
tester := newTester(t)
|
2021-11-26 14:26:03 +03:00
|
|
|
defer tester.terminate()
|
2016-06-01 18:07:25 +03:00
|
|
|
|
2016-10-31 14:55:12 +03:00
|
|
|
// Create a long block chain to download and the tester
|
2021-11-26 14:26:03 +03:00
|
|
|
targetBlocks := len(testChainBase.blocks) - 1
|
|
|
|
tester.newPeer("peer", protocol, testChainBase.blocks[1:])
|
2015-05-07 14:40:50 +03:00
|
|
|
|
2015-06-12 13:35:29 +03:00
|
|
|
// Wrap the importer to allow stepping
|
2023-04-03 22:48:10 +03:00
|
|
|
var blocked atomic.Uint32
|
|
|
|
proceed := make(chan struct{})
|
2015-09-28 19:27:31 +03:00
|
|
|
tester.downloader.chainInsertHook = func(results []*fetchResult) {
|
2023-04-03 22:48:10 +03:00
|
|
|
blocked.Store(uint32(len(results)))
|
2015-08-14 21:25:41 +03:00
|
|
|
<-proceed
|
2015-06-12 13:35:29 +03:00
|
|
|
}
|
2015-05-29 19:47:00 +03:00
|
|
|
// Start a synchronisation concurrently
|
miner, test: fix potential goroutine leak (#21989)
In miner/worker.go, there are two goroutine using channel w.newWorkCh: newWorkerLoop() sends to this channel, and mainLoop() receives from this channel. Only the receive operation is in a select.
However, w.exitCh may be closed by another goroutine. This is fine for the receive since receive is in select, but if the send operation is blocking, then it will block forever. This commit puts the send in a select, so it won't block even if w.exitCh is closed.
Similarly, there are two goroutines using channel errc: the parent that runs the test receives from it, and the child created at line 573 sends to it. If the parent goroutine exits too early by calling t.Fatalf() at line 614, then the child goroutine will be blocked at line 574 forever. This commit adds 1 buffer to errc. Now send will not block, and receive is not influenced because receive still needs to wait for the send.
2020-12-11 12:29:42 +03:00
|
|
|
errc := make(chan error, 1)
|
2015-05-29 19:47:00 +03:00
|
|
|
go func() {
|
2024-04-30 16:46:53 +03:00
|
|
|
errc <- tester.downloader.BeaconSync(mode, testChainBase.blocks[len(testChainBase.blocks)-1].Header(), nil)
|
2015-05-29 19:47:00 +03:00
|
|
|
}()
|
|
|
|
// Iteratively take some blocks, always checking the retrieval count
|
2015-09-09 19:02:54 +03:00
|
|
|
for {
|
|
|
|
// Check the retrieval count synchronously (! reason for this ugly block)
|
|
|
|
tester.lock.RLock()
|
2023-03-02 09:29:15 +03:00
|
|
|
retrieved := int(tester.chain.CurrentSnapBlock().Number.Uint64()) + 1
|
2015-09-09 19:02:54 +03:00
|
|
|
tester.lock.RUnlock()
|
|
|
|
if retrieved >= targetBlocks+1 {
|
|
|
|
break
|
|
|
|
}
|
2015-06-12 13:35:29 +03:00
|
|
|
// Wait a bit for sync to throttle itself
|
2015-10-13 12:04:25 +03:00
|
|
|
var cached, frozen int
|
2015-12-30 14:06:09 +02:00
|
|
|
for start := time.Now(); time.Since(start) < 3*time.Second; {
|
2015-06-07 18:46:32 +03:00
|
|
|
time.Sleep(25 * time.Millisecond)
|
2015-06-12 13:35:29 +03:00
|
|
|
|
2015-11-13 18:08:15 +02:00
|
|
|
tester.lock.Lock()
|
2021-01-09 19:29:19 +03:00
|
|
|
tester.downloader.queue.lock.Lock()
|
|
|
|
tester.downloader.queue.resultCache.lock.Lock()
|
2020-07-24 10:46:26 +03:00
|
|
|
{
|
|
|
|
cached = tester.downloader.queue.resultCache.countCompleted()
|
2023-04-03 22:48:10 +03:00
|
|
|
frozen = int(blocked.Load())
|
2023-03-02 09:29:15 +03:00
|
|
|
retrieved = int(tester.chain.CurrentSnapBlock().Number.Uint64()) + 1
|
2015-09-28 19:27:31 +03:00
|
|
|
}
|
2021-01-09 19:29:19 +03:00
|
|
|
tester.downloader.queue.resultCache.lock.Unlock()
|
|
|
|
tester.downloader.queue.lock.Unlock()
|
2015-11-13 18:08:15 +02:00
|
|
|
tester.lock.Unlock()
|
2015-09-09 19:02:54 +03:00
|
|
|
|
2020-09-02 12:01:46 +03:00
|
|
|
if cached == blockCacheMaxItems ||
|
|
|
|
cached == blockCacheMaxItems-reorgProtHeaderDelay ||
|
2020-07-24 10:46:26 +03:00
|
|
|
retrieved+cached+frozen == targetBlocks+1 ||
|
|
|
|
retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {
|
2015-06-07 18:46:32 +03:00
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
2015-06-12 13:35:29 +03:00
|
|
|
// Make sure we filled up the cache, then exhaust it
|
|
|
|
time.Sleep(25 * time.Millisecond) // give it a chance to screw up
|
2015-10-13 12:04:25 +03:00
|
|
|
tester.lock.RLock()
|
2023-03-02 09:29:15 +03:00
|
|
|
retrieved = int(tester.chain.CurrentSnapBlock().Number.Uint64()) + 1
|
2015-10-13 12:04:25 +03:00
|
|
|
tester.lock.RUnlock()
|
2020-09-02 12:01:46 +03:00
|
|
|
if cached != blockCacheMaxItems && cached != blockCacheMaxItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay {
|
|
|
|
t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheMaxItems, retrieved, frozen, targetBlocks+1)
|
2015-05-29 19:47:00 +03:00
|
|
|
}
|
2015-08-14 21:25:41 +03:00
|
|
|
// Permit the blocked blocks to import
|
2023-04-03 22:48:10 +03:00
|
|
|
if blocked.Load() > 0 {
|
|
|
|
blocked.Store(uint32(0))
|
2015-08-14 21:25:41 +03:00
|
|
|
proceed <- struct{}{}
|
2015-05-29 19:47:00 +03:00
|
|
|
}
|
2015-06-12 13:35:29 +03:00
|
|
|
}
|
|
|
|
// Check that we haven't pulled more blocks than available
|
2015-09-28 19:27:31 +03:00
|
|
|
assertOwnChain(t, tester, targetBlocks+1)
|
2015-05-29 19:47:00 +03:00
|
|
|
if err := <-errc; err != nil {
|
|
|
|
t.Fatalf("block synchronization failed: %v", err)
|
2015-05-07 14:40:50 +03:00
|
|
|
}
|
|
|
|
}
|
2015-05-14 15:24:18 +03:00
|
|
|
|
2015-09-28 19:27:31 +03:00
|
|
|
// Tests that a canceled download wipes all previously accumulated state,
// leaving the download queue idle again. Exercised for both sync modes.
func TestCancel68Full(t *testing.T) { testCancel(t, eth.ETH68, FullSync) }
func TestCancel68Snap(t *testing.T) { testCancel(t, eth.ETH68, SnapSync) }
|
2021-02-18 19:54:29 +03:00
|
|
|
|
2020-12-14 12:27:15 +03:00
|
|
|
func testCancel(t *testing.T, protocol uint, mode SyncMode) {
|
2024-04-30 16:46:53 +03:00
|
|
|
complete := make(chan struct{})
|
|
|
|
success := func() {
|
|
|
|
close(complete)
|
|
|
|
}
|
|
|
|
tester := newTesterWithNotification(t, success)
|
2016-10-31 14:55:12 +03:00
|
|
|
defer tester.terminate()
|
|
|
|
|
2018-11-07 17:07:43 +03:00
|
|
|
chain := testChainBase.shorten(MaxHeaderFetch)
|
2021-11-26 14:26:03 +03:00
|
|
|
tester.newPeer("peer", protocol, chain.blocks[1:])
|
2015-06-30 19:05:06 +03:00
|
|
|
|
|
|
|
// Make sure canceling works with a pristine downloader
|
2017-03-22 03:37:24 +03:00
|
|
|
tester.downloader.Cancel()
|
2015-09-28 19:27:31 +03:00
|
|
|
if !tester.downloader.queue.Idle() {
|
|
|
|
t.Errorf("download queue not idle")
|
2015-06-30 19:05:06 +03:00
|
|
|
}
|
|
|
|
// Synchronise with the peer, but cancel afterwards
|
2024-04-30 16:46:53 +03:00
|
|
|
if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil {
|
2015-06-30 19:05:06 +03:00
|
|
|
t.Fatalf("failed to synchronise blocks: %v", err)
|
|
|
|
}
|
2024-04-30 16:46:53 +03:00
|
|
|
<-complete
|
2017-03-22 03:37:24 +03:00
|
|
|
tester.downloader.Cancel()
|
2015-09-28 19:27:31 +03:00
|
|
|
if !tester.downloader.queue.Idle() {
|
|
|
|
t.Errorf("download queue not idle")
|
2015-06-30 19:05:06 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-10-02 13:20:41 +03:00
|
|
|
// Tests that synchronisations behave well in multi-version protocol
// environments, and do not wreak havoc on other nodes in the network.
func TestMultiProtoSynchronisation68Full(t *testing.T) { testMultiProtoSync(t, eth.ETH68, FullSync) }
func TestMultiProtoSynchronisation68Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH68, SnapSync) }
|
2021-02-18 19:54:29 +03:00
|
|
|
|
2020-12-14 12:27:15 +03:00
|
|
|
func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
|
2024-04-30 16:46:53 +03:00
|
|
|
complete := make(chan struct{})
|
|
|
|
success := func() {
|
|
|
|
close(complete)
|
|
|
|
}
|
|
|
|
tester := newTesterWithNotification(t, success)
|
2016-10-31 14:55:12 +03:00
|
|
|
defer tester.terminate()
|
|
|
|
|
2015-10-02 13:20:41 +03:00
|
|
|
// Create a small enough block chain to download
|
2020-09-02 12:01:46 +03:00
|
|
|
chain := testChainBase.shorten(blockCacheMaxItems - 15)
|
2015-10-02 13:20:41 +03:00
|
|
|
|
|
|
|
// Create peers of every type
|
2023-10-03 15:03:19 +03:00
|
|
|
tester.newPeer("peer 68", eth.ETH68, chain.blocks[1:])
|
2015-10-02 13:20:41 +03:00
|
|
|
|
2024-04-30 16:46:53 +03:00
|
|
|
if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil {
|
|
|
|
t.Fatalf("failed to start beacon sync: #{err}")
|
|
|
|
}
|
|
|
|
select {
|
|
|
|
case <-complete:
|
|
|
|
break
|
|
|
|
case <-time.NewTimer(time.Second * 3).C:
|
|
|
|
t.Fatalf("Failed to sync chain in three seconds")
|
2015-10-02 13:20:41 +03:00
|
|
|
}
|
2021-11-26 14:26:03 +03:00
|
|
|
assertOwnChain(t, tester, len(chain.blocks))
|
2015-09-30 19:23:31 +03:00
|
|
|
|
2015-10-02 13:20:41 +03:00
|
|
|
// Check that no peers have been dropped off
|
2024-02-08 16:49:19 +03:00
|
|
|
for _, version := range []int{68} {
|
2015-10-02 13:20:41 +03:00
|
|
|
peer := fmt.Sprintf("peer %d", version)
|
2018-11-07 17:07:43 +03:00
|
|
|
if _, ok := tester.peers[peer]; !ok {
|
2015-10-02 13:20:41 +03:00
|
|
|
t.Errorf("%s dropped", peer)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-09-28 19:27:31 +03:00
|
|
|
// Tests that if a block is empty (e.g. header only), no body request should be
// made, and instead the header should be assembled into a whole block in itself.
func TestEmptyShortCircuit68Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, FullSync) }
func TestEmptyShortCircuit68Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, SnapSync) }
|
2021-02-18 19:54:29 +03:00
|
|
|
|
2020-12-14 12:27:15 +03:00
|
|
|
func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) {
|
2024-04-30 16:46:53 +03:00
|
|
|
success := make(chan struct{})
|
|
|
|
tester := newTesterWithNotification(t, func() {
|
|
|
|
close(success)
|
|
|
|
})
|
2016-06-01 18:07:25 +03:00
|
|
|
defer tester.terminate()
|
|
|
|
|
2016-10-31 14:55:12 +03:00
|
|
|
// Create a block chain to download
|
2018-11-07 17:07:43 +03:00
|
|
|
chain := testChainBase
|
2021-11-26 14:26:03 +03:00
|
|
|
tester.newPeer("peer", protocol, chain.blocks[1:])
|
2015-08-14 21:25:41 +03:00
|
|
|
|
|
|
|
// Instrument the downloader to signal body requests
|
2023-04-03 22:48:10 +03:00
|
|
|
var bodiesHave, receiptsHave atomic.Int32
|
2015-08-14 21:25:41 +03:00
|
|
|
tester.downloader.bodyFetchHook = func(headers []*types.Header) {
|
2023-04-03 22:48:10 +03:00
|
|
|
bodiesHave.Add(int32(len(headers)))
|
2015-09-28 19:27:31 +03:00
|
|
|
}
|
|
|
|
tester.downloader.receiptFetchHook = func(headers []*types.Header) {
|
2023-04-03 22:48:10 +03:00
|
|
|
receiptsHave.Add(int32(len(headers)))
|
2015-08-14 21:25:41 +03:00
|
|
|
}
|
2024-04-30 16:46:53 +03:00
|
|
|
|
|
|
|
if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil {
|
2015-08-14 21:25:41 +03:00
|
|
|
t.Fatalf("failed to synchronise blocks: %v", err)
|
|
|
|
}
|
2024-04-30 16:46:53 +03:00
|
|
|
select {
|
|
|
|
case <-success:
|
|
|
|
checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
|
|
|
|
HighestBlock: uint64(len(chain.blocks) - 1),
|
|
|
|
CurrentBlock: uint64(len(chain.blocks) - 1),
|
|
|
|
})
|
|
|
|
case <-time.NewTimer(time.Second * 3).C:
|
|
|
|
t.Fatalf("Failed to sync chain in three seconds")
|
|
|
|
}
|
2021-11-26 14:26:03 +03:00
|
|
|
assertOwnChain(t, tester, len(chain.blocks))
|
2015-09-28 19:27:31 +03:00
|
|
|
|
2015-08-14 21:25:41 +03:00
|
|
|
// Validate the number of block bodies that should have been requested
|
2015-09-28 19:27:31 +03:00
|
|
|
bodiesNeeded, receiptsNeeded := 0, 0
|
2021-11-26 14:26:03 +03:00
|
|
|
for _, block := range chain.blocks[1:] {
|
2024-05-28 20:52:08 +03:00
|
|
|
if len(block.Transactions()) > 0 || len(block.Uncles()) > 0 {
|
2015-09-28 19:27:31 +03:00
|
|
|
bodiesNeeded++
|
2015-08-14 21:25:41 +03:00
|
|
|
}
|
2015-09-30 19:23:31 +03:00
|
|
|
}
|
2021-11-26 14:26:03 +03:00
|
|
|
for _, block := range chain.blocks[1:] {
|
|
|
|
if mode == SnapSync && len(block.Transactions()) > 0 {
|
2015-09-28 19:27:31 +03:00
|
|
|
receiptsNeeded++
|
|
|
|
}
|
|
|
|
}
|
2023-04-03 22:48:10 +03:00
|
|
|
if int(bodiesHave.Load()) != bodiesNeeded {
|
|
|
|
t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave.Load(), bodiesNeeded)
|
2015-08-14 21:25:41 +03:00
|
|
|
}
|
2023-04-03 22:48:10 +03:00
|
|
|
if int(receiptsHave.Load()) != receiptsNeeded {
|
|
|
|
t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave.Load(), receiptsNeeded)
|
2015-08-14 21:25:41 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-11-07 17:07:43 +03:00
|
|
|
func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.SyncProgress) {
|
2018-11-16 14:15:05 +03:00
|
|
|
// Mark this method as a helper to report errors at callsite, not in here
|
2018-11-07 17:07:43 +03:00
|
|
|
t.Helper()
|
2018-11-16 14:15:05 +03:00
|
|
|
|
2018-11-07 17:07:43 +03:00
|
|
|
p := d.Progress()
|
2021-11-26 14:26:03 +03:00
|
|
|
if p.StartingBlock != want.StartingBlock || p.CurrentBlock != want.CurrentBlock || p.HighestBlock != want.HighestBlock {
|
2018-11-07 17:07:43 +03:00
|
|
|
t.Fatalf("%s progress mismatch:\nhave %+v\nwant %+v", stage, p, want)
|
2015-10-13 12:04:25 +03:00
|
|
|
}
|
2015-09-09 19:02:54 +03:00
|
|
|
}
|
|
|
|
|
2022-04-04 10:10:16 +03:00
|
|
|
// Tests that beacon sync (sync driven by a trusted head header handed in via
// BeaconSync) completes correctly from various local chain heights, in both
// full and snap modes. (The previous comment about checkpoint-gated fast sync
// described an older, removed test and did not apply here.)
func TestBeaconSync68Full(t *testing.T) { testBeaconSync(t, eth.ETH68, FullSync) }
func TestBeaconSync68Snap(t *testing.T) { testBeaconSync(t, eth.ETH68, SnapSync) }
|
2022-04-04 10:10:16 +03:00
|
|
|
|
|
|
|
func testBeaconSync(t *testing.T, protocol uint, mode SyncMode) {
|
|
|
|
var cases = []struct {
|
|
|
|
name string // The name of testing scenario
|
|
|
|
local int // The length of local chain(canonical chain assumed), 0 means genesis is the head
|
|
|
|
}{
|
|
|
|
{name: "Beacon sync since genesis", local: 0},
|
|
|
|
{name: "Beacon sync with short local chain", local: 1},
|
|
|
|
{name: "Beacon sync with long local chain", local: blockCacheMaxItems - 15 - fsMinFullBlocks/2},
|
|
|
|
{name: "Beacon sync with full local chain", local: blockCacheMaxItems - 15 - 1},
|
|
|
|
}
|
|
|
|
for _, c := range cases {
|
|
|
|
t.Run(c.name, func(t *testing.T) {
|
|
|
|
success := make(chan struct{})
|
2022-04-08 16:44:55 +03:00
|
|
|
tester := newTesterWithNotification(t, func() {
|
2022-04-04 10:10:16 +03:00
|
|
|
close(success)
|
|
|
|
})
|
|
|
|
defer tester.terminate()
|
|
|
|
|
|
|
|
chain := testChainBase.shorten(blockCacheMaxItems - 15)
|
|
|
|
tester.newPeer("peer", protocol, chain.blocks[1:])
|
|
|
|
|
|
|
|
// Build the local chain segment if it's required
|
|
|
|
if c.local > 0 {
|
|
|
|
tester.chain.InsertChain(chain.blocks[1 : c.local+1])
|
|
|
|
}
|
2023-02-23 14:22:41 +03:00
|
|
|
if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil {
|
2022-04-04 10:10:16 +03:00
|
|
|
t.Fatalf("Failed to beacon sync chain %v %v", c.name, err)
|
|
|
|
}
|
|
|
|
select {
|
|
|
|
case <-success:
|
|
|
|
// Ok, downloader fully cancelled after sync cycle
|
2023-03-02 09:29:15 +03:00
|
|
|
if bs := int(tester.chain.CurrentBlock().Number.Uint64()) + 1; bs != len(chain.blocks) {
|
2022-04-04 10:10:16 +03:00
|
|
|
t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, len(chain.blocks))
|
|
|
|
}
|
|
|
|
case <-time.NewTimer(time.Second * 3).C:
|
|
|
|
t.Fatalf("Failed to sync chain in three seconds")
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
2024-04-24 10:07:39 +03:00
|
|
|
|
2024-04-30 16:46:53 +03:00
|
|
|
// Tests that synchronisation progress (origin block number, current block number
// and highest block number) is tracked and updated correctly. The sync is run
// in two stages (to mid-chain, then to head) to also cover continuation progress.
func TestSyncProgress68Full(t *testing.T) { testSyncProgress(t, eth.ETH68, FullSync) }
func TestSyncProgress68Snap(t *testing.T) { testSyncProgress(t, eth.ETH68, SnapSync) }
|
2024-04-24 10:07:39 +03:00
|
|
|
|
2024-04-30 16:46:53 +03:00
|
|
|
func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
|
2024-04-24 10:07:39 +03:00
|
|
|
success := make(chan struct{})
|
|
|
|
tester := newTesterWithNotification(t, func() {
|
|
|
|
success <- struct{}{}
|
|
|
|
})
|
|
|
|
defer tester.terminate()
|
2024-04-30 16:46:53 +03:00
|
|
|
checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
|
2024-04-24 10:07:39 +03:00
|
|
|
|
2024-04-30 16:46:53 +03:00
|
|
|
chain := testChainBase.shorten(blockCacheMaxItems - 15)
|
|
|
|
shortChain := chain.shorten(len(chain.blocks) / 2).blocks[1:]
|
2024-04-24 10:07:39 +03:00
|
|
|
|
2024-04-30 16:46:53 +03:00
|
|
|
// Connect to peer that provides all headers and part of the bodies
|
|
|
|
faultyPeer := tester.newPeer("peer-half", protocol, shortChain)
|
|
|
|
for _, header := range shortChain {
|
|
|
|
faultyPeer.withholdBodies[header.Hash()] = struct{}{}
|
2024-04-24 10:07:39 +03:00
|
|
|
}
|
|
|
|
|
2024-04-30 16:46:53 +03:00
|
|
|
if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)/2-1].Header(), nil); err != nil {
|
|
|
|
t.Fatalf("failed to beacon-sync chain: %v", err)
|
|
|
|
}
|
2024-04-24 10:07:39 +03:00
|
|
|
select {
|
|
|
|
case <-success:
|
2024-04-30 16:46:53 +03:00
|
|
|
// Ok, downloader fully cancelled after sync cycle
|
|
|
|
checkProgress(t, tester.downloader, "peer-half", ethereum.SyncProgress{
|
|
|
|
CurrentBlock: uint64(len(chain.blocks)/2 - 1),
|
|
|
|
HighestBlock: uint64(len(chain.blocks)/2 - 1),
|
2024-04-24 10:07:39 +03:00
|
|
|
})
|
|
|
|
case <-time.NewTimer(time.Second * 3).C:
|
|
|
|
t.Fatalf("Failed to sync chain in three seconds")
|
|
|
|
}
|
|
|
|
|
2024-04-30 16:46:53 +03:00
|
|
|
// Synchronise all the blocks and check continuation progress
|
|
|
|
tester.newPeer("peer-full", protocol, chain.blocks[1:])
|
|
|
|
if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil {
|
|
|
|
t.Fatalf("failed to beacon-sync chain: %v", err)
|
|
|
|
}
|
2024-05-28 20:52:08 +03:00
|
|
|
startingBlock := uint64(len(chain.blocks)/2 - 1)
|
2024-04-24 10:07:39 +03:00
|
|
|
|
|
|
|
select {
|
|
|
|
case <-success:
|
2024-04-30 16:46:53 +03:00
|
|
|
// Ok, downloader fully cancelled after sync cycle
|
|
|
|
checkProgress(t, tester.downloader, "peer-full", ethereum.SyncProgress{
|
|
|
|
StartingBlock: startingBlock,
|
|
|
|
CurrentBlock: uint64(len(chain.blocks) - 1),
|
|
|
|
HighestBlock: uint64(len(chain.blocks) - 1),
|
2024-04-24 10:07:39 +03:00
|
|
|
})
|
|
|
|
case <-time.NewTimer(time.Second * 3).C:
|
|
|
|
t.Fatalf("Failed to sync chain in three seconds")
|
|
|
|
}
|
|
|
|
}
|