// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snap

import (
	"bytes"
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/enr"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/ethereum/go-ethereum/trie/trienode"
)

const (
	// softResponseLimit is the target maximum size of replies to data retrievals.
	softResponseLimit = 2 * 1024 * 1024

	// maxCodeLookups is the maximum number of bytecodes to serve. This number is
	// there to limit the number of disk lookups.
	maxCodeLookups = 1024

	// stateLookupSlack defines the ratio by which a state response may exceed
	// the requested limit, in order to try and avoid breaking up contracts into
	// multiple packets and having to prove each of them.
	stateLookupSlack = 0.1

	// maxTrieNodeLookups is the maximum number of state trie nodes to serve. This
	// number is there to limit the number of disk lookups.
	maxTrieNodeLookups = 1024

	// maxTrieNodeTimeSpent is the maximum time we should spend on looking up trie nodes.
	// If we spend too much time, there's a fairly high chance of timing out
	// at the remote side, which means all the work is in vain.
	maxTrieNodeTimeSpent = 5 * time.Second
)
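
// Worked example of the limits above (informational, not a new constraint):
// with req.Bytes capped at softResponseLimit, the storage-range hard limit
// computed in ServiceGetStorageRangesQuery below works out to
// 2 * 1024 * 1024 * (1 + 0.1) ≈ 2.3 MB before serving aborts mid-trie.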

// Handler is a callback to invoke from an outside runner after the boilerplate
// exchanges have passed.
type Handler func(peer *Peer) error

// Backend defines the data retrieval methods to serve remote requests and the
// callback methods to invoke on remote deliveries.
type Backend interface {
	// Chain retrieves the blockchain object to serve data.
	Chain() *core.BlockChain

	// RunPeer is invoked when a peer joins on the `snap` protocol. The handler
	// should do any peer maintenance work, handshakes and validations. If all
	// is passed, control should be given back to the `handler` to process the
	// inbound messages going forward.
	RunPeer(peer *Peer, handler Handler) error

	// PeerInfo retrieves all known `snap` information about a peer.
	PeerInfo(id enode.ID) interface{}

	// Handle is a callback to be invoked when a data packet is received from
	// the remote peer. Only packets not consumed by the protocol handler will
	// be forwarded to the backend.
	Handle(peer *Peer, packet Packet) error
}
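
// A minimal sketch of a Backend implementation, for illustration only. The
// `fullNode` type and its wiring are hypothetical, not part of this package;
// a real backend would track peers and feed delivered packets into a sync
// scheduler instead of discarding them:
//
//	type fullNode struct{ chain *core.BlockChain }
//
//	func (n *fullNode) Chain() *core.BlockChain          { return n.chain }
//	func (n *fullNode) RunPeer(p *Peer, h Handler) error { return h(p) }
//	func (n *fullNode) PeerInfo(id enode.ID) interface{} { return nil }
//	func (n *fullNode) Handle(p *Peer, pkt Packet) error { return nil }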

// MakeProtocols constructs the P2P protocol definitions for `snap`.
func MakeProtocols(backend Backend, dnsdisc enode.Iterator) []p2p.Protocol {
	// Filter the discovery iterator for nodes advertising snap support.
	dnsdisc = enode.Filter(dnsdisc, func(n *enode.Node) bool {
		var snap enrEntry
		return n.Load(&snap) == nil
	})

	protocols := make([]p2p.Protocol, len(ProtocolVersions))
	for i, version := range ProtocolVersions {
		version := version // Closure

		protocols[i] = p2p.Protocol{
			Name:    ProtocolName,
			Version: version,
			Length:  protocolLengths[version],
			Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
				return backend.RunPeer(NewPeer(version, p, rw), func(peer *Peer) error {
					return Handle(backend, peer)
				})
			},
			NodeInfo: func() interface{} {
				return nodeInfo(backend.Chain())
			},
			PeerInfo: func(id enode.ID) interface{} {
				return backend.PeerInfo(id)
			},
			Attributes:     []enr.Entry{&enrEntry{}},
			DialCandidates: dnsdisc,
		}
	}
	return protocols
}
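
// Illustrative usage (a sketch, not part of this package): a node embedding
// `snap` would typically splice the returned protocols into its devp2p stack,
// e.g. via (*node.Node).RegisterProtocols. The `backend`, `disc` and `stack`
// values below are assumed to exist in the caller:
//
//	protos := MakeProtocols(backend, disc)
//	stack.RegisterProtocols(protos)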

// Handle is the callback invoked to manage the life cycle of a `snap` peer.
// When this function terminates, the peer is disconnected.
func Handle(backend Backend, peer *Peer) error {
	for {
		if err := HandleMessage(backend, peer); err != nil {
			peer.Log().Debug("Message handling failed in `snap`", "err", err)
			return err
		}
	}
}

// HandleMessage is invoked whenever an inbound message is received from a
// remote peer on the `snap` protocol. The remote connection is torn down upon
// returning any error.
func HandleMessage(backend Backend, peer *Peer) error {
	// Read the next message from the remote peer, and ensure it's fully consumed
	msg, err := peer.rw.ReadMsg()
	if err != nil {
		return err
	}
	if msg.Size > maxMessageSize {
		return fmt.Errorf("%w: %v > %v", errMsgTooLarge, msg.Size, maxMessageSize)
	}
	defer msg.Discard()
	start := time.Now()
	// Track the amount of time it takes to serve the request and run the handler
	if metrics.Enabled {
		h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code)
		defer func(start time.Time) {
			sampler := func() metrics.Sample {
				return metrics.ResettingSample(
					metrics.NewExpDecaySample(1028, 0.015),
				)
			}
			metrics.GetOrRegisterHistogramLazy(h, nil, sampler).Update(time.Since(start).Microseconds())
		}(start)
	}
	// Handle the message depending on its contents
	switch {
	case msg.Code == GetAccountRangeMsg:
		// Decode the account retrieval request
		var req GetAccountRangePacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Service the request, potentially returning nothing in case of errors
		accounts, proofs := ServiceGetAccountRangeQuery(backend.Chain(), &req)

		// Send back anything accumulated (or empty in case of errors)
		return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{
			ID:       req.ID,
			Accounts: accounts,
			Proof:    proofs,
		})

	case msg.Code == AccountRangeMsg:
		// A range of accounts arrived in response to one of our previous requests
		res := new(AccountRangePacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Ensure the range is monotonically increasing
		for i := 1; i < len(res.Accounts); i++ {
			if bytes.Compare(res.Accounts[i-1].Hash[:], res.Accounts[i].Hash[:]) >= 0 {
				return fmt.Errorf("accounts not monotonically increasing: #%d [%x] vs #%d [%x]", i-1, res.Accounts[i-1].Hash[:], i, res.Accounts[i].Hash[:])
			}
		}
		requestTracker.Fulfil(peer.id, peer.version, AccountRangeMsg, res.ID)

		return backend.Handle(peer, res)

	case msg.Code == GetStorageRangesMsg:
		// Decode the storage retrieval request
		var req GetStorageRangesPacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Service the request, potentially returning nothing in case of errors
		slots, proofs := ServiceGetStorageRangesQuery(backend.Chain(), &req)

		// Send back anything accumulated (or empty in case of errors)
		return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{
			ID:    req.ID,
			Slots: slots,
			Proof: proofs,
		})

	case msg.Code == StorageRangesMsg:
		// A range of storage slots arrived in response to one of our previous requests
		res := new(StorageRangesPacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Ensure the ranges are monotonically increasing
		for i, slots := range res.Slots {
			for j := 1; j < len(slots); j++ {
				if bytes.Compare(slots[j-1].Hash[:], slots[j].Hash[:]) >= 0 {
					return fmt.Errorf("storage slots not monotonically increasing for account #%d: #%d [%x] vs #%d [%x]", i, j-1, slots[j-1].Hash[:], j, slots[j].Hash[:])
				}
			}
		}
		requestTracker.Fulfil(peer.id, peer.version, StorageRangesMsg, res.ID)

		return backend.Handle(peer, res)

	case msg.Code == GetByteCodesMsg:
		// Decode bytecode retrieval request
		var req GetByteCodesPacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Service the request, potentially returning nothing in case of errors
		codes := ServiceGetByteCodesQuery(backend.Chain(), &req)

		// Send back anything accumulated (or empty in case of errors)
		return p2p.Send(peer.rw, ByteCodesMsg, &ByteCodesPacket{
			ID:    req.ID,
			Codes: codes,
		})

	case msg.Code == ByteCodesMsg:
		// A batch of bytecodes arrived in response to one of our previous requests
		res := new(ByteCodesPacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		requestTracker.Fulfil(peer.id, peer.version, ByteCodesMsg, res.ID)

		return backend.Handle(peer, res)

	case msg.Code == GetTrieNodesMsg:
		// Decode trie node retrieval request
		var req GetTrieNodesPacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Service the request, potentially returning nothing in case of errors
		nodes, err := ServiceGetTrieNodesQuery(backend.Chain(), &req, start)
		if err != nil {
			return err
		}
		// Send back anything accumulated (or empty in case of errors)
		return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{
			ID:    req.ID,
			Nodes: nodes,
		})

	case msg.Code == TrieNodesMsg:
		// A batch of trie nodes arrived in response to one of our previous requests
		res := new(TrieNodesPacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		requestTracker.Fulfil(peer.id, peer.version, TrieNodesMsg, res.ID)

		return backend.Handle(peer, res)

	default:
		return fmt.Errorf("%w: %v", errInvalidMsgCode, msg.Code)
	}
}
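
// Request/response pairing in the switch above, informational: each inbound
// Get*Msg is served inline and answered with the matching reply message
// carrying the request ID, while each inbound reply is matched against a
// pending local request via the requestTracker before being handed to the
// backend. For example, a GetAccountRangeMsg with ID 7 is always answered by
// an AccountRangeMsg with ID 7 (possibly empty if the state was unavailable).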

// ServiceGetAccountRangeQuery assembles the response to an account range query.
// It is exposed to allow external packages to test protocol behavior.
func ServiceGetAccountRangeQuery(chain *core.BlockChain, req *GetAccountRangePacket) ([]*AccountData, [][]byte) {
	if req.Bytes > softResponseLimit {
		req.Bytes = softResponseLimit
	}
	// Retrieve the requested state and bail out if non-existent
	tr, err := trie.New(trie.StateTrieID(req.Root), chain.TrieDB())
	if err != nil {
		return nil, nil
	}
	it, err := chain.Snapshots().AccountIterator(req.Root, req.Origin)
	if err != nil {
		return nil, nil
	}
	// Iterate over the requested range and pile accounts up
	var (
		accounts []*AccountData
		size     uint64
		last     common.Hash
	)
	for it.Next() {
		hash, account := it.Hash(), common.CopyBytes(it.Account())

		// Track the returned interval for the Merkle proofs
		last = hash

		// Assemble the reply item
		size += uint64(common.HashLength + len(account))
		accounts = append(accounts, &AccountData{
			Hash: hash,
			Body: account,
		})
		// If we've exceeded the request threshold, abort
		if bytes.Compare(hash[:], req.Limit[:]) >= 0 {
			break
		}
		if size > req.Bytes {
			break
		}
	}
	it.Release()

	// Generate the Merkle proofs for the first and last account
	proof := trienode.NewProofSet()
	if err := tr.Prove(req.Origin[:], proof); err != nil {
		log.Warn("Failed to prove account range", "origin", req.Origin, "err", err)
		return nil, nil
	}
	if last != (common.Hash{}) {
		if err := tr.Prove(last[:], proof); err != nil {
			log.Warn("Failed to prove account range", "last", last, "err", err)
			return nil, nil
		}
	}
	var proofs [][]byte
	for _, blob := range proof.List() {
		proofs = append(proofs, blob)
	}
	return accounts, proofs
}
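
// Note on the proofs above, informational: proving both req.Origin and the
// last returned hash gives the requester the boundary trie nodes it needs to
// verify that the returned accounts form a complete, gapless slice of the
// account trie between the two proven keys.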

// ServiceGetStorageRangesQuery assembles the response to a storage range query.
// It is exposed to allow external packages to test protocol behavior.
func ServiceGetStorageRangesQuery(chain *core.BlockChain, req *GetStorageRangesPacket) ([][]*StorageData, [][]byte) {
	if req.Bytes > softResponseLimit {
		req.Bytes = softResponseLimit
	}
	// TODO(karalabe): Do we want to enforce > 0 accounts and 1 account if origin is set?
	// TODO(karalabe): - Logging locally is not ideal as remote faults annoy the local user
	// TODO(karalabe): - Dropping the remote peer is less flexible wrt client bugs (slow is better than non-functional)

	// Calculate the hard limit at which to abort, even if mid storage trie
	hardLimit := uint64(float64(req.Bytes) * (1 + stateLookupSlack))

	// Retrieve storage ranges until the packet limit is reached
	var (
		slots  [][]*StorageData
		proofs [][]byte
		size   uint64
	)
	for _, account := range req.Accounts {
		// If we've exceeded the requested data limit, abort without opening
		// a new storage range (that we'd need to prove due to exceeded size)
		if size >= req.Bytes {
			break
		}
		// The first account might start from a different origin and end sooner
		var origin common.Hash
		if len(req.Origin) > 0 {
			origin, req.Origin = common.BytesToHash(req.Origin), nil
		}
		var limit = common.MaxHash
		if len(req.Limit) > 0 {
			limit, req.Limit = common.BytesToHash(req.Limit), nil
		}
		// Retrieve the requested state and bail out if non-existent
		it, err := chain.Snapshots().StorageIterator(req.Root, account, origin)
		if err != nil {
			return nil, nil
		}
		// Iterate over the requested range and pile slots up
		var (
			storage []*StorageData
			last    common.Hash
			abort   bool
		)
		for it.Next() {
			if size >= hardLimit {
				abort = true
				break
			}
			hash, slot := it.Hash(), common.CopyBytes(it.Slot())

			// Track the returned interval for the Merkle proofs
			last = hash

			// Assemble the reply item
			size += uint64(common.HashLength + len(slot))
			storage = append(storage, &StorageData{
				Hash: hash,
				Body: slot,
			})
			// If we've exceeded the request threshold, abort
			if bytes.Compare(hash[:], limit[:]) >= 0 {
				break
			}
		}
		if len(storage) > 0 {
			slots = append(slots, storage)
		}
		it.Release()

		// Generate the Merkle proofs for the first and last storage slot, but
		// only if the response was capped. If the entire storage trie was
		// included in the response, there's no need for any proofs.
		if origin != (common.Hash{}) || (abort && len(storage) > 0) {
			// Request started at a non-zero hash or was capped prematurely, add
			// the endpoint Merkle proofs
			accTrie, err := trie.NewStateTrie(trie.StateTrieID(req.Root), chain.TrieDB())
			if err != nil {
				return nil, nil
			}
			acc, err := accTrie.GetAccountByHash(account)
			if err != nil || acc == nil {
				return nil, nil
			}
			id := trie.StorageTrieID(req.Root, account, acc.Root)
			stTrie, err := trie.NewStateTrie(id, chain.TrieDB())
			if err != nil {
				return nil, nil
			}
			proof := trienode.NewProofSet()
			if err := stTrie.Prove(origin[:], proof); err != nil {
				log.Warn("Failed to prove storage range", "origin", req.Origin, "err", err)
				return nil, nil
			}
			if last != (common.Hash{}) {
				if err := stTrie.Prove(last[:], proof); err != nil {
					log.Warn("Failed to prove storage range", "last", last, "err", err)
					return nil, nil
				}
			}
			for _, blob := range proof.List() {
				proofs = append(proofs, blob)
			}
			// A proof terminates the reply, since proofs are only added when a
			// node refuses to serve more data (the exception being when a
			// contract fetch is just finishing, but that is acceptable).
			break
		}
	}
	return slots, proofs
}
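
// Note on origin/limit handling above, informational: req.Origin and req.Limit
// apply to the first account only (both are nil-ed out once read), so every
// subsequent account is served from the start of its storage trie, bounded
// only by the size limits.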

// ServiceGetByteCodesQuery assembles the response to a byte codes query.
// It is exposed to allow external packages to test protocol behavior.
func ServiceGetByteCodesQuery(chain *core.BlockChain, req *GetByteCodesPacket) [][]byte {
	if req.Bytes > softResponseLimit {
		req.Bytes = softResponseLimit
	}
	if len(req.Hashes) > maxCodeLookups {
		req.Hashes = req.Hashes[:maxCodeLookups]
	}
	// Retrieve bytecodes until the packet size limit is reached
	var (
		codes [][]byte
		bytes uint64
	)
	for _, hash := range req.Hashes {
		if hash == types.EmptyCodeHash {
			// Peers should not request the empty code, but if they do, at
			// least send them back a correct response without db lookups
			codes = append(codes, []byte{})
		} else if blob, err := chain.ContractCodeWithPrefix(hash); err == nil {
			codes = append(codes, blob)
			bytes += uint64(len(blob))
		}
		if bytes > req.Bytes {
			break
		}
	}
	return codes
}
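
// For reference: types.EmptyCodeHash is the Keccak-256 hash of empty input,
// i.e. the code hash carried by every non-contract account, so answering it
// from memory avoids a guaranteed-useless database lookup.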

// ServiceGetTrieNodesQuery assembles the response to a trie nodes query.
// It is exposed to allow external packages to test protocol behavior.
func ServiceGetTrieNodesQuery(chain *core.BlockChain, req *GetTrieNodesPacket, start time.Time) ([][]byte, error) {
	if req.Bytes > softResponseLimit {
		req.Bytes = softResponseLimit
	}
	// Make sure we have the state associated with the request
	triedb := chain.TrieDB()

	accTrie, err := trie.NewStateTrie(trie.StateTrieID(req.Root), triedb)
	if err != nil {
		// We don't have the requested state available, bail out
		return nil, nil
	}
	// The snapshot ('snap') might be nil, in which case we cannot serve storage
	// slot lookups from it and must fall back to the account trie.
	snap := chain.Snapshots().Snapshot(req.Root)

	// Retrieve trie nodes until the packet size limit is reached
	var (
		nodes [][]byte
		bytes uint64
		loads int // Trie hash expansions to count database reads
	)
	for _, pathset := range req.Paths {
		switch len(pathset) {
		case 0:
			// Ensure we penalize invalid requests
			return nil, fmt.Errorf("%w: zero-item pathset requested", errBadRequest)

		case 1:
			// If we're only retrieving an account trie node, fetch it directly
			blob, resolved, err := accTrie.GetNode(pathset[0])
			loads += resolved // always account for database reads, even for failures
			if err != nil {
				break
			}
			nodes = append(nodes, blob)
			bytes += uint64(len(blob))

		default:
			var stRoot common.Hash

			// Storage slots requested, open the storage trie and retrieve from there
			if snap == nil {
				// We don't have the requested state snapshotted yet (or it is stale),
				// but can look up the account via the trie instead.
				account, err := accTrie.GetAccountByHash(common.BytesToHash(pathset[0]))
				loads += 8 // We don't know the exact cost of lookup, this is an estimate
				if err != nil || account == nil {
					break
				}
				stRoot = account.Root
			} else {
				account, err := snap.Account(common.BytesToHash(pathset[0]))
				loads++ // always account for database reads, even for failures
				if err != nil || account == nil {
					break
				}
				stRoot = common.BytesToHash(account.Root)
			}
			id := trie.StorageTrieID(req.Root, common.BytesToHash(pathset[0]), stRoot)
			stTrie, err := trie.NewStateTrie(id, triedb)
			loads++ // always account for database reads, even for failures
			if err != nil {
				break
			}
			for _, path := range pathset[1:] {
				blob, resolved, err := stTrie.GetNode(path)
				loads += resolved // always account for database reads, even for failures
				if err != nil {
					break
				}
				nodes = append(nodes, blob)
				bytes += uint64(len(blob))

				// Sanity check limits to avoid DoS on the store trie loads
				if bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent {
					break
				}
			}
		}
		// Abort request processing if we've exceeded our limits
		if bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent {
			break
		}
	}
	return nodes, nil
}
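
// Path set layout, informational: a one-element path set addresses a single
// node in the account trie, while a multi-element set addresses an account
// (element 0, its hashed address) followed by one or more node paths within
// that account's storage trie (elements 1..n).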

// NodeInfo represents a short summary of the `snap` sub-protocol metadata
// known about the host peer.
type NodeInfo struct{}

// nodeInfo retrieves some `snap` protocol metadata about the running host node.
func nodeInfo(chain *core.BlockChain) *NodeInfo {
	return &NodeInfo{}
}