2ce00adb55
* focus on performance improvement in many aspects:
  1. Do BlockBody verification concurrently;
  2. Do calculation of the intermediate root concurrently;
  3. Preload accounts before processing blocks;
  4. Make the snapshot layers configurable;
  5. Reuse some objects to reduce GC.

* rlp: improve decoder stream implementation (#22858)

  This commit makes various cleanup changes to rlp.Stream.

  * rlp: shrink Stream struct

    This removes a lot of unused padding space in Stream by reordering the fields.
    The size of Stream changes from 120 bytes to 88 bytes. Stream instances are
    internally cached and reused using sync.Pool, so this does not improve
    performance.

  * rlp: simplify list stack

    The list stack kept track of the size of the current list context as well as
    the current offset into it. The size had to be stored in the stack in order to
    subtract it from the remaining bytes of any enclosing list in ListEnd. It seems
    that this can be implemented in a simpler way: just subtract the size from the
    enclosing list context in List instead.

* rlp: use atomic.Value for type cache (#22902)

  All encoding/decoding operations read the type cache to find the writer/decoder
  function responsible for a type. When analyzing CPU profiles of geth during sync,
  I found that the use of sync.RWMutex in cache lookups appears in the profiles. It
  seems we are running into CPU cache contention problems when package rlp is
  heavily used on all CPU cores during sync.

  This change makes it use atomic.Value + a writer lock instead of sync.RWMutex. In
  the common case where the typeinfo entry is present in the cache, we simply fetch
  the map and look up the type. (A sketch of this pattern follows the commit
  message below.)

* rlp: optimize byte array handling (#22924)

  This change improves the performance of encoding/decoding [N]byte.

  name                       old time/op    new time/op    delta
  DecodeByteArrayStruct-8    336ns ± 0%     246ns ± 0%     -26.98%  (p=0.000 n=9+10)
  EncodeByteArrayStruct-8    225ns ± 1%     148ns ± 1%     -34.12%  (p=0.000 n=10+10)

  name                       old alloc/op   new alloc/op   delta
  DecodeByteArrayStruct-8    120B ± 0%      48B ± 0%       -60.00%  (p=0.000 n=10+10)
  EncodeByteArrayStruct-8    0.00B          0.00B          ~        (all equal)

* rlp: optimize big.Int decoding for size <= 32 bytes (#22927)

  This change grows the static integer buffer in Stream to 32 bytes, making it
  possible to decode 256-bit integers without allocating a temporary buffer. In the
  recent commit 088da24, the Stream struct size decreased from 120 bytes down to 88
  bytes. This commit grows the struct to 112 bytes again, but the size change will
  not degrade performance because Stream instances are internally cached in
  sync.Pool.

  name             old time/op    new time/op    delta
  DecodeBigInts-8  12.2µs ± 0%    8.6µs ± 4%     -29.58%  (p=0.000 n=9+10)

  name             old speed      new speed      delta
  DecodeBigInts-8  230MB/s ± 0%   326MB/s ± 4%   +42.04%  (p=0.000 n=9+10)

* eth/protocols/eth, les: avoid Raw() when decoding HashOrNumber (#22841)

  Getting the raw value is not necessary to decode this type, and decoding it
  directly from the stream is faster.

* fix testcase
* debug no lazy
* fix can not repair
* address comments

Co-authored-by: Felix Lange <fjl@twurst.com>
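To make the #22902 change above concrete, here is a minimal, self-contained sketch of the atomic.Value + writer-lock pattern it describes. This is not the actual rlp package code: the typeCache, handler, and info names are invented for the illustration; only the read-mostly caching technique itself mirrors the commit.

package main

import (
	"fmt"
	"reflect"
	"sync"
	"sync/atomic"
)

// handler stands in for the per-type writer/decoder pair such a cache would store.
type handler struct{ name string }

// typeCache is a read-mostly cache: readers fetch an immutable map through
// atomic.Value without locking; writers take a mutex, copy the map, add the
// new entry and publish the copy.
type typeCache struct {
	cur atomic.Value // holds map[reflect.Type]*handler
	mu  sync.Mutex   // serializes writers only
}

func newTypeCache() *typeCache {
	c := new(typeCache)
	c.cur.Store(make(map[reflect.Type]*handler))
	return c
}

// info returns the cached handler for typ, generating and publishing one on a miss.
func (c *typeCache) info(typ reflect.Type) *handler {
	if h := c.cur.Load().(map[reflect.Type]*handler)[typ]; h != nil {
		return h // common case: lock-free map lookup
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	cur := c.cur.Load().(map[reflect.Type]*handler)
	if h := cur[typ]; h != nil {
		return h // another goroutine published it while we waited for the lock
	}
	next := make(map[reflect.Type]*handler, len(cur)+1)
	for k, v := range cur {
		next[k] = v
	}
	h := &handler{name: typ.String()}
	next[typ] = h
	c.cur.Store(next) // publish the extended map for future lock-free reads
	return h
}

func main() {
	c := newTypeCache()
	fmt.Println(c.info(reflect.TypeOf(uint64(0))).name) // prints "uint64"
}

Readers pay only an atomic load and a map lookup; after warm-up, insertions are rare, so copying the map on each write is cheap in practice.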
311 lines
9.4 KiB
Go
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package state

import (
	"errors"
	"fmt"
	"time"

	"github.com/VictoriaMetrics/fastcache"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/trie"
	lru "github.com/hashicorp/golang-lru"
)

const (
	// Number of codehash->size associations to keep.
	codeSizeCacheSize = 100000

	// Number of account tries to keep in the cache.
	accountTrieCacheSize = 32

	// Number of storage tries to keep in the cache.
	storageTrieCacheSize = 2000

	// Cache size granted for caching clean code.
	codeCacheSize = 64 * 1024 * 1024

	// Interval, in seconds, at which the background loop checks whether the
	// cached account trie has grown large enough to warrant a purge.
	purgeInterval = 600

	// Account trie size above which the trie caches are purged.
	maxAccountTrieSize = 1024 * 1024
)

// Database wraps access to tries and contract code.
type Database interface {
	// OpenTrie opens the main account trie.
	OpenTrie(root common.Hash) (Trie, error)

	// OpenStorageTrie opens the storage trie of an account.
	OpenStorageTrie(addrHash, root common.Hash) (Trie, error)

	// CopyTrie returns an independent copy of the given trie.
	CopyTrie(Trie) Trie

	// ContractCode retrieves a particular contract's code.
	ContractCode(addrHash, codeHash common.Hash) ([]byte, error)

	// ContractCodeSize retrieves a particular contract's code size.
	ContractCodeSize(addrHash, codeHash common.Hash) (int, error)

	// TrieDB retrieves the low-level trie database used for data storage.
	TrieDB() *trie.Database

	// CacheAccount caches the account trie for later reuse.
	CacheAccount(root common.Hash, t Trie)

	// CacheStorage caches the storage trie of an account for later reuse.
	CacheStorage(addrHash common.Hash, root common.Hash, t Trie)

	// Purge clears the account and storage trie caches.
	Purge()
}

// Trie is an Ethereum Merkle Patricia trie.
type Trie interface {
	// GetKey returns the sha3 preimage of a hashed key that was previously used
	// to store a value.
	//
	// TODO(fjl): remove this when SecureTrie is removed
	GetKey([]byte) []byte

	// TryGet returns the value for key stored in the trie. The value bytes must
	// not be modified by the caller. If a node was not found in the database, a
	// trie.MissingNodeError is returned.
	TryGet(key []byte) ([]byte, error)

	// TryUpdate associates key with value in the trie. If value has length zero, any
	// existing value is deleted from the trie. The value bytes must not be modified
	// by the caller while they are stored in the trie. If a node was not found in the
	// database, a trie.MissingNodeError is returned.
	TryUpdate(key, value []byte) error

	// TryDelete removes any existing value for key from the trie. If a node was not
	// found in the database, a trie.MissingNodeError is returned.
	TryDelete(key []byte) error

	// Hash returns the root hash of the trie. It does not write to the database and
	// can be used even if the trie doesn't have one.
	Hash() common.Hash

	// Commit writes all nodes to the trie's memory database, tracking the internal
	// and external (for account tries) references.
	Commit(onleaf trie.LeafCallback) (common.Hash, error)

	// NodeIterator returns an iterator that returns nodes of the trie. Iteration
	// starts at the key after the given start key.
	NodeIterator(startKey []byte) trie.NodeIterator

	// Prove constructs a Merkle proof for key. The result contains all encoded nodes
	// on the path to the value at key. The value itself is also included in the last
	// node and can be retrieved by verifying the proof.
	//
	// If the trie does not contain a value for key, the returned proof contains all
	// nodes of the longest existing prefix of the key (at least the root), ending
	// with the node that proves the absence of the key.
	Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error
}

// NewDatabase creates a backing store for state. The returned database is safe for
// concurrent use, but does not retain any recent trie nodes in memory. To keep some
// historical state in memory, use the NewDatabaseWithConfig constructor.
func NewDatabase(db ethdb.Database) Database {
	return NewDatabaseWithConfig(db, nil)
}

// NewDatabaseWithConfig creates a backing store for state. The returned database
// is safe for concurrent use and retains a lot of collapsed RLP trie nodes in a
// large memory cache.
func NewDatabaseWithConfig(db ethdb.Database, config *trie.Config) Database {
	csc, _ := lru.New(codeSizeCacheSize)
	return &cachingDB{
		db:            trie.NewDatabaseWithConfig(db, config),
		codeSizeCache: csc,
		codeCache:     fastcache.New(codeCacheSize),
	}
}

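// NewDatabaseWithConfigAndCache creates a backing store for state. In addition to
// the code caches, it keeps recently used account and storage tries in LRU caches
// and starts a background loop that purges them once the cached account trie grows
// beyond maxAccountTrieSize.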
func NewDatabaseWithConfigAndCache(db ethdb.Database, config *trie.Config) Database {
	csc, _ := lru.New(codeSizeCacheSize)
	atc, _ := lru.New(accountTrieCacheSize)
	stc, _ := lru.New(storageTrieCacheSize)

	database := &cachingDB{
		db:               trie.NewDatabaseWithConfig(db, config),
		codeSizeCache:    csc,
		codeCache:        fastcache.New(codeCacheSize),
		accountTrieCache: atc,
		storageTrieCache: stc,
	}
	go database.purgeLoop()
	return database
}

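// cachingDB implements Database on top of a trie.Database, caching contract code,
// code sizes and, optionally, recently used account and storage tries.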
type cachingDB struct {
	db               *trie.Database
	codeSizeCache    *lru.Cache
	codeCache        *fastcache.Cache
	accountTrieCache *lru.Cache
	storageTrieCache *lru.Cache
}

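// triePair ties a cached storage trie to the root hash it was opened with.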
type triePair struct {
	root common.Hash
	trie Trie
}

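// purgeLoop periodically inspects the oldest cached account trie and, once it has
// grown beyond maxAccountTrieSize, purges both trie caches to bound memory usage.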
func (db *cachingDB) purgeLoop() {
	for {
		time.Sleep(purgeInterval * time.Second)
		_, accounts, ok := db.accountTrieCache.GetOldest()
		if !ok {
			continue
		}
		tr := accounts.(*trie.SecureTrie).GetRawTrie()
		if tr.Size() > maxAccountTrieSize {
			db.Purge()
		}
	}
}

// OpenTrie opens the main account trie at a specific root hash.
func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) {
	if db.accountTrieCache != nil {
		if tr, exist := db.accountTrieCache.Get(root); exist {
			return tr.(Trie).(*trie.SecureTrie).Copy(), nil
		}
	}
	tr, err := trie.NewSecure(root, db.db)
	if err != nil {
		return nil, err
	}
	return tr, nil
}

// OpenStorageTrie opens the storage trie of an account.
func (db *cachingDB) OpenStorageTrie(addrHash, root common.Hash) (Trie, error) {
	if db.storageTrieCache != nil {
		if tries, exist := db.storageTrieCache.Get(addrHash); exist {
			triesPairs := tries.([3]*triePair)
			for _, triePair := range triesPairs {
				if triePair != nil && triePair.root == root {
					return triePair.trie.(*trie.SecureTrie).Copy(), nil
				}
			}
		}
	}

	tr, err := trie.NewSecure(root, db.db)
	if err != nil {
		return nil, err
	}
	return tr, nil
}

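// CacheAccount adds a reset copy of the given account trie to the cache.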
func (db *cachingDB) CacheAccount(root common.Hash, t Trie) {
	if db.accountTrieCache == nil {
		return
	}
	tr := t.(*trie.SecureTrie)
	db.accountTrieCache.Add(root, tr.ResetCopy())
}

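// CacheStorage adds a reset copy of an account's storage trie to the cache,
// keeping at most the three most recently cached roots per account.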
func (db *cachingDB) CacheStorage(addrHash common.Hash, root common.Hash, t Trie) {
	if db.storageTrieCache == nil {
		return
	}
	tr := t.(*trie.SecureTrie)
	if tries, exist := db.storageTrieCache.Get(addrHash); exist {
		triesArray := tries.([3]*triePair)
		newTriesArray := [3]*triePair{
			{root: root, trie: tr.ResetCopy()},
			triesArray[0],
			triesArray[1],
		}
		db.storageTrieCache.Add(addrHash, newTriesArray)
	} else {
		triesArray := [3]*triePair{{root: root, trie: tr.ResetCopy()}, nil, nil}
		db.storageTrieCache.Add(addrHash, triesArray)
	}
}

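// Purge drops all cached account and storage tries.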
func (db *cachingDB) Purge() {
	if db.storageTrieCache != nil {
		db.storageTrieCache.Purge()
	}
	if db.accountTrieCache != nil {
		db.accountTrieCache.Purge()
	}
}

// CopyTrie returns an independent copy of the given trie.
func (db *cachingDB) CopyTrie(t Trie) Trie {
	switch t := t.(type) {
	case *trie.SecureTrie:
		return t.Copy()
	default:
		panic(fmt.Errorf("unknown trie type %T", t))
	}
}

// ContractCode retrieves a particular contract's code.
func (db *cachingDB) ContractCode(addrHash, codeHash common.Hash) ([]byte, error) {
	if code := db.codeCache.Get(nil, codeHash.Bytes()); len(code) > 0 {
		return code, nil
	}
	code := rawdb.ReadCode(db.db.DiskDB(), codeHash)
	if len(code) > 0 {
		db.codeCache.Set(codeHash.Bytes(), code)
		db.codeSizeCache.Add(codeHash, len(code))
		return code, nil
	}
	return nil, errors.New("not found")
}

// ContractCodeWithPrefix retrieves a particular contract's code. If the code
// cannot be found in the cache, it is looked up using the new, prefixed
// database scheme.
func (db *cachingDB) ContractCodeWithPrefix(addrHash, codeHash common.Hash) ([]byte, error) {
	if code := db.codeCache.Get(nil, codeHash.Bytes()); len(code) > 0 {
		return code, nil
	}
	code := rawdb.ReadCodeWithPrefix(db.db.DiskDB(), codeHash)
	if len(code) > 0 {
		db.codeCache.Set(codeHash.Bytes(), code)
		db.codeSizeCache.Add(codeHash, len(code))
		return code, nil
	}
	return nil, errors.New("not found")
}

// ContractCodeSize retrieves a particular contract's code size.
func (db *cachingDB) ContractCodeSize(addrHash, codeHash common.Hash) (int, error) {
	if cached, ok := db.codeSizeCache.Get(codeHash); ok {
		return cached.(int), nil
	}
	code, err := db.ContractCode(addrHash, codeHash)
	return len(code), err
}

// TrieDB retrieves any intermediate trie-node caching layer.
func (db *cachingDB) TrieDB() *trie.Database {
	return db.db
}