Merge pull request #2527 from bnb-chain/develop

Draft release v1.4.10
zzzckck 2024-06-21 16:13:48 +08:00 committed by GitHub
commit f0c7795542
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
25 changed files with 331 additions and 43 deletions

@ -1,4 +1,20 @@
# Changelog
## v1.4.10
### FEATURE
NA
### IMPROVEMENT
* [\#2512](https://github.com/bnb-chain/bsc/pull/2512) feat: add mev helper params and func
* [\#2508](https://github.com/bnb-chain/bsc/pull/2508) perf: speedup pbss trienode read
* [\#2509](https://github.com/bnb-chain/bsc/pull/2509) perf: optimize chain commit performance for multi-database
* [\#2451](https://github.com/bnb-chain/bsc/pull/2451) core/forkchoice: improve stability when inturn block not generate
### BUGFIX
* [\#2518](https://github.com/bnb-chain/bsc/pull/2518) fix: remove zero gasprice check for BSC
* [\#2519](https://github.com/bnb-chain/bsc/pull/2519) UT: random failure of TestSnapSyncWithBlobs
* [\#2515](https://github.com/bnb-chain/bsc/pull/2515) fix getBlobSidecars by ethclient
* [\#2525](https://github.com/bnb-chain/bsc/pull/2525) fix: ensure empty withdrawals after cancun before broadcast
## v1.4.9
### FEATURE
* [\#2463](https://github.com/bnb-chain/bsc/pull/2463) utils: add check_blobtx.js

@ -301,6 +301,7 @@ type BlockChain struct {
diffLayerFreezerBlockLimit uint64
wg sync.WaitGroup
dbWg sync.WaitGroup
quit chan struct{} // shutdown signal, closed in Stop.
stopping atomic.Bool // false if chain is running, true when stopped
procInterrupt atomic.Bool // interrupt signaler for block processing
@ -669,7 +670,7 @@ func (bc *BlockChain) cacheBlock(hash common.Hash, block *types.Block) {
// into node seamlessly.
func (bc *BlockChain) empty() bool {
genesis := bc.genesisBlock.Hash()
for _, hash := range []common.Hash{rawdb.ReadHeadBlockHash(bc.db.BlockStore()), rawdb.ReadHeadHeaderHash(bc.db.BlockStore()), rawdb.ReadHeadFastBlockHash(bc.db)} {
for _, hash := range []common.Hash{rawdb.ReadHeadBlockHash(bc.db.BlockStore()), rawdb.ReadHeadHeaderHash(bc.db.BlockStore()), rawdb.ReadHeadFastBlockHash(bc.db.BlockStore())} {
if hash != genesis {
return false
}
@ -738,7 +739,7 @@ func (bc *BlockChain) loadLastState() error {
bc.currentSnapBlock.Store(headBlock.Header())
headFastBlockGauge.Update(int64(headBlock.NumberU64()))
if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) {
if head := rawdb.ReadHeadFastBlockHash(bc.db.BlockStore()); head != (common.Hash{}) {
if block := bc.GetBlockByHash(head); block != nil {
bc.currentSnapBlock.Store(block.Header())
headFastBlockGauge.Update(int64(block.NumberU64()))
@ -1137,7 +1138,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha
// If SetHead was only called as a chain reparation method, try to skip
// touching the header chain altogether, unless the freezer is broken
if repair {
if target, force := updateFn(bc.db, bc.CurrentBlock()); force {
if target, force := updateFn(bc.db.BlockStore(), bc.CurrentBlock()); force {
bc.hc.SetHead(target.Number.Uint64(), updateFn, delFn)
}
} else {
@ -1298,19 +1299,33 @@ func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
//
// Note, this function assumes that the `mu` mutex is held!
func (bc *BlockChain) writeHeadBlock(block *types.Block) {
// Add the block to the canonical chain number scheme and mark as the head
rawdb.WriteCanonicalHash(bc.db.BlockStore(), block.Hash(), block.NumberU64())
rawdb.WriteHeadHeaderHash(bc.db.BlockStore(), block.Hash())
rawdb.WriteHeadBlockHash(bc.db.BlockStore(), block.Hash())
bc.dbWg.Add(2)
defer bc.dbWg.Wait()
go func() {
defer bc.dbWg.Done()
// Add the block to the canonical chain number scheme and mark as the head
blockBatch := bc.db.BlockStore().NewBatch()
rawdb.WriteCanonicalHash(blockBatch, block.Hash(), block.NumberU64())
rawdb.WriteHeadHeaderHash(blockBatch, block.Hash())
rawdb.WriteHeadBlockHash(blockBatch, block.Hash())
rawdb.WriteHeadFastBlockHash(blockBatch, block.Hash())
// Flush the whole batch into the disk, exit the node if failed
if err := blockBatch.Write(); err != nil {
log.Crit("Failed to update chain indexes and markers in block db", "err", err)
}
}()
go func() {
defer bc.dbWg.Done()
batch := bc.db.NewBatch()
rawdb.WriteHeadFastBlockHash(batch, block.Hash())
rawdb.WriteTxLookupEntriesByBlock(batch, block)
batch := bc.db.NewBatch()
rawdb.WriteTxLookupEntriesByBlock(batch, block)
// Flush the whole batch into the disk, exit the node if failed
if err := batch.Write(); err != nil {
log.Crit("Failed to update chain indexes in chain db", "err", err)
}
}()
// Flush the whole batch into the disk, exit the node if failed
if err := batch.Write(); err != nil {
log.Crit("Failed to update chain indexes and markers", "err", err)
}
// Update all in-memory chain markers in the last step
bc.hc.SetCurrentHeader(block.Header())
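The rewritten writeHeadBlock above splits the head-marker writes between the block store and the chain database and flushes the two batches concurrently, waiting for both before the in-memory markers are updated. A minimal sketch of that fan-out pattern, with a placeholder batch interface and hypothetical fill helpers standing in for the rawdb calls:

```go
package main

import (
	"log"
	"sync"
)

// batch is a stand-in for an ethdb batch; Write flushes it to disk.
type batch interface{ Write() error }

// writeHeadMarkers mirrors the shape of writeHeadBlock above: one goroutine
// populates and flushes the block-store batch, another the chain-db batch,
// and the caller proceeds only after both flushes have completed.
func writeHeadMarkers(blockBatch, chainBatch batch, fillBlock, fillChain func(batch)) {
	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		defer wg.Done()
		fillBlock(blockBatch) // canonical hash + head markers go to the block store
		if err := blockBatch.Write(); err != nil {
			log.Fatalf("block store flush failed: %v", err)
		}
	}()
	go func() {
		defer wg.Done()
		fillChain(chainBatch) // tx lookup entries go to the chain db
		if err := chainBatch.Write(); err != nil {
			log.Fatalf("chain db flush failed: %v", err)
		}
	}()
	wg.Wait()
}
```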
@ -1531,7 +1546,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
} else if !reorg {
return false
}
rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
rawdb.WriteHeadFastBlockHash(bc.db.BlockStore(), head.Hash())
bc.currentSnapBlock.Store(head.Header())
headFastBlockGauge.Update(int64(head.NumberU64()))
return true
@ -1774,7 +1789,6 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
rawdb.WritePreimages(bc.db, state.Preimages())
blockBatch := bc.db.BlockStore().NewBatch()
rawdb.WriteTd(blockBatch, block.Hash(), block.NumberU64(), externTd)
rawdb.WriteBlock(blockBatch, block)
@ -1783,7 +1797,11 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
if bc.chainConfig.IsCancun(block.Number(), block.Time()) {
rawdb.WriteBlobSidecars(blockBatch, block.Hash(), block.NumberU64(), block.Sidecars())
}
rawdb.WritePreimages(blockBatch, state.Preimages())
if bc.db.StateStore() != nil {
rawdb.WritePreimages(bc.db.StateStore(), state.Preimages())
} else {
rawdb.WritePreimages(blockBatch, state.Preimages())
}
if err := blockBatch.Write(); err != nil {
log.Crit("Failed to write block into disk", "err", err)
}

@ -114,7 +114,9 @@ func (f *ForkChoice) ReorgNeeded(current *types.Header, extern *types.Header) (b
if f.preserve != nil {
currentPreserve, externPreserve = f.preserve(current), f.preserve(extern)
}
reorg = !currentPreserve && (externPreserve || f.rand.Float64() < 0.5)
reorg = !currentPreserve && (externPreserve ||
extern.Time < current.Time ||
extern.Time == current.Time && f.rand.Float64() < 0.5)
}
return reorg, nil
}
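The new rule above means an in-turn block that arrives first is no longer displaced half of the time by a later block of equal difficulty: an external block with a strictly earlier timestamp wins outright, and only an exact timestamp tie falls back to the coin flip. A standalone sketch of just that tie-breaking step (total-difficulty and preserve checks omitted):

```go
package main

import (
	"fmt"
	"math/rand"
)

// reorgOnEqualTD mirrors the tie-break above for the equal-total-difficulty,
// nothing-preserved case: prefer the earlier timestamp, coin-flip on a tie.
func reorgOnEqualTD(currentTime, externTime uint64, rnd *rand.Rand) bool {
	return externTime < currentTime ||
		(externTime == currentTime && rnd.Float64() < 0.5)
}

func main() {
	rnd := rand.New(rand.NewSource(1))
	fmt.Println(reorgOnEqualTD(100, 99, rnd))  // true: external block is older, reorg
	fmt.Println(reorgOnEqualTD(100, 101, rnd)) // false: external block is newer, keep current
	fmt.Println(reorgOnEqualTD(100, 100, rnd)) // tie: 50/50 coin flip
}
```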

@ -498,7 +498,7 @@ func (g *Genesis) Commit(db ethdb.Database, triedb *triedb.Database) (*types.Blo
rawdb.WriteReceipts(db.BlockStore(), block.Hash(), block.NumberU64(), nil)
rawdb.WriteCanonicalHash(db.BlockStore(), block.Hash(), block.NumberU64())
rawdb.WriteHeadBlockHash(db.BlockStore(), block.Hash())
rawdb.WriteHeadFastBlockHash(db, block.Hash())
rawdb.WriteHeadFastBlockHash(db.BlockStore(), block.Hash())
rawdb.WriteHeadHeaderHash(db.BlockStore(), block.Hash())
rawdb.WriteChainConfig(db, block.Hash(), config)
return block, nil

@ -668,7 +668,7 @@ func (hc *HeaderChain) setHead(headBlock uint64, headTime uint64, updateFn Updat
// first, then remove the related data from the database.
//
// Update the head first (head fast block, head full block) before deleting the data.
markerBatch := hc.chainDb.NewBatch()
markerBatch := hc.chainDb.BlockStore().NewBatch()
if updateFn != nil {
newHead, force := updateFn(markerBatch, parent)
if force && ((headTime > 0 && newHead.Time < headTime) || (headTime == 0 && newHead.Number.Uint64() < headBlock)) {
@ -677,7 +677,7 @@ func (hc *HeaderChain) setHead(headBlock uint64, headTime uint64, updateFn Updat
}
}
// Update head header then.
rawdb.WriteHeadHeaderHash(hc.chainDb.BlockStore(), parentHash)
rawdb.WriteHeadHeaderHash(markerBatch, parentHash)
if err := markerBatch.Write(); err != nil {
log.Crit("Failed to update chain markers", "error", err)
}

@ -81,7 +81,7 @@ func InitDatabaseFromFreezer(db ethdb.Database) {
batch.Reset()
WriteHeadHeaderHash(db.BlockStore(), hash)
WriteHeadFastBlockHash(db, hash)
WriteHeadFastBlockHash(db.BlockStore(), hash)
log.Info("Initialized database from freezer", "blocks", frozen, "elapsed", common.PrettyDuration(time.Since(start)))
}

@ -873,7 +873,7 @@ func DataTypeByKey(key []byte) DataType {
return StateDataType
}
}
for _, meta := range [][]byte{headHeaderKey, headFinalizedBlockKey} {
for _, meta := range [][]byte{headHeaderKey, headFinalizedBlockKey, headBlockKey, headFastBlockKey} {
if bytes.Equal(key, meta) {
return BlockDataType
}
@ -1088,7 +1088,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
hashNumPairings.Add(size)
default:
var accounted bool
for _, meta := range [][]byte{headHeaderKey, headFinalizedBlockKey} {
for _, meta := range [][]byte{headHeaderKey, headFinalizedBlockKey, headBlockKey, headFastBlockKey} {
if bytes.Equal(key, meta) {
metadata.Add(size)
accounted = true
@ -1282,7 +1282,7 @@ func ReadChainMetadataFromMultiDatabase(db ethdb.Database) [][]string {
data := [][]string{
{"databaseVersion", pp(ReadDatabaseVersion(db))},
{"headBlockHash", fmt.Sprintf("%v", ReadHeadBlockHash(db.BlockStore()))},
{"headFastBlockHash", fmt.Sprintf("%v", ReadHeadFastBlockHash(db))},
{"headFastBlockHash", fmt.Sprintf("%v", ReadHeadFastBlockHash(db.BlockStore()))},
{"headHeaderHash", fmt.Sprintf("%v", ReadHeadHeaderHash(db.BlockStore()))},
{"lastPivotNumber", pp(ReadLastPivotNumber(db))},
{"len(snapshotSyncStatus)", fmt.Sprintf("%d bytes", len(ReadSnapshotSyncStatus(db)))},

@ -193,5 +193,7 @@ type MevParams struct {
ValidatorCommission uint64 // 100 means 1%
BidSimulationLeftOver time.Duration
GasCeil uint64
GasPrice *big.Int // Minimum avg gas price for bid block
BuilderFeeCeil *big.Int
Version string
}
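The two new fields (GasPrice, BuilderFeeCeil) are surfaced to builders through the mev Params API shown further below. A hedged sketch of fetching them with a raw RPC client, assuming the API is registered under the mev namespace as mev_params and that a local node exposes it:

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Placeholder endpoint; assumes the node enables the mev API namespace.
	client, err := rpc.Dial("http://localhost:8545")
	if err != nil {
		panic(err)
	}
	// Fetch the bid parameters the validator advertises to builders. The new
	// GasPrice field is the minimum average gas price a bid block must reach.
	var params json.RawMessage
	if err := client.CallContext(context.Background(), &params, "mev_params"); err != nil {
		panic(err)
	}
	fmt.Println(string(params))
}
```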

@ -484,6 +484,10 @@ func (b *EthAPIBackend) RemoveBuilder(builder common.Address) error {
return b.Miner().RemoveBuilder(builder)
}
func (b *EthAPIBackend) HasBuilder(builder common.Address) bool {
return b.Miner().HasBuilder(builder)
}
func (b *EthAPIBackend) SendBid(ctx context.Context, bid *types.BidArgs) (common.Hash, error) {
return b.Miner().SendBid(ctx, bid)
}

@ -161,12 +161,18 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
// Optimize memory distribution by reallocating surplus allowance from the
// dirty cache to the clean cache.
if config.StateScheme == rawdb.PathScheme && config.TrieDirtyCache > pathdb.MaxDirtyBufferSize/1024/1024 {
log.Info("Capped dirty cache size", "provided", common.StorageSize(config.TrieDirtyCache)*1024*1024, "adjusted", common.StorageSize(pathdb.MaxDirtyBufferSize))
log.Info("Clean cache size", "provided", common.StorageSize(config.TrieCleanCache)*1024*1024)
log.Info("Capped dirty cache size", "provided", common.StorageSize(config.TrieDirtyCache)*1024*1024,
"adjusted", common.StorageSize(pathdb.MaxDirtyBufferSize))
log.Info("Clean cache size", "provided", common.StorageSize(config.TrieCleanCache)*1024*1024,
"adjusted", common.StorageSize(config.TrieCleanCache+config.TrieDirtyCache-pathdb.MaxDirtyBufferSize/1024/1024)*1024*1024)
config.TrieCleanCache += config.TrieDirtyCache - pathdb.MaxDirtyBufferSize/1024/1024
config.TrieDirtyCache = pathdb.MaxDirtyBufferSize / 1024 / 1024
}
log.Info("Allocated trie memory caches", "clean", common.StorageSize(config.TrieCleanCache)*1024*1024, "dirty", common.StorageSize(config.TrieDirtyCache)*1024*1024)
log.Info("Allocated memory caches",
"state_scheme", config.StateScheme,
"trie_clean_cache", common.StorageSize(config.TrieCleanCache)*1024*1024,
"trie_dirty_cache", common.StorageSize(config.TrieDirtyCache)*1024*1024,
"snapshot_cache", common.StorageSize(config.SnapshotCache)*1024*1024)
// Try to recover offline state pruning only in hash-based.
if config.StateScheme == rawdb.HashScheme {
if err := pruner.RecoverPruning(stack.ResolvePath(""), chainDb, config.TriesInMemory); err != nil {
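The rebalancing above moves only the surplus from the dirty cache into the clean cache, so the total trie-cache budget is preserved. A worked sketch of the arithmetic, assuming pathdb.MaxDirtyBufferSize is 256 MiB and using hypothetical provided values:

```go
package main

import "fmt"

func main() {
	const maxDirtyMB = 256 // assumed pathdb.MaxDirtyBufferSize, in MiB

	dirty, clean := 1024, 512 // hypothetical TrieDirtyCache / TrieCleanCache, in MiB
	if dirty > maxDirtyMB {
		clean += dirty - maxDirtyMB // surplus is reallocated to the clean cache
		dirty = maxDirtyMB
	}
	// Total stays 1536 MiB: dirty capped at 256, clean grown to 1280.
	fmt.Println("dirty:", dirty, "MiB, clean:", clean, "MiB")
}
```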

@ -731,9 +731,6 @@ func (f *BlockFetcher) loop() {
matched = true
if f.getBlock(hash) == nil {
block := types.NewBlockWithHeader(announce.header).WithBody(task.transactions[i], task.uncles[i])
if block.Header().EmptyWithdrawalsHash() {
block = block.WithWithdrawals(make([]*types.Withdrawal, 0))
}
block = block.WithSidecars(task.sidecars[i])
block.ReceivedAt = task.time
blocks = append(blocks, block)
@ -919,6 +916,10 @@ func (f *BlockFetcher) importBlocks(op *blockOrHeaderInject) {
return
}
if block.Header().EmptyWithdrawalsHash() {
block = block.WithWithdrawals(make([]*types.Withdrawal, 0))
}
defer func() { f.done <- hash }()
// Quickly validate the header and propagate the block if it passes
switch err := f.verifyHeader(block.Header()); err {

@ -633,6 +633,9 @@ func testBroadcastBlock(t *testing.T, peers, bcasts int) {
go source.handler.runEthPeer(sourcePeer, func(peer *eth.Peer) error {
return eth.Handle((*ethHandler)(source.handler), peer)
})
// Wait a bit for the above handlers to start
time.Sleep(100 * time.Millisecond)
if err := sinkPeer.Handshake(1, td, genesis.Hash(), genesis.Hash(), forkid.NewIDWithChain(source.chain), forkid.NewFilter(source.chain), nil); err != nil {
t.Fatalf("failed to run protocol handshake")
}

@ -151,6 +151,8 @@ func testChainSyncWithBlobs(t *testing.T, mode downloader.SyncMode, preCancunBlk
go full.handler.runEthPeer(fullPeerEth, func(peer *eth.Peer) error {
return eth.Handle((*ethHandler)(full.handler), peer)
})
// Wait a bit for the above handlers to start
time.Sleep(250 * time.Millisecond)
emptyPipeSnap, fullPipeSnap := p2p.MsgPipe()
defer emptyPipeSnap.Close()

@ -133,7 +133,7 @@ func (ec *Client) BlockReceipts(ctx context.Context, blockNrOrHash rpc.BlockNumb
// BlobSidecars returns the sidecars of a given block number or hash.
func (ec *Client) BlobSidecars(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) ([]*types.BlobTxSidecar, error) {
var r []*types.BlobTxSidecar
err := ec.c.CallContext(ctx, &r, "eth_getBlobSidecars", blockNrOrHash.String(), true)
err := ec.c.CallContext(ctx, &r, "eth_getBlobSidecars", blockNrOrHash.String())
if err == nil && r == nil {
return nil, ethereum.NotFound
}
@ -143,7 +143,7 @@ func (ec *Client) BlobSidecars(ctx context.Context, blockNrOrHash rpc.BlockNumbe
// BlobSidecarByTxHash returns the sidecar of a given blob transaction.
func (ec *Client) BlobSidecarByTxHash(ctx context.Context, hash common.Hash) (*types.BlobTxSidecar, error) {
var r *types.BlobTxSidecar
err := ec.c.CallContext(ctx, &r, "eth_getBlockSidecarByTxHash", hash, true)
err := ec.c.CallContext(ctx, &r, "eth_getBlobSidecarByTxHash", hash)
if err == nil && r == nil {
return nil, ethereum.NotFound
}
@ -752,6 +752,13 @@ func (ec *Client) MevRunning(ctx context.Context) (bool, error) {
return result, err
}
// HasBuilder returns whether the builder is registered
func (ec *Client) HasBuilder(ctx context.Context, address common.Address) (bool, error) {
var result bool
err := ec.c.CallContext(ctx, &result, "mev_hasBuilder", address)
return result, err
}
// SendBid sends a bid
func (ec *Client) SendBid(ctx context.Context, args types.BidArgs) (common.Hash, error) {
var hash common.Hash
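A usage sketch of the client-side changes above: BlobSidecars and BlobSidecarByTxHash now send only the block or hash argument (the stray boolean was dropped, and the by-hash variant now calls eth_getBlobSidecarByTxHash), and the new HasBuilder wraps mev_hasBuilder. Endpoint and builder address are placeholders:

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	ctx := context.Background()
	client, err := ethclient.Dial("http://localhost:8545") // placeholder endpoint
	if err != nil {
		panic(err)
	}
	// Blob sidecars of the latest block; NotFound simply means the block carried no blobs.
	sidecars, err := client.BlobSidecars(ctx, rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber))
	if err != nil && !errors.Is(err, ethereum.NotFound) {
		panic(err)
	}
	fmt.Println("sidecars in latest block:", len(sidecars))

	// Ask the node whether a builder address is registered with the bid simulator.
	builder := common.HexToAddress("0x0000000000000000000000000000000000000000") // placeholder
	registered, err := client.HasBuilder(ctx, builder)
	if err != nil {
		panic(err)
	}
	fmt.Println("builder registered:", registered)
}
```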

@ -87,6 +87,10 @@ func (m *MevAPI) Params() *types.MevParams {
return m.b.MevParams()
}
func (m *MevAPI) HasBuilder(builder common.Address) bool {
return m.b.HasBuilder(builder)
}
// Running returns true if mev is running
func (m *MevAPI) Running() bool {
return m.b.MevRunning()

@ -650,7 +650,8 @@ func (b testBackend) ServiceFilter(ctx context.Context, session *bloombits.Match
panic("implement me")
}
func (b *testBackend) MevRunning() bool { return false }
func (b *testBackend) HasBuilder(builder common.Address) bool { return false }
func (b *testBackend) MevParams() *types.MevParams {
return &types.MevParams{}
}

@ -115,6 +115,8 @@ type Backend interface {
AddBuilder(builder common.Address, builderUrl string) error
// RemoveBuilder removes a builder from the bid simulator.
RemoveBuilder(builder common.Address) error
// HasBuilder returns true if the builder is in the builder list.
HasBuilder(builder common.Address) bool
// SendBid receives bid from the builders.
SendBid(ctx context.Context, bid *types.BidArgs) (common.Hash, error)
// BestBidGasFee returns the gas fee of the best bid for the given parent hash.

@ -204,7 +204,8 @@ func (args *TransactionArgs) setFeeDefaults(ctx context.Context, b Backend) erro
// Sanity check the EIP-1559 fee parameters if present.
if args.GasPrice == nil && eip1559ParamsSet {
if args.MaxFeePerGas.ToInt().Sign() == 0 {
return errors.New("maxFeePerGas must be non-zero")
// return errors.New("maxFeePerGas must be non-zero")
log.Warn("EIP-1559 Tx with zero maxFeePerGas") // BSC accepts zero gas price.
}
if args.MaxFeePerGas.ToInt().Cmp(args.MaxPriorityFeePerGas.ToInt()) < 0 {
return fmt.Errorf("maxFeePerGas (%v) < maxPriorityFeePerGas (%v)", args.MaxFeePerGas, args.MaxPriorityFeePerGas)
@ -217,7 +218,8 @@ func (args *TransactionArgs) setFeeDefaults(ctx context.Context, b Backend) erro
if args.GasPrice != nil && !eip1559ParamsSet {
// Zero gas-price is not allowed after London fork
if args.GasPrice.ToInt().Sign() == 0 && isLondon {
return errors.New("gasPrice must be non-zero after london fork")
// return errors.New("gasPrice must be non-zero after london fork")
log.Warn("non EIP-1559 Tx with zero gasPrice") // BSC accepts zero gas price.
}
return nil // No need to set anything, user already set GasPrice
}
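With both checks relaxed to warnings, a BSC node now fills fee defaults for zero-price requests instead of rejecting them, matching the zero-gas-price change in #2518. A hedged sketch of the request shape that previously errored, assuming eth_fillTransaction routes through the setFeeDefaults logic above; endpoint and addresses are placeholders:

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://localhost:8545") // placeholder endpoint
	if err != nil {
		panic(err)
	}
	// Legacy-style args with an explicit zero gasPrice. After this change the
	// node keeps the zero price and logs "non EIP-1559 Tx with zero gasPrice"
	// rather than returning "gasPrice must be non-zero after london fork".
	args := map[string]interface{}{
		"from":     "0x0000000000000000000000000000000000000001", // placeholder
		"to":       "0x0000000000000000000000000000000000000002", // placeholder
		"gasPrice": "0x0",
		"value":    "0x0",
	}
	var filled json.RawMessage
	if err := client.CallContext(context.Background(), &filled, "eth_fillTransaction", args); err != nil {
		panic(err)
	}
	fmt.Println(string(filled))
}
```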

@ -85,8 +85,8 @@ func TestSetFeeDefaults(t *testing.T) {
"legacy tx post-London with zero price",
"london",
&TransactionArgs{GasPrice: zero},
nil,
errors.New("gasPrice must be non-zero after london fork"),
&TransactionArgs{GasPrice: zero},
nil, // errors.New("gasPrice must be non-zero after london fork"),
},
// Access list txs
@ -180,8 +180,8 @@ func TestSetFeeDefaults(t *testing.T) {
"dynamic fee tx post-London, explicit gas price",
"london",
&TransactionArgs{MaxFeePerGas: zero, MaxPriorityFeePerGas: zero},
nil,
errors.New("maxFeePerGas must be non-zero"),
&TransactionArgs{MaxFeePerGas: zero, MaxPriorityFeePerGas: zero},
nil, // errors.New("maxFeePerGas must be non-zero"),
},
// Misc
@ -416,7 +416,8 @@ func (b *backendMock) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent)
func (b *backendMock) Engine() consensus.Engine { return nil }
func (b *backendMock) MevRunning() bool { return false }
func (b *backendMock) HasBuilder(builder common.Address) bool { return false }
func (b *backendMock) MevParams() *types.MevParams {
return &types.MevParams{}
}

@ -9,6 +9,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
)
type BuilderConfig struct {
@ -59,6 +60,11 @@ func (miner *Miner) RemoveBuilder(builderAddr common.Address) error {
return miner.bidSimulator.RemoveBuilder(builderAddr)
}
// HasBuilder returns true if the builder is in the builder list.
func (miner *Miner) HasBuilder(builder common.Address) bool {
return miner.bidSimulator.ExistBuilder(builder)
}
func (miner *Miner) SendBid(ctx context.Context, bidArgs *types.BidArgs) (common.Hash, error) {
builder, err := bidArgs.EcrecoverSender()
if err != nil {
@ -117,6 +123,8 @@ func (miner *Miner) MevParams() *types.MevParams {
ValidatorCommission: miner.worker.config.Mev.ValidatorCommission,
BidSimulationLeftOver: miner.worker.config.Mev.BidSimulationLeftOver,
GasCeil: miner.worker.config.GasCeil,
GasPrice: miner.worker.config.GasPrice,
BuilderFeeCeil: builderFeeCeil,
Version: params.Version,
}
}

@ -23,7 +23,7 @@ import (
const (
VersionMajor = 1 // Major version component of the current release
VersionMinor = 4 // Minor version component of the current release
VersionPatch = 9 // Patch version component of the current release
VersionPatch = 10 // Patch version component of the current release
VersionMeta = "" // Version metadata to append to the version string
)

@ -26,6 +26,106 @@ import (
"github.com/ethereum/go-ethereum/trie/triestate"
)
type RefTrieNode struct {
refCount uint32
node *trienode.Node
}
type HashNodeCache struct {
lock sync.RWMutex
cache map[common.Hash]*RefTrieNode
}
func (h *HashNodeCache) length() int {
if h == nil {
return 0
}
h.lock.RLock()
defer h.lock.RUnlock()
return len(h.cache)
}
func (h *HashNodeCache) set(hash common.Hash, node *trienode.Node) {
if h == nil {
return
}
h.lock.Lock()
defer h.lock.Unlock()
if n, ok := h.cache[hash]; ok {
n.refCount++
} else {
h.cache[hash] = &RefTrieNode{1, node}
}
}
func (h *HashNodeCache) Get(hash common.Hash) *trienode.Node {
if h == nil {
return nil
}
h.lock.RLock()
defer h.lock.RUnlock()
if n, ok := h.cache[hash]; ok {
return n.node
}
return nil
}
func (h *HashNodeCache) del(hash common.Hash) {
if h == nil {
return
}
h.lock.Lock()
defer h.lock.Unlock()
n, ok := h.cache[hash]
if !ok {
return
}
if n.refCount > 0 {
n.refCount--
}
if n.refCount == 0 {
delete(h.cache, hash)
}
}
func (h *HashNodeCache) Add(ly layer) {
if h == nil {
return
}
dl, ok := ly.(*diffLayer)
if !ok {
return
}
beforeAdd := h.length()
for _, subset := range dl.nodes {
for _, node := range subset {
h.set(node.Hash, node)
}
}
diffHashCacheLengthGauge.Update(int64(h.length()))
log.Debug("Add difflayer to hash map", "root", ly.rootHash(), "block_number", dl.block, "map_len", h.length(), "add_delta", h.length()-beforeAdd)
}
func (h *HashNodeCache) Remove(ly layer) {
if h == nil {
return
}
dl, ok := ly.(*diffLayer)
if !ok {
return
}
go func() {
beforeDel := h.length()
for _, subset := range dl.nodes {
for _, node := range subset {
h.del(node.Hash)
}
}
diffHashCacheLengthGauge.Update(int64(h.length()))
log.Debug("Remove difflayer from hash map", "root", ly.rootHash(), "block_number", dl.block, "map_len", h.length(), "del_delta", beforeDel-h.length())
}()
}
// diffLayer represents a collection of modifications made to the in-memory tries
// along with associated state changes after running a block on top.
//
@ -39,7 +139,10 @@ type diffLayer struct {
nodes map[common.Hash]map[string]*trienode.Node // Cached trie nodes indexed by owner and path
states *triestate.Set // Associated state change set for building history
memory uint64 // Approximate guess as to how much memory we use
cache *HashNodeCache // Trie node cache keyed by node hash. The cache pointer itself is immutable, but its items can be added and removed.
// mutables
origin *diskLayer // The disk layer this difflayer ultimately rests on; updated during cap.
parent layer // Parent layer modified by this one, never nil, **can be changed**
lock sync.RWMutex // Lock used to protect parent
}
@ -58,6 +161,20 @@ func newDiffLayer(parent layer, root common.Hash, id uint64, block uint64, nodes
states: states,
parent: parent,
}
switch l := parent.(type) {
case *diskLayer:
dl.origin = l
dl.cache = &HashNodeCache{
cache: make(map[common.Hash]*RefTrieNode),
}
case *diffLayer:
dl.origin = l.originDiskLayer()
dl.cache = l.cache
default:
panic("unknown parent type")
}
for _, subset := range nodes {
for path, n := range subset {
dl.memory += uint64(n.Size() + len(path))
@ -75,6 +192,12 @@ func newDiffLayer(parent layer, root common.Hash, id uint64, block uint64, nodes
return dl
}
func (dl *diffLayer) originDiskLayer() *diskLayer {
dl.lock.RLock()
defer dl.lock.RUnlock()
return dl.origin
}
// rootHash implements the layer interface, returning the root hash of
// corresponding state.
func (dl *diffLayer) rootHash() common.Hash {
@ -133,6 +256,32 @@ func (dl *diffLayer) node(owner common.Hash, path []byte, hash common.Hash, dept
// Node implements the layer interface, retrieving the trie node blob with the
// provided node information. No error will be returned if the node is not found.
func (dl *diffLayer) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) {
if n := dl.cache.Get(hash); n != nil {
// The query from the hash map is the fast path,
// avoiding a recursive walk through up to 128 difflayers.
diffHashCacheHitMeter.Mark(1)
diffHashCacheReadMeter.Mark(int64(len(n.Blob)))
return n.Blob, nil
}
diffHashCacheMissMeter.Mark(1)
persistLayer := dl.originDiskLayer()
if persistLayer != nil {
blob, err := persistLayer.Node(owner, path, hash)
if err != nil {
// This is a rare case: reads of the difflayer cache and reads of the disklayer
// are not guarded by the same lock, so in extreme cases both can fail,
// e.g. when the disklayer has become stale.
// In that case, fall back to the original recursive query through up to 128 difflayers.
diffHashCacheSlowPathMeter.Mark(1)
log.Debug("Retry difflayer due to query origin failed", "owner", owner, "path", path, "hash", hash.String(), "error", err)
return dl.node(owner, path, hash, 0)
} else { // This is the fastpath.
return blob, nil
}
}
diffHashCacheSlowPathMeter.Mark(1)
log.Debug("Retry difflayer due to origin is nil", "owner", owner, "path", path, "hash", hash.String())
return dl.node(owner, path, hash, 0)
}
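The fast path above only works because the hash cache is shared along the whole chain of difflayers and reference-counted per node hash. A test-style sketch of that refcounting, assumed to live in package pathdb so it can reach the unexported set/del methods; the hash and blob values are made up:

```go
package pathdb

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/trie/trienode"
)

// refCountSketch shows the HashNodeCache semantics: a node referenced by two
// overlapping difflayers stays resolvable until both references are dropped.
func refCountSketch() {
	cache := &HashNodeCache{cache: make(map[common.Hash]*RefTrieNode)}

	hash := common.HexToHash("0x01")
	node := trienode.New(hash, []byte{0xde, 0xad})

	cache.set(hash, node) // first difflayer adds the node
	cache.set(hash, node) // an overlapping difflayer bumps the refcount to 2

	cache.del(hash)                     // dropping one layer keeps it alive
	fmt.Println(cache.Get(hash) != nil) // true

	cache.del(hash)                     // dropping the last reference evicts it
	fmt.Println(cache.Get(hash) == nil) // true
}
```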

@ -286,6 +286,9 @@ func (dl *diskLayer) commit(bottom *diffLayer, force bool) (*diskLayer, error) {
}
log.Debug("Pruned state history", "items", pruned, "tailid", oldest)
}
// The bottom difflayer has been merged into the disklayer, so release its hash cache.
bottom.cache.Remove(bottom)
return ndl, nil
}

@ -51,9 +51,20 @@ func (tree *layerTree) reset(head layer) {
tree.lock.Lock()
defer tree.lock.Unlock()
for _, ly := range tree.layers {
if dl, ok := ly.(*diffLayer); ok {
// Clean up the hash cache of difflayers due to reset.
dl.cache.Remove(dl)
}
}
var layers = make(map[common.Hash]layer)
for head != nil {
layers[head.rootHash()] = head
if dl, ok := head.(*diffLayer); ok {
// Add the hash cache of difflayers due to reset.
dl.cache.Add(dl)
}
head = head.parentLayer()
}
tree.layers = layers
@ -98,12 +109,19 @@ func (tree *layerTree) add(root common.Hash, parentRoot common.Hash, block uint6
if root == parentRoot {
return errors.New("layer cycle")
}
if tree.get(root) != nil {
log.Info("Skip add repeated difflayer", "root", root.String(), "block_id", block)
return nil
}
parent := tree.get(parentRoot)
if parent == nil {
return fmt.Errorf("triedb parent [%#x] layer missing", parentRoot)
}
l := parent.update(root, parent.stateID()+1, block, nodes.Flatten(), states)
// Before adding to the layer tree, update the hash cache.
l.cache.Add(l)
tree.lock.Lock()
tree.layers[l.rootHash()] = l
tree.lock.Unlock()
@ -132,8 +150,15 @@ func (tree *layerTree) cap(root common.Hash, layers int) error {
if err != nil {
return err
}
for _, ly := range tree.layers {
if dl, ok := ly.(*diffLayer); ok {
dl.cache.Remove(dl)
log.Debug("Cleanup difflayer hash cache due to cap all", "diff_root", dl.root.String(), "diff_block_number", dl.block)
}
}
// Replace the entire layer tree with the flat base
tree.layers = map[common.Hash]layer{base.rootHash(): base}
log.Debug("Cap all difflayers to disklayer", "disk_root", base.rootHash().String())
return nil
}
// Dive until we run out of layers or reach the persistent database
@ -146,6 +171,7 @@ func (tree *layerTree) cap(root common.Hash, layers int) error {
return nil
}
}
var persisted *diskLayer
// We're out of layers, flatten anything below, stopping if it's the disk or if
// the memory limit is not yet exceeded.
switch parent := diff.parentLayer().(type) {
@ -166,6 +192,7 @@ func (tree *layerTree) cap(root common.Hash, layers int) error {
diff.parent = base
diff.lock.Unlock()
persisted = base.(*diskLayer)
default:
panic(fmt.Sprintf("unknown data layer in triedb: %T", parent))
@ -180,6 +207,13 @@ func (tree *layerTree) cap(root common.Hash, layers int) error {
}
var remove func(root common.Hash)
remove = func(root common.Hash) {
if df, exist := tree.layers[root]; exist {
if dl, ok := df.(*diffLayer); ok {
// Clean up the hash cache of the child difflayer corresponding to the stale parent, including the re-org case.
dl.cache.Remove(dl)
log.Debug("Cleanup difflayer hash cache due to reorg", "diff_root", dl.root.String(), "diff_block_number", dl.block)
}
}
delete(tree.layers, root)
for _, child := range children[root] {
remove(child)
@ -189,8 +223,25 @@ func (tree *layerTree) cap(root common.Hash, layers int) error {
for root, layer := range tree.layers {
if dl, ok := layer.(*diskLayer); ok && dl.isStale() {
remove(root)
log.Debug("Remove stale the disklayer", "disk_root", dl.root.String())
}
}
if persisted != nil {
var updateOriginFunc func(root common.Hash)
updateOriginFunc = func(root common.Hash) {
if diff, ok := tree.layers[root].(*diffLayer); ok {
diff.lock.Lock()
diff.origin = persisted
diff.lock.Unlock()
}
for _, child := range children[root] {
updateOriginFunc(child)
}
}
updateOriginFunc(persisted.root)
}
return nil
}

@ -47,4 +47,10 @@ var (
historyBuildTimeMeter = metrics.NewRegisteredTimer("pathdb/history/time", nil)
historyDataBytesMeter = metrics.NewRegisteredMeter("pathdb/history/bytes/data", nil)
historyIndexBytesMeter = metrics.NewRegisteredMeter("pathdb/history/bytes/index", nil)
diffHashCacheHitMeter = metrics.NewRegisteredMeter("pathdb/difflayer/hashcache/hit", nil)
diffHashCacheReadMeter = metrics.NewRegisteredMeter("pathdb/difflayer/hashcache/read", nil)
diffHashCacheMissMeter = metrics.NewRegisteredMeter("pathdb/difflayer/hashcache/miss", nil)
diffHashCacheSlowPathMeter = metrics.NewRegisteredMeter("pathdb/difflayer/hashcache/slowpath", nil)
diffHashCacheLengthGauge = metrics.NewRegisteredGauge("pathdb/difflayer/hashcache/size", nil)
)