Compare commits

...

3 Commits

Author SHA1 Message Date
joeycli
0af2fb4324 feat: support rewind for versa db
fix: rewind ancient store
2024-09-02 17:32:56 +08:00
joeycli
3e8e74d5c2 feat: delete statedb debug system 2024-08-27 09:29:24 +08:00
joeycli
d20bbb4799 feat: integrate versa db
feat: add version commit

chore: forbid versiondb rewind

feat: add mode for new caching db

feat: add version scheme for startup

feat: init genesis for versa db

feat: add caching db and trie copy

feat: support HasState on versa db and must fullsync under versa db

fix: append open storage trie error to statedb

fix: storage tree value encode

fix: add state object trie expire interface

fix: blockchain stateat use rw state

chore: forbid prefetcher

chore: delete storage pool

feat: hold version in state object for repeat search account tree

fix: version mismatch that add contract balance without update storage tree

fix: 373559 blocks issue add breakpoint

feat: add version state debug system

feat: add hash state db debug system

feat: add version and hash state diff system

fix: the timing release debug state instance
2024-08-27 09:01:25 +08:00
30 changed files with 2254 additions and 254 deletions

@ -833,7 +833,7 @@ func dump(ctx *cli.Context) error {
triedb := utils.MakeTrieDatabase(ctx, stack, db, true, true, false) // always enable preimage lookup
defer triedb.Close()
state, err := state.New(root, state.NewDatabaseWithNodeDB(db, triedb), nil)
state, err := state.New(root, state.NewDatabaseWithNodeDB(db, triedb, false), nil)
if err != nil {
return err
}

@ -18,6 +18,7 @@ package main
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"math"
@ -34,6 +35,7 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/console/prompt"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
@ -91,6 +93,9 @@ Remove blockchain and state databases`,
dbHbss2PbssCmd,
dbTrieGetCmd,
dbTrieDeleteCmd,
getVersionDBState,
getHashDBState,
diffDebugStateDB,
},
}
dbInspectCmd = &cli.Command{
@ -286,8 +291,141 @@ WARNING: This is a low-level operation which may cause database corruption!`,
Description: `This commands will read current offset from kvdb, which is the current offset and starting BlockNumber
of ancientStore, will also displays the reserved number of blocks in ancientStore `,
}
getVersionDBState = &cli.Command{
Action: getDebugVersionState,
Name: "get-debug-version-state",
Flags: []cli.Flag{
utils.VersionStateDirFlag,
utils.BlockNumber,
},
}
getHashDBState = &cli.Command{
Action: getDebugHashState,
Name: "get-debug-hash-state",
Flags: []cli.Flag{
utils.HashStateDirFlag,
utils.BlockNumber,
},
}
diffDebugStateDB = &cli.Command{
Action: diffDebugState,
Name: "diff-debug-state",
Flags: []cli.Flag{
utils.VersionStateDirFlag,
utils.HashStateDirFlag,
utils.BlockNumber,
},
}
)
func diffDebugState(ctx *cli.Context) error {
if !ctx.IsSet(utils.VersionStateDirFlag.Name) {
return fmt.Errorf("please set `--versionstatedir` flag")
}
if !ctx.IsSet(utils.BlockNumber.Name) {
return fmt.Errorf("please set `--block` flag")
}
if !ctx.IsSet(utils.HashStateDirFlag.Name) {
return fmt.Errorf("please set `--hashstatedir` flag")
}
verDir := ctx.String(utils.VersionStateDirFlag.Name)
hasDir := ctx.String(utils.HashStateDirFlag.Name)
block := ctx.Int64(utils.BlockNumber.Name)
vdb, err := rawdb.Open(rawdb.OpenOptions{
ReadOnly: true,
Type: "pebble",
Directory: verDir,
})
if err != nil {
return err
}
verData, err := vdb.Get(state.DebugVersionStateKey(block))
if err != nil {
return err
}
verDebugState := &state.DebugVersionState{}
err = json.Unmarshal(verData, verDebugState)
if err != nil {
return err
}
hdb, err := rawdb.Open(rawdb.OpenOptions{
ReadOnly: true,
Type: "pebble",
Directory: hasDir,
})
if err != nil {
return err
}
hashData, err := hdb.Get(state.DebugHashStateKey(block))
if err != nil {
return err
}
hasDebugState := &state.DebugHashState{}
err = json.Unmarshal(hashData, hasDebugState)
if err != nil {
return err
}
res := state.GenerateDebugStateDiff(verDebugState, hasDebugState)
fmt.Println(res)
return nil
}
func getDebugVersionState(ctx *cli.Context) error {
if !ctx.IsSet(utils.VersionStateDirFlag.Name) {
return fmt.Errorf("please set `--versionstatedir` flag")
}
if !ctx.IsSet(utils.BlockNumber.Name) {
return fmt.Errorf("please set `--block` flag")
}
dir := ctx.String(utils.VersionStateDirFlag.Name)
block := ctx.Int64(utils.BlockNumber.Name)
db, err := rawdb.Open(rawdb.OpenOptions{
ReadOnly: true,
Type: "pebble",
Directory: dir,
})
if err != nil {
return err
}
data, err := db.Get(state.DebugVersionStateKey(block))
if err != nil {
return err
}
fmt.Println(string(data))
return nil
}
func getDebugHashState(ctx *cli.Context) error {
if !ctx.IsSet(utils.HashStateDirFlag.Name) {
return fmt.Errorf("please set `--hashstatedir` flag")
}
if !ctx.IsSet(utils.BlockNumber.Name) {
return fmt.Errorf("please set `--block` flag")
}
dir := ctx.String(utils.HashStateDirFlag.Name)
block := ctx.Int64(utils.BlockNumber.Name)
db, err := rawdb.Open(rawdb.OpenOptions{
ReadOnly: true,
Type: "pebble",
Directory: dir,
})
if err != nil {
return err
}
data, err := db.Get(state.DebugHashStateKey(block))
if err != nil {
return err
}
fmt.Println(string(data))
return nil
}
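Assuming the usual wiring of the geth db subcommand group shown above, the new debug commands would presumably be invoked as geth db get-debug-version-state --versionstatedir <dir> --block <n>, geth db get-debug-hash-state --hashstatedir <dir> --block <n>, and geth db diff-debug-state --versionstatedir <dir> --hashstatedir <dir> --block <n>. The first two print the stored per-block debug record as JSON; the third unmarshals both records and prints the result of state.GenerateDebugStateDiff. Both debug databases are opened read-only as pebble instances.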
func removeDB(ctx *cli.Context) error {
stack, config := makeConfigNode(ctx)

@ -353,7 +353,7 @@ var (
}
StateSchemeFlag = &cli.StringFlag{
Name: "state.scheme",
Usage: "Scheme to use for storing ethereum state ('hash' or 'path')",
Usage: "Scheme to use for storing ethereum state ('hash', 'path', 'version')",
Category: flags.StateCategory,
}
PathDBSyncFlag = &cli.BoolFlag{
@ -1135,6 +1135,25 @@ Please note that --` + MetricsHTTPFlag.Name + ` must be set to start the server.
Value: params.DefaultExtraReserveForBlobRequests,
Category: flags.MiscCategory,
}
BlockNumber = &cli.Int64Flag{
Name: "block",
Value: int64(0),
}
VersionStateDirFlag = &flags.DirectoryFlag{
Name: "versionstatedir",
Usage: "Data directory for the version state debug database",
Value: flags.DirectoryString(node.DefaultDataDir()),
Category: flags.EthCategory,
}
HashStateDirFlag = &flags.DirectoryFlag{
Name: "hashstatedir",
Usage: "Data directory for the hash state debug database",
Value: flags.DirectoryString(node.DefaultDataDir()),
Category: flags.EthCategory,
}
)
var (
@ -1953,11 +1972,17 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
if ctx.IsSet(StateHistoryFlag.Name) {
cfg.StateHistory = ctx.Uint64(StateHistoryFlag.Name)
}
scheme, err := ParseCLIAndConfigStateScheme(ctx.String(StateSchemeFlag.Name), cfg.StateScheme)
if err != nil {
Fatalf("%v", err)
if ctx.String(StateSchemeFlag.Name) != rawdb.VersionScheme {
scheme, err := ParseCLIAndConfigStateScheme(ctx.String(StateSchemeFlag.Name), cfg.StateScheme)
if err != nil {
Fatalf("%v", err)
}
cfg.StateScheme = scheme
} else {
// TODO:: make this compatible with both the command line and the config file; currently only the CLI flag is supported.
cfg.StateScheme = rawdb.VersionScheme
}
cfg.StateScheme = scheme
// Parse transaction history flag, if user is still using legacy config
// file with 'TxLookupLimit' configured, copy the value to 'TransactionHistory'.
if cfg.TransactionHistory == ethconfig.Defaults.TransactionHistory && cfg.TxLookupLimit != ethconfig.Defaults.TxLookupLimit {
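As the TODO above notes, the version scheme is currently selectable only from the command line (presumably via --state.scheme version); a value set in the configuration file is not yet honoured. Per the commit notes, snap sync is not supported with the versa db (SnapSyncCommitHead panics for the version scheme further down), so the node must run a full sync.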

@ -196,6 +196,10 @@ func (c *CacheConfig) triedbConfig() *triedb.Config {
JournalFile: c.JournalFile,
}
}
// TODO:: support other versa db config items; currently the default config is used
if c.StateScheme == rawdb.VersionScheme {
config.IsVersion = true
}
return config
}
@ -384,7 +388,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
}
bc.flushInterval.Store(int64(cacheConfig.TrieTimeLimit))
bc.forker = NewForkChoice(bc, shouldPreserve)
bc.stateCache = state.NewDatabaseWithNodeDB(bc.db, bc.triedb)
bc.stateCache = state.NewDatabaseWithNodeDB(bc.db, bc.triedb, true)
bc.validator = NewBlockValidator(chainConfig, bc, engine)
bc.prefetcher = NewStatePrefetcher(chainConfig, bc, engine)
bc.processor = NewStateProcessor(chainConfig, bc, engine)
@ -420,78 +424,101 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
// Make sure the state associated with the block is available, or log out
// if there is no available state, waiting for state sync.
head := bc.CurrentBlock()
if !bc.HasState(head.Root) {
if head.Number.Uint64() == 0 {
// The genesis state is missing, which is only possible in the path-based
// scheme. This situation occurs when the initial state sync is not finished
// yet, or the chain head is rewound below the pivot point. In both scenarios,
// there is no possible recovery approach except for rerunning a snap sync.
// Do nothing here until the state syncer picks it up.
log.Info("Genesis state is missing, wait state sync")
} else {
// Head state is missing, before the state recovery, find out the
// disk layer point of snapshot(if it's enabled). Make sure the
// rewound point is lower than disk layer.
var diskRoot common.Hash
if bc.cacheConfig.SnapshotLimit > 0 {
diskRoot = rawdb.ReadSnapshotRoot(bc.db)
}
if bc.triedb.Scheme() == rawdb.PathScheme && !bc.NoTries() {
recoverable, _ := bc.triedb.Recoverable(diskRoot)
if !bc.HasState(diskRoot) && !recoverable {
diskRoot = bc.triedb.Head()
}
}
if diskRoot != (common.Hash{}) {
log.Warn("Head state missing, repairing", "number", head.Number, "hash", head.Hash(), "diskRoot", diskRoot)
snapDisk, err := bc.setHeadBeyondRoot(head.Number.Uint64(), 0, diskRoot, true)
if err != nil {
return nil, err
}
// Chain rewound, persist old snapshot number to indicate recovery procedure
if snapDisk != 0 {
rawdb.WriteSnapshotRecoveryNumber(bc.db, snapDisk)
}
if bc.triedb.Scheme() != rawdb.VersionScheme {
if !bc.HasState(head.Root) {
if head.Number.Uint64() == 0 {
// The genesis state is missing, which is only possible in the path-based
// scheme. This situation occurs when the initial state sync is not finished
// yet, or the chain head is rewound below the pivot point. In both scenarios,
// there is no possible recovery approach except for rerunning a snap sync.
// Do nothing here until the state syncer picks it up.
log.Info("Genesis state is missing, wait state sync")
} else {
log.Warn("Head state missing, repairing", "number", head.Number, "hash", head.Hash())
if _, err := bc.setHeadBeyondRoot(head.Number.Uint64(), 0, common.Hash{}, true); err != nil {
// Head state is missing, before the state recovery, find out the
// disk layer point of snapshot(if it's enabled). Make sure the
// rewound point is lower than disk layer.
var diskRoot common.Hash
if bc.cacheConfig.SnapshotLimit > 0 {
diskRoot = rawdb.ReadSnapshotRoot(bc.db)
}
if bc.triedb.Scheme() == rawdb.PathScheme && !bc.NoTries() {
recoverable, _ := bc.triedb.Recoverable(diskRoot)
if !bc.HasState(diskRoot) && !recoverable {
diskRoot = bc.triedb.Head()
}
}
if diskRoot != (common.Hash{}) {
log.Warn("Head state missing, repairing", "number", head.Number, "hash", head.Hash(), "diskRoot", diskRoot)
snapDisk, err := bc.setHeadBeyondRoot(head.Number.Uint64(), 0, diskRoot, true)
if err != nil {
return nil, err
}
// Chain rewound, persist old snapshot number to indicate recovery procedure
if snapDisk != 0 {
rawdb.WriteSnapshotRecoveryNumber(bc.db, snapDisk)
}
} else {
log.Warn("Head state missing, repairing", "number", head.Number, "hash", head.Hash())
if _, err := bc.setHeadBeyondRoot(head.Number.Uint64(), 0, common.Hash{}, true); err != nil {
return nil, err
}
}
}
}
} else {
log.Warn("versa db no recovery, rewind in load state")
}
// Ensure that a previous crash in SetHead doesn't leave extra ancients
if bc.triedb.Scheme() != rawdb.VersionScheme {
if frozen, err := bc.db.BlockStore().ItemAmountInAncient(); err == nil && frozen > 0 {
frozen, err = bc.db.BlockStore().Ancients()
if err != nil {
return nil, err
}
var (
needRewind bool
low uint64
)
// The head full block may be rolled back to a very low height due to
// blockchain repair. If the head full block is even lower than the ancient
// chain, truncate the ancient store.
fullBlock := bc.CurrentBlock()
if fullBlock != nil && fullBlock.Hash() != bc.genesisBlock.Hash() && fullBlock.Number.Uint64() < frozen-1 {
needRewind = true
low = fullBlock.Number.Uint64()
}
// In snap sync, it may happen that ancient data has been written to the
// ancient store, but the LastFastBlock has not been updated, truncate the
// extra data here.
snapBlock := bc.CurrentSnapBlock()
if snapBlock != nil && snapBlock.Number.Uint64() < frozen-1 {
needRewind = true
if snapBlock.Number.Uint64() < low || low == 0 {
low = snapBlock.Number.Uint64()
}
}
if needRewind {
log.Error("Truncating ancient chain", "from", bc.CurrentHeader().Number.Uint64(), "to", low)
if err := bc.SetHead(low); err != nil {
return nil, err
}
}
}
}
// Ensure that a previous crash in SetHead doesn't leave extra ancients
if frozen, err := bc.db.BlockStore().ItemAmountInAncient(); err == nil && frozen > 0 {
frozen, err = bc.db.BlockStore().Ancients()
} else {
// TODO:: need to consider offline and in-line block pruning
frozen, err := bc.db.BlockStore().Ancients()
if err != nil {
return nil, err
}
items, err := bc.db.BlockStore().ItemAmountInAncient()
if err != nil {
return nil, err
}
var (
needRewind bool
low uint64
)
// The head full block may be rolled back to a very low height due to
// blockchain repair. If the head full block is even lower than the ancient
// chain, truncate the ancient store.
fullBlock := bc.CurrentBlock()
if fullBlock != nil && fullBlock.Hash() != bc.genesisBlock.Hash() && fullBlock.Number.Uint64() < frozen-1 {
needRewind = true
low = fullBlock.Number.Uint64()
}
// In snap sync, it may happen that ancient data has been written to the
// ancient store, but the LastFastBlock has not been updated, truncate the
// extra data here.
snapBlock := bc.CurrentSnapBlock()
if snapBlock != nil && snapBlock.Number.Uint64() < frozen-1 {
needRewind = true
if snapBlock.Number.Uint64() < low || low == 0 {
low = snapBlock.Number.Uint64()
}
}
if needRewind {
log.Error("Truncating ancient chain", "from", bc.CurrentHeader().Number.Uint64(), "to", low)
if err := bc.SetHead(low); err != nil {
log.Info("version mode rewind ancient store", "target", fullBlock.Number.Uint64(), "old head", frozen, "items", items, "offset", bc.db.BlockStore().AncientOffSet())
if frozen >= fullBlock.Number.Uint64() {
if _, err = bc.db.BlockStore().TruncateTail(fullBlock.Number.Uint64()); err != nil {
return nil, err
}
}
@ -698,20 +725,54 @@ func (bc *BlockChain) getFinalizedNumber(header *types.Header) uint64 {
// loadLastState loads the last known chain state from the database. This method
// assumes that the chain manager mutex is held.
func (bc *BlockChain) loadLastState() error {
// Restore the last known head block
head := rawdb.ReadHeadBlockHash(bc.db)
if head == (common.Hash{}) {
// Corrupt or empty database, init from scratch
log.Warn("Empty database, resetting chain")
return bc.Reset()
}
// Make sure the entire head block is available
headBlock := bc.GetBlockByHash(head)
if headBlock == nil {
// Corrupt or empty database, init from scratch
log.Warn("Head block missing, resetting chain", "hash", head)
return bc.Reset()
// TODO:: until versa db supports recovery, only rewind is performed here
var headBlock *types.Block
if bc.triedb.Scheme() == rawdb.VersionScheme {
head := rawdb.ReadHeadBlockHash(bc.db)
headBlock = bc.GetBlockByHash(head)
versa := bc.triedb.VersaDB()
archiveVersion, _ := versa.LatestStoreDiskVersionInfo()
// empty chain
if archiveVersion == -1 {
archiveVersion = 0
}
if int64(headBlock.NumberU64()) < archiveVersion {
log.Crit("versa db disk version large than header block", "head number", headBlock.NumberU64(), "versa archive number", archiveVersion)
}
log.Info("begin rewind versa db head", "target", archiveVersion)
for {
if int64(headBlock.NumberU64()) == archiveVersion {
rawdb.WriteCanonicalHash(bc.db, headBlock.Hash(), headBlock.NumberU64())
rawdb.WriteHeadHeaderHash(bc.db, headBlock.Hash())
rawdb.WriteHeadBlockHash(bc.db, headBlock.Hash())
rawdb.WriteHeadFastBlockHash(bc.db, headBlock.Hash())
log.Info("reset versa db head block", "number", headBlock.NumberU64(), "hash", headBlock.Hash())
break
}
headBlock = rawdb.ReadBlock(bc.db, headBlock.ParentHash(), headBlock.NumberU64()-1)
if headBlock == nil {
panic("versa db rewind head is nil")
}
}
} else {
// Restore the last known head block
head := rawdb.ReadHeadBlockHash(bc.db)
if head == (common.Hash{}) {
// Corrupt or empty database, init from scratch
log.Warn("Empty database, resetting chain")
return bc.Reset()
}
// Make sure the entire head block is available
headBlock = bc.GetBlockByHash(head)
if headBlock == nil {
// Corrupt or empty database, init from scratch
log.Warn("Head block missing, resetting chain", "hash", head)
return bc.Reset()
}
}
log.Info("load state head block", "number", headBlock.NumberU64())
// Everything seems to be fine, set as the head block
bc.currentBlock.Store(headBlock.Header())
@ -1165,6 +1226,10 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha
// SnapSyncCommitHead sets the current head block to the one defined by the hash
// irrelevant what the chain contents were prior.
func (bc *BlockChain) SnapSyncCommitHead(hash common.Hash) error {
// TODO:: snap sync is temporarily not supported
if bc.triedb.Scheme() == rawdb.VersionScheme {
panic("version db not support snap sync")
}
// Make sure that both the block as well at its state trie exists
block := bc.GetBlockByHash(hash)
if block == nil {
@ -1376,48 +1441,50 @@ func (bc *BlockChain) Stop() {
}
bc.snaps.Release()
}
if bc.triedb.Scheme() == rawdb.PathScheme {
// Ensure that the in-memory trie nodes are journaled to disk properly.
if err := bc.triedb.Journal(bc.CurrentBlock().Root); err != nil {
log.Info("Failed to journal in-memory trie nodes", "err", err)
}
} else {
// Ensure the state of a recent block is also stored to disk before exiting.
// We're writing three different states to catch different restart scenarios:
// - HEAD: So we don't need to reprocess any blocks in the general case
// - HEAD-1: So we don't do large reorgs if our HEAD becomes an uncle
// - HEAD-127: So we have a hard limit on the number of blocks reexecuted
if !bc.cacheConfig.TrieDirtyDisabled {
triedb := bc.triedb
var once sync.Once
for _, offset := range []uint64{0, 1, TriesInMemory - 1} {
if number := bc.CurrentBlock().Number.Uint64(); number > offset {
recent := bc.GetBlockByNumber(number - offset)
log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
if err := triedb.Commit(recent.Root(), true); err != nil {
log.Error("Failed to commit recent state trie", "err", err)
} else {
rawdb.WriteSafePointBlockNumber(bc.db, recent.NumberU64())
once.Do(func() {
rawdb.WriteHeadBlockHash(bc.db.BlockStore(), recent.Hash())
})
if bc.triedb.Scheme() != rawdb.VersionScheme {
if bc.triedb.Scheme() == rawdb.PathScheme {
// Ensure that the in-memory trie nodes are journaled to disk properly.
if err := bc.triedb.Journal(bc.CurrentBlock().Root); err != nil {
log.Info("Failed to journal in-memory trie nodes", "err", err)
}
} else {
// Ensure the state of a recent block is also stored to disk before exiting.
// We're writing three different states to catch different restart scenarios:
// - HEAD: So we don't need to reprocess any blocks in the general case
// - HEAD-1: So we don't do large reorgs if our HEAD becomes an uncle
// - HEAD-127: So we have a hard limit on the number of blocks reexecuted
if !bc.cacheConfig.TrieDirtyDisabled {
triedb := bc.triedb
var once sync.Once
for _, offset := range []uint64{0, 1, TriesInMemory - 1} {
if number := bc.CurrentBlock().Number.Uint64(); number > offset {
recent := bc.GetBlockByNumber(number - offset)
log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
if err := triedb.Commit(recent.Root(), true); err != nil {
log.Error("Failed to commit recent state trie", "err", err)
} else {
rawdb.WriteSafePointBlockNumber(bc.db, recent.NumberU64())
once.Do(func() {
rawdb.WriteHeadBlockHash(bc.db.BlockStore(), recent.Hash())
})
}
}
}
}
if snapBase != (common.Hash{}) {
log.Info("Writing snapshot state to disk", "root", snapBase)
if err := triedb.Commit(snapBase, true); err != nil {
log.Error("Failed to commit recent state trie", "err", err)
} else {
rawdb.WriteSafePointBlockNumber(bc.db, bc.CurrentBlock().Number.Uint64())
if snapBase != (common.Hash{}) {
log.Info("Writing snapshot state to disk", "root", snapBase)
if err := triedb.Commit(snapBase, true); err != nil {
log.Error("Failed to commit recent state trie", "err", err)
} else {
rawdb.WriteSafePointBlockNumber(bc.db, bc.CurrentBlock().Number.Uint64())
}
}
for !bc.triegc.Empty() {
triedb.Dereference(bc.triegc.PopItem())
}
if _, size, _, _ := triedb.Size(); size != 0 {
log.Error("Dangling trie nodes after full cleanup")
}
}
for !bc.triegc.Empty() {
triedb.Dereference(bc.triegc.PopItem())
}
if _, size, _, _ := triedb.Size(); size != 0 {
log.Error("Dangling trie nodes after full cleanup")
}
}
}
@ -1813,7 +1880,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
// If the node is not running in hash mode (i.e. path or version scheme), skip
// the explicit gc operation, which is unnecessary in those modes.
if bc.triedb.Scheme() == rawdb.PathScheme {
if bc.triedb.Scheme() != rawdb.HashScheme {
return nil
}
@ -2232,28 +2299,36 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1)
}
if bc.stateCache.Scheme() != rawdb.VersionScheme {
if block.NumberU64() == 2000001 {
log.Crit("exit.... path mode, 200w blocks")
}
}
bc.stateCache.SetVersion(int64(block.NumberU64()))
statedb, err := state.NewWithSharedPool(parent.Root, bc.stateCache, bc.snaps)
if err != nil {
bc.stateCache.Release()
return it.index, err
}
bc.updateHighestVerifiedHeader(block.Header())
// Enable prefetching to pull in trie node paths while processing transactions
statedb.StartPrefetcher("chain")
//statedb.StartPrefetcher("chain")
interruptCh := make(chan struct{})
// For diff sync, it may fallback to full sync, so we still do prefetch
if len(block.Transactions()) >= prefetchTxNumber {
// do Prefetch in a separate goroutine to avoid blocking the critical path
// 1.do state prefetch for snapshot cache
throwaway := statedb.CopyDoPrefetch()
go bc.prefetcher.Prefetch(block, throwaway, &bc.vmConfig, interruptCh)
// 2.do trie prefetch for MPT trie node cache
// it is for the big state trie tree, prefetch based on transaction's From/To address.
// trie prefetcher is thread safe now, ok to prefetch in a separate routine
go throwaway.TriePrefetchInAdvance(block, signer)
}
//if len(block.Transactions()) >= prefetchTxNumber {
// // do Prefetch in a separate goroutine to avoid blocking the critical path
//
// // 1.do state prefetch for snapshot cache
// throwaway := statedb.CopyDoPrefetch()
// go bc.prefetcher.Prefetch(block, throwaway, &bc.vmConfig, interruptCh)
//
// // 2.do trie prefetch for MPT trie node cache
// // it is for the big state trie tree, prefetch based on transaction's From/To address.
// // trie prefetcher is thread safe now, ok to prefetch in a separate routine
// go throwaway.TriePrefetchInAdvance(block, signer)
//}
// Process block using the parent state as reference point
if bc.pipeCommit {
@ -2264,6 +2339,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
statedb, receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig)
close(interruptCh) // state prefetch can be stopped
if err != nil {
bc.stateCache.Release()
bc.reportBlock(block, receipts, err)
statedb.StopPrefetcher()
return it.index, err
@ -2273,7 +2349,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
// Validate the state using the default validator
vstart := time.Now()
if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil {
log.Error("validate state failed", "error", err)
bc.stateCache.Release()
bc.reportBlock(block, receipts, err)
statedb.StopPrefetcher()
return it.index, err
@ -2309,8 +2385,10 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
status, err = bc.writeBlockAndSetHead(block, receipts, logs, statedb, false)
}
if err != nil {
bc.stateCache.Release()
return it.index, err
}
bc.stateCache.Release()
bc.cacheReceipts(block.Hash(), receipts, block)

@ -348,8 +348,7 @@ func (bc *BlockChain) HasState(hash common.Hash) bool {
return true
}
}
_, err := bc.stateCache.OpenTrie(hash)
return err == nil
return bc.stateCache.HasState(hash)
}
// HasBlockAndState checks if a block and associated state trie is fully present
@ -396,7 +395,8 @@ func (bc *BlockChain) State() (*state.StateDB, error) {
// StateAt returns a new mutable state based on a particular point in time.
func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
stateDb, err := state.New(root, bc.stateCache, bc.snaps)
// new state db with no need commit mode
stateDb, err := state.New(root, state.NewDatabaseWithNodeDB(bc.db, bc.triedb, false), bc.snaps)
if err != nil {
return nil, err
}

@ -401,7 +401,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
defer triedb.Close()
for i := 0; i < n; i++ {
statedb, err := state.New(parent.Root(), state.NewDatabaseWithNodeDB(db, triedb), nil)
statedb, err := state.New(parent.Root(), state.NewDatabaseWithNodeDB(db, triedb, true), nil)
if err != nil {
panic(err)
}

@ -126,7 +126,10 @@ func hashAlloc(ga *types.GenesisAlloc, isVerkle bool) (common.Hash, error) {
}
// Create an ephemeral in-memory database for computing hash,
// all the derived states will be discarded to not pollute disk.
db := state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), config)
db := state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), config, true)
log.Info("genesis calc root hash use hash mode triedb")
db.SetVersion(0)
defer db.Release()
statedb, err := state.New(types.EmptyRootHash, db, nil)
if err != nil {
return common.Hash{}, err
@ -154,7 +157,10 @@ func flushAlloc(ga *types.GenesisAlloc, db ethdb.Database, triedb *triedb.Databa
if triedbConfig != nil {
triedbConfig.NoTries = false
}
statedb, err := state.New(types.EmptyRootHash, state.NewDatabaseWithNodeDB(db, triedb), nil)
cachingdb := state.NewDatabaseWithNodeDB(db, triedb, true)
cachingdb.SetVersion(0)
defer cachingdb.Release()
statedb, err := state.New(types.EmptyRootHash, cachingdb, nil)
if err != nil {
return err
}
@ -174,7 +180,7 @@ func flushAlloc(ga *types.GenesisAlloc, db ethdb.Database, triedb *triedb.Databa
return err
}
// Commit newly generated states into disk if it's not empty.
if root != types.EmptyRootHash {
if root != types.EmptyRootHash && triedb.Scheme() != rawdb.VersionScheme {
if err := triedb.Commit(root, true); err != nil {
return err
}

@ -46,6 +46,8 @@ const HashScheme = "hash"
// on extra state diffs to survive deep reorg.
const PathScheme = "path"
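// VersionScheme is the state scheme backed by the versioned state database
// (versa db).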
const VersionScheme = "version"
// hasher is used to compute the sha256 hash of the provided data.
type hasher struct{ sha crypto.KeccakState }
@ -314,7 +316,7 @@ func ReadStateScheme(db ethdb.Reader) string {
// ValidateStateScheme used to check state scheme whether is valid.
// Valid state schemes: hash, path and version.
func ValidateStateScheme(stateScheme string) bool {
if stateScheme == HashScheme || stateScheme == PathScheme {
if stateScheme == HashScheme || stateScheme == PathScheme || stateScheme == VersionScheme {
return true
}
return false

@ -0,0 +1,521 @@
package state
import (
"errors"
"fmt"
"sync/atomic"
versa "github.com/bnb-chain/versioned-state-database"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/lru"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/trienode"
"github.com/ethereum/go-ethereum/triedb"
)
const InvalidSateObjectVersion int64 = math.MinInt64
type cachingVersaDB struct {
version int64
triedb *triedb.Database
versionDB versa.Database
codeDB ethdb.KeyValueStore
codeSizeCache *lru.Cache[common.Hash, int]
codeCache *lru.SizeConstrainedCache[common.Hash, []byte]
accTree *VersaTree
state versa.StateHandler
root common.Hash
mode versa.StateMode
hasState atomic.Bool
//debug *DebugVersionState
}
// NewVersaDatabase should be called by NewDatabaseWithNodeDB
// TODO:: NewDatabaseWithNodeDB should take a mode parameter.
func NewVersaDatabase(db ethdb.Database, triedb *triedb.Database, mode versa.StateMode) Database {
return &cachingVersaDB{
triedb: triedb,
versionDB: triedb.VersaDB(),
codeDB: db,
codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize),
codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize),
mode: mode,
state: versa.ErrStateHandler,
}
}
func (cv *cachingVersaDB) Copy() Database {
cp := &cachingVersaDB{}
cp.codeCache = cv.codeCache
cp.codeSizeCache = cv.codeSizeCache
cp.triedb = cv.triedb
cp.versionDB = cv.versionDB
cp.codeDB = cv.codeDB
cp.mode = versa.S_RW // important: copies must run in read-write (S_RW) mode, not commit mode
// TODO:: maybe add lock for cv.root
if cv.hasState.Load() {
_, err := cp.OpenTrie(cv.root)
if err != nil {
//if cv.debug != nil {
// cv.debug.OnError(fmt.Errorf("failed to open trie in copy caching versa db, error: %s", err.Error()))
//}
return cp
}
}
return cp
}
// CopyTrie is used with Copy()
func (cv *cachingVersaDB) CopyTrie(tr Trie) Trie {
vtr, ok := tr.(*VersaTree)
if !ok {
panic("caching versa db copy non versa tree")
}
if vtr.accountTree {
if cv.accTree != nil {
if cv.accTree.root.Cmp(vtr.root) != 0 {
panic("copy acc trie mismatch")
}
return cv.accTree
}
tree, err := cv.OpenTrie(vtr.root)
if err != nil {
//if cv.debug != nil {
// cv.debug.OnError(fmt.Errorf("failed to open trie in copy versa trie, error: %s", err.Error()))
//}
return nil
}
return tree
} else {
tree, err := cv.OpenStorageTrie(vtr.stateRoot, vtr.address, vtr.root, nil)
if err != nil {
//if cv.debug != nil {
// cv.debug.OnError(fmt.Errorf("failed to open storage trie in copy versa trie, error: %s", err.Error()))
//}
return nil
}
return tree
}
return nil
}
func (cv *cachingVersaDB) HasState(root common.Hash) bool {
return cv.versionDB.HasState(root)
}
func (cv *cachingVersaDB) OpenTrie(root common.Hash) (Trie, error) {
if cv.hasState.Load() {
//TODO:: will change to log.Error after stabilization
panic("account tree has open")
}
// TODO:: for the root tree, versa db should skip the version check; temporarily use -1
state, err := cv.versionDB.OpenState(cv.version, root, cv.mode)
if err != nil {
//if cv.debug != nil {
// cv.debug.OnError(fmt.Errorf("failed to open state, root:%s, error: %s", root.String(), err.Error()))
//}
return nil, err
}
//if cv.debug != nil {
// cv.debug.OnOpenState(state)
//}
handler, err := cv.versionDB.OpenTree(state, cv.version, common.Hash{}, root)
if err != nil {
//if cv.debug != nil {
// cv.debug.OnError(fmt.Errorf("failed to open account trie, root:%s, error: %s", root.String(), err.Error()))
//}
return nil, err
}
tree := &VersaTree{
db: cv.versionDB,
handler: handler,
accountTree: true,
root: root,
mode: cv.mode,
//debug: cv.debug,
}
cv.state = state
cv.hasState.Store(true) // if set, can't change
cv.accTree = tree
cv.root = root
//if cv.debug != nil {
// cv.debug.OnOpenTree(handler, common.Hash{}, common.Address{})
//}
return tree, nil
}
func (cv *cachingVersaDB) OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash, _ Trie) (Trie, error) {
version, _, err := cv.accTree.getAccountWithVersion(address)
if err != nil {
return nil, err
}
return cv.openStorageTreeWithVersion(version, stateRoot, address, root)
}
func (cv *cachingVersaDB) openStorageTreeWithVersion(version int64, stateRoot common.Hash, address common.Address, root common.Hash) (Trie, error) {
if !cv.hasState.Load() {
//TODO:: will change to log.Error after stabilization
panic("open account tree, before open storage tree")
}
if cv.root.Cmp(stateRoot) != 0 {
panic(fmt.Sprintf("account root mismatch, on open storage tree, actual: %s, expect: %s", root.String(), cv.root.String()))
}
owner := crypto.Keccak256Hash(address.Bytes())
handler, err := cv.versionDB.OpenTree(cv.state, version, owner, root)
if err != nil {
//if cv.debug != nil {
// cv.debug.OnError(fmt.Errorf("failed to open storage trie, version: %d,stateRoot:%s, address:%s, root: %s, error: %s",
// version, stateRoot.String(), address.String(), root.String(), err.Error()))
//}
return nil, err
}
//if cv.debug != nil {
// cv.debug.OnOpenTree(handler, owner, address)
//}
tree := &VersaTree{
db: cv.versionDB,
handler: handler,
version: version,
root: stateRoot,
stateRoot: root,
address: address,
mode: cv.mode,
//debug: cv.debug,
}
return tree, nil
}
// Flush is unique to the versa db
func (cv *cachingVersaDB) Flush() error {
err := cv.versionDB.Flush(cv.state)
//if err != nil && cv.debug != nil {
// cv.debug.OnError(fmt.Errorf("failed to flush state, version: %d, root:%s, mode:%d, error: %s",
// cv.accTree.version, cv.accTree.root.String(), cv.accTree.mode, err.Error()))
//}
return err
}
func (cv *cachingVersaDB) SetVersion(version int64) {
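// record the parent version: the state for the block being processed is
// opened on top of the parent block's root, hence version - 1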
cv.version = version - 1
//cv.debug = NewDebugVersionState(cv.codeDB, cv.versionDB)
//cv.debug.Version = version
}
func (cv *cachingVersaDB) GetVersion() int64 {
return cv.version
}
// Release is unique to the versa db
func (cv *cachingVersaDB) Release() error {
//log.Info("close state", "state info", cv.versionDB.ParseStateHandler(cv.state))
if cv.state != versa.ErrStateHandler {
//if cv.debug != nil {
// cv.debug.OnCloseState(cv.state)
//}
if err := cv.versionDB.CloseState(cv.state); err != nil {
//if cv.debug != nil {
// cv.debug.OnError(fmt.Errorf("failed to close state in release, version: %d, root:%s, mode:%d, error: %s",
// cv.accTree.version, cv.accTree.root.String(), cv.accTree.mode, err.Error()))
//}
return err
}
cv.hasState.Store(false)
cv.accTree = nil
cv.state = versa.ErrStateHandler
cv.root = common.Hash{}
//cv.debug = nil
}
return nil
}
func (cv *cachingVersaDB) Reset() {
if cv.state != versa.ErrStateHandler {
_ = cv.versionDB.CloseState(cv.state)
panic("close state in reset")
}
cv.hasState.Store(false)
cv.accTree = nil
cv.state = versa.ErrStateHandler
cv.root = common.Hash{}
}
func (cv *cachingVersaDB) HasTreeExpired(tr Trie) bool {
vtr, ok := tr.(*VersaTree)
if !ok {
panic("trie type mismatch")
}
return cv.versionDB.HasTreeExpired(vtr.handler)
}
func (cv *cachingVersaDB) Scheme() string {
return cv.triedb.Scheme()
}
func (cv *cachingVersaDB) ContractCode(addr common.Address, codeHash common.Hash) ([]byte, error) {
//if cv.debug != nil {
// cv.debug.OnGetCode(addr, codeHash)
//}
code, _ := cv.codeCache.Get(codeHash)
if len(code) > 0 {
return code, nil
}
code = rawdb.ReadCode(cv.codeDB, codeHash)
if len(code) > 0 {
cv.codeCache.Add(codeHash, code)
cv.codeSizeCache.Add(codeHash, len(code))
return code, nil
}
return nil, errors.New("not found")
}
func (cv *cachingVersaDB) ContractCodeSize(addr common.Address, codeHash common.Hash) (int, error) {
if cached, ok := cv.codeSizeCache.Get(codeHash); ok {
return cached, nil
}
code, err := cv.ContractCode(addr, codeHash)
return len(code), err
}
func (cv *cachingVersaDB) ContractCodeWithPrefix(address common.Address, codeHash common.Hash) ([]byte, error) {
code, _ := cv.codeCache.Get(codeHash)
if len(code) > 0 {
return code, nil
}
code = rawdb.ReadCodeWithPrefix(cv.codeDB, codeHash)
if len(code) > 0 {
cv.codeCache.Add(codeHash, code)
cv.codeSizeCache.Add(codeHash, len(code))
return code, nil
}
return nil, errors.New("not found")
}
func (cv *cachingVersaDB) DiskDB() ethdb.KeyValueStore {
return cv.codeDB
}
func (cv *cachingVersaDB) TrieDB() *triedb.Database {
return cv.triedb
}
func (cv *cachingVersaDB) NoTries() bool {
// TODO:: fastnode is not supported
return false
}
type VersaTree struct {
db versa.Database
handler versa.TreeHandler
version int64
accountTree bool
//debug *DebugVersionState
// TODO:: debugging, used for logging
stateRoot common.Hash
root common.Hash
address common.Address
mode versa.StateMode
}
func (vt *VersaTree) GetKey(key []byte) []byte {
_, val, err := vt.db.Get(vt.handler, key)
if err != nil {
log.Warn("failed to get key from version db")
}
return val
}
func (vt *VersaTree) GetAccount(address common.Address) (*types.StateAccount, error) {
_, res, err := vt.getAccountWithVersion(address)
return res, err
}
func (vt *VersaTree) getAccountWithVersion(address common.Address) (int64, *types.StateAccount, error) {
vt.CheckAccountTree()
ver, res, err := vt.db.Get(vt.handler, address.Bytes())
if res == nil || err != nil {
//if err != nil && vt.debug != nil {
// vt.debug.OnError(fmt.Errorf("failed to get account, root: %s, address: %s, error: %s",
// vt.root.String(), address.String(), err.Error()))
//}
return ver, nil, err
}
ret := new(types.StateAccount)
err = rlp.DecodeBytes(res, ret)
if err != nil {
//if vt.debug != nil {
// vt.debug.OnError(fmt.Errorf("failed to rlp decode account, root: %s, address: %s, error: %s",
// vt.root.String(), address.String(), err.Error()))
//}
}
//if vt.debug != nil {
// vt.debug.OnGetAccount(address, ret)
//}
return ver, ret, err
}
func (vt *VersaTree) GetStorage(address common.Address, key []byte) ([]byte, error) {
if vt.address.Cmp(address) != 0 {
panic(fmt.Sprintf("address mismatch in get storage, expect: %s, actul: %s", vt.address.String(), address.String()))
}
vt.CheckStorageTree()
_, enc, err := vt.db.Get(vt.handler, key)
if err != nil || len(enc) == 0 {
//if err != nil && vt.debug != nil {
// vt.debug.OnError(fmt.Errorf("failed to get storage, root: %s, stateRoot: %s, address:%s, key: %s, error: %s",
// vt.root.String(), vt.stateRoot.String(), address.String(), common.Bytes2Hex(key), err.Error()))
//}
return nil, err
}
_, content, _, err := rlp.Split(enc)
if err != nil {
//if vt.debug != nil {
// vt.debug.OnError(fmt.Errorf("failed to rlp decode storage, root: %s, stateRoot: %s, address: %s, key: %s,error: %s",
// vt.root.String(), vt.stateRoot.String(), address.String(), common.Bytes2Hex(key), err.Error()))
//}
}
//if vt.debug != nil {
// vt.debug.OnGetStorage(vt.handler, address, key, content)
//}
return content, err
}
func (vt *VersaTree) UpdateAccount(address common.Address, account *types.StateAccount) error {
vt.CheckAccountTree()
data, err := rlp.EncodeToBytes(account)
if err != nil {
//if vt.debug != nil {
// vt.debug.OnError(fmt.Errorf("failed to update account, root: %s, address: %s, account: %s, error: %s",
// vt.root.String(), address.String(), account.String(), err.Error()))
//}
return err
}
//if vt.debug != nil {
// vt.debug.OnUpdateAccount(address, account)
//}
return vt.db.Put(vt.handler, address.Bytes(), data)
}
func (vt *VersaTree) UpdateStorage(address common.Address, key, value []byte) error {
if vt.address.Cmp(address) != 0 {
panic(fmt.Sprintf("address mismatch in get storage, expect: %s, actul: %s", vt.address.String(), address.String()))
}
vt.CheckStorageTree()
v, _ := rlp.EncodeToBytes(value)
err := vt.db.Put(vt.handler, key, v)
if err != nil {
//if vt.debug != nil {
// vt.debug.OnError(fmt.Errorf("failed to update storage, root: %s, stateRoot: %s, address: %s, key: %s, val: %s, error: %s",
// vt.root.String(), vt.stateRoot.String(), address.String(), common.Bytes2Hex(key), common.Bytes2Hex(value), err.Error()))
//}
}
//if vt.debug != nil {
// vt.debug.OnUpdateStorage(vt.handler, address, key, value)
//}
return err
}
func (vt *VersaTree) DeleteAccount(address common.Address) error {
vt.CheckAccountTree()
err := vt.db.Delete(vt.handler, address.Bytes())
if err != nil {
//if vt.debug != nil {
// vt.debug.OnError(fmt.Errorf("failed to delete account, root: %s, address: %s, error: %s",
// vt.root.String(), address.String(), err.Error()))
//}
}
//if vt.debug != nil {
// vt.debug.OnDeleteAccount(address)
//}
return err
}
func (vt *VersaTree) DeleteStorage(address common.Address, key []byte) error {
vt.CheckStorageTree()
err := vt.db.Delete(vt.handler, key)
if err != nil {
//if vt.debug != nil {
// vt.debug.OnError(fmt.Errorf("failed to delete storage, root: %s, stateRoot: %s, address: %s, key: %s, error: %s",
// vt.root.String(), vt.stateRoot.String(), address.String(), common.Bytes2Hex(key), err.Error()))
//}
}
//if vt.debug != nil {
// vt.debug.OnDeleteStorage(vt.handler, address, key)
//}
return err
}
func (vt *VersaTree) UpdateContractCode(address common.Address, codeHash common.Hash, code []byte) error {
return nil
}
func (vt *VersaTree) Hash() common.Hash {
hash, err := vt.db.CalcRootHash(vt.handler)
if err != nil {
//if vt.debug != nil {
// vt.debug.OnError(fmt.Errorf("failed to calc root, root: %s, stateRoot%s, error:%s",
// vt.root.String(), vt.stateRoot.String(), err.Error()))
//}
}
//if vt.debug != nil {
// vt.debug.OnCalcHash(vt.address, hash)
//}
return hash
}
func (vt *VersaTree) Commit(_ bool) (common.Hash, *trienode.NodeSet, error) {
hash, err := vt.db.Commit(vt.handler)
if err != nil {
//if vt.debug != nil {
// vt.debug.OnError(fmt.Errorf("failed to commit versa tree, root: %s, stateRoot: %s, error: %s",
// vt.root.String(), vt.stateRoot.String(), err.Error()))
//}
}
//if vt.debug != nil {
// vt.debug.OnCalcHash(vt.address, hash)
// vt.debug.OnCommitTree(vt.address, vt.handler)
//}
return hash, nil, err
}
func (vt *VersaTree) NodeIterator(startKey []byte) (trie.NodeIterator, error) {
panic("versa tree not support iterate")
return nil, nil
}
func (vt *VersaTree) Prove(key []byte, proofDb ethdb.KeyValueWriter) error {
panic("versa tree not support prove")
return nil
}
// TODO:: debug code, will be deleted after stabilization
func (vt *VersaTree) CheckAccountTree() {
if !vt.accountTree {
panic("sub tree can't operate account")
}
}
// TODO:: debug code, will be deleted after stabilization
func (vt *VersaTree) CheckStorageTree() {
if vt.accountTree {
panic("root tree can't operate storage")
}
}

@ -20,6 +20,7 @@ import (
"errors"
"fmt"
versa "github.com/bnb-chain/versioned-state-database"
"github.com/crate-crypto/go-ipa/banderwagon"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/lru"
@ -70,8 +71,33 @@ type Database interface {
// TrieDB returns the underlying trie database for managing trie nodes.
TrieDB() *triedb.Database
// Scheme returns the triedb scheme, used to distinguish the version triedb.
Scheme() string
// Flush is used by the caching versa db to commit the block's state data.
Flush() error
// Release is used by the caching versa db to release its resources.
Release() error
// Reset is used by the caching versa db to clean up its per-block metadata.
Reset()
// Copy is used by the caching versa db to copy the database, mainly to hand the triedb over in read-write (non-commit) mode.
Copy() Database
// HasState reports whether the state data for the given root exists in the triedb.
HasState(root common.Hash) bool
// HasTreeExpired is used by the caching versa db to report whether the state that the opened tree belongs to has already been closed.
HasTreeExpired(tr Trie) bool
// NoTries returns whether the database has tries storage.
NoTries() bool
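// SetVersion pins the caching db to the number of the block about to be
// processed (the versa backend records the parent version internally);
// GetVersion returns the recorded version. Both are effectively no-ops for
// the hash/path cachingDB.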
SetVersion(version int64)
GetVersion() int64
}
// Trie is a Ethereum Merkle Patricia trie.
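The comments above describe the calling contract of the extended Database interface. The following is a minimal sketch, not code from this change set, of the assumed per-block order of operations under the version scheme; it is based on the call sites in core/blockchain.go, processOneBlock is a hypothetical helper, and the exact point at which Flush is invoked inside the commit path is not visible in this diff.

package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
)

func processOneBlock(stateCache state.Database, parentRoot common.Hash, block *types.Block) error {
	// Pin the db to the block being processed; for the versa backend this
	// records the parent version internally (SetVersion stores version-1).
	stateCache.SetVersion(int64(block.NumberU64()))

	// Open mutable state on the parent root. Snapshots must be nil when the
	// version scheme is active, since state.New panics otherwise.
	statedb, err := state.New(parentRoot, stateCache, nil)
	if err != nil {
		stateCache.Release() // close the opened versa state handler
		return err
	}
	_ = statedb // ... execute transactions and commit the state here ...

	// Flush persists the block's state data in the versa backend (a no-op for
	// the hash/path cachingDB); Release then closes the state handler and
	// clears the per-block metadata so the db can be reused for the next block.
	if err := stateCache.Flush(); err != nil {
		stateCache.Release()
		return err
	}
	return stateCache.Release()
}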
@ -148,28 +174,43 @@ type Trie interface {
// concurrent use, but does not retain any recent trie nodes in memory. To keep some
// historical state in memory, use the NewDatabaseWithConfig constructor.
func NewDatabase(db ethdb.Database) Database {
return NewDatabaseWithConfig(db, nil)
return NewDatabaseWithConfig(db, nil, false)
}
// NewDatabaseWithConfig creates a backing store for state. The returned database
// is safe for concurrent use and retains a lot of collapsed RLP trie nodes in a
// large memory cache.
func NewDatabaseWithConfig(db ethdb.Database, config *triedb.Config) Database {
func NewDatabaseWithConfig(db ethdb.Database, config *triedb.Config, needCommit bool) Database {
noTries := config != nil && config.NoTries
triedb := triedb.NewDatabase(db, config)
if triedb.Scheme() == rawdb.VersionScheme {
if needCommit {
return NewVersaDatabase(db, triedb, versa.S_COMMIT)
}
return NewVersaDatabase(db, triedb, versa.S_RW)
}
return &cachingDB{
disk: db,
codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize),
codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize),
triedb: triedb.NewDatabase(db, config),
triedb: triedb,
noTries: noTries,
}
}
// NewDatabaseWithNodeDB creates a state database with an already initialized node database.
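// The needCommit flag only matters for the version scheme, where it selects
// the commit (versa.S_COMMIT) or read-write (versa.S_RW) state mode of the
// versa backend.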
func NewDatabaseWithNodeDB(db ethdb.Database, triedb *triedb.Database) Database {
func NewDatabaseWithNodeDB(db ethdb.Database, triedb *triedb.Database, needCommit bool) Database {
noTries := triedb != nil && triedb.Config() != nil && triedb.Config().NoTries
if triedb.Scheme() == rawdb.VersionScheme {
if needCommit {
return NewVersaDatabase(db, triedb, versa.S_COMMIT)
}
return NewVersaDatabase(db, triedb, versa.S_RW)
}
return &cachingDB{
disk: db,
codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize),
@ -185,6 +226,8 @@ type cachingDB struct {
codeCache *lru.SizeConstrainedCache[common.Hash, []byte]
triedb *triedb.Database
noTries bool
//debug *DebugHashState
}
// OpenTrie opens the main account trie at a specific root hash.
@ -197,8 +240,22 @@ func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) {
}
tr, err := trie.NewStateTrie(trie.StateTrieID(root), db.triedb)
if err != nil {
//if db.debug != nil {
// db.debug.OnError(fmt.Errorf("failed to open tree, root: %s, error: %s", root.String(), err.Error()))
//}
return nil, err
}
//ht := &HashTrie{
// trie: tr,
// root: root,
// address: common.Address{},
// owner: common.Hash{},
// debug: db.debug,
//}
//if db.debug != nil {
// db.debug.OnOpenTree(root, common.Hash{}, common.Address{})
//}
//return ht, nil
return tr, nil
}
@ -214,10 +271,27 @@ func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Addre
if db.triedb.IsVerkle() {
return self, nil
}
tr, err := trie.NewStateTrie(trie.StorageTrieID(stateRoot, crypto.Keccak256Hash(address.Bytes()), root), db.triedb)
owner := crypto.Keccak256Hash(address.Bytes())
tr, err := trie.NewStateTrie(trie.StorageTrieID(stateRoot, owner, root), db.triedb)
if err != nil {
//if db.debug != nil {
// db.debug.OnError(fmt.Errorf("failed to storage open tree, stateRoot: %s, address: %s, root: %s, error: %s",
// stateRoot.String(), address.String(), root.String(), err.Error()))
//}
return nil, err
}
//ht := &HashTrie{
// trie: tr,
// root: stateRoot,
// statRoot: root,
// address: address,
// owner: owner,
// debug: db.debug,
//}
//if db.debug != nil {
// db.debug.OnOpenTree(root, owner, address)
//}
//return ht, nil
return tr, nil
}
@ -235,6 +309,8 @@ func (db *cachingDB) CopyTrie(t Trie) Trie {
return t.Copy()
case *trie.EmptyTrie:
return t.Copy()
//case *HashTrie:
// return db.CopyTrie(t.trie)
default:
panic(fmt.Errorf("unknown trie type %T", t))
}
@ -242,6 +318,9 @@ func (db *cachingDB) CopyTrie(t Trie) Trie {
// ContractCode retrieves a particular contract's code.
func (db *cachingDB) ContractCode(address common.Address, codeHash common.Hash) ([]byte, error) {
//if db.debug != nil {
// db.debug.OnGetCode(address, codeHash)
//}
code, _ := db.codeCache.Get(codeHash)
if len(code) > 0 {
return code, nil
@ -290,3 +369,179 @@ func (db *cachingDB) DiskDB() ethdb.KeyValueStore {
func (db *cachingDB) TrieDB() *triedb.Database {
return db.triedb
}
func (db *cachingDB) Reset() {
return
}
func (db *cachingDB) Scheme() string {
return db.triedb.Scheme()
}
func (db *cachingDB) Flush() error {
return nil
}
func (db *cachingDB) Release() error {
//db.debug.flush()
//db.debug = nil
return nil
}
func (db *cachingDB) SetVersion(version int64) {
//db.debug = NewDebugHashState(db.disk)
//db.debug.Version = version
}
func (db *cachingDB) GetVersion() int64 {
//return db.debug.Version
return 0
}
func (db *cachingDB) Copy() Database {
return db
}
func (db *cachingDB) HasState(root common.Hash) bool {
_, err := db.OpenTrie(root)
return err == nil
}
func (db *cachingDB) HasTreeExpired(_ Trie) bool {
return false
}
//type HashTrie struct {
// trie Trie
// root common.Hash
// statRoot common.Hash
// address common.Address
// owner common.Hash
//
// debug *DebugHashState
//}
//
//func (ht *HashTrie) GetKey(key []byte) []byte {
// return ht.trie.GetKey(key)
//}
//
//func (ht *HashTrie) GetAccount(address common.Address) (*types.StateAccount, error) {
// acc, err := ht.trie.GetAccount(address)
// if err != nil {
// if ht.debug != nil {
// ht.debug.OnError(fmt.Errorf("failed to get account, address: %s, error: %s", address.String(), err.Error()))
// }
// return nil, err
// }
// if ht.debug != nil {
// ht.debug.OnGetAccount(address, acc)
// }
// return acc, nil
//}
//
//func (ht *HashTrie) GetStorage(addr common.Address, key []byte) ([]byte, error) {
// val, err := ht.trie.GetStorage(addr, key)
// if err != nil {
// if ht.debug != nil {
// ht.debug.OnError(fmt.Errorf("failed to get storage, address: %s, error: %s", addr.String(), err.Error()))
// }
// return val, err
// }
// if ht.debug != nil {
// ht.debug.OnGetStorage(addr, key, val)
// }
// return val, err
//}
//
//func (ht *HashTrie) UpdateAccount(address common.Address, account *types.StateAccount) error {
// err := ht.trie.UpdateAccount(address, account)
// if err != nil {
// if ht.debug != nil {
// ht.debug.OnError(fmt.Errorf("failed to update account, address: %s, account: %s, error: %s",
// address.String(), account.String(), err.Error()))
// }
// return err
// }
// if ht.debug != nil {
// ht.debug.OnUpdateAccount(address, account)
// }
// return nil
//}
//
//func (ht *HashTrie) UpdateStorage(addr common.Address, key, value []byte) error {
// err := ht.trie.UpdateStorage(addr, key, value)
// if err != nil {
// if ht.debug != nil {
// ht.debug.OnError(fmt.Errorf("failed to update storage, address: %s, key: %s, val: %s, error: %s",
// addr.String(), common.Bytes2Hex(key), common.Bytes2Hex(value), err.Error()))
// }
// return err
// }
// if ht.debug != nil {
// ht.debug.OnUpdateStorage(addr, key, value)
// }
// return nil
//}
//
//func (ht *HashTrie) DeleteAccount(address common.Address) error {
// err := ht.trie.DeleteAccount(address)
// if err != nil {
// if ht.debug != nil {
// ht.debug.OnError(fmt.Errorf("failed to delete account, address: %s, error: %s", address.String(), err.Error()))
// }
// return err
// }
// if ht.debug != nil {
// ht.debug.OnDeleteAccount(address)
// }
// return nil
//}
//
//func (ht *HashTrie) DeleteStorage(addr common.Address, key []byte) error {
// err := ht.trie.DeleteStorage(addr, key)
// if err != nil {
// if ht.debug != nil {
// ht.debug.OnError(fmt.Errorf("failed to update storage, address: %s, key: %s, error: %s",
// addr.String(), common.Bytes2Hex(key), err.Error()))
// }
// return err
// }
// if ht.debug != nil {
// ht.debug.OnDeleteStorage(addr, key)
// }
// return nil
//}
//
//func (ht *HashTrie) UpdateContractCode(address common.Address, codeHash common.Hash, code []byte) error {
// return ht.trie.UpdateContractCode(address, codeHash, code)
//}
//
//func (ht *HashTrie) Hash() common.Hash {
// root := ht.trie.Hash()
// if ht.debug != nil {
// ht.debug.OnCalcHash(ht.address, root)
// }
// return root
//}
//
//func (ht *HashTrie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) {
// hash, set, err := ht.trie.Commit(collectLeaf)
// if err != nil {
// ht.debug.OnError(fmt.Errorf("failed to commit tree, address: %s, error: %s",
// ht.address.String(), err.Error()))
// return hash, set, err
// }
// if ht.debug != nil {
// ht.debug.OnCalcHash(ht.address, hash)
// ht.debug.OnCommitTree(ht.address, hash)
// }
// return hash, set, nil
//}
//
//func (ht *HashTrie) NodeIterator(startKey []byte) (trie.NodeIterator, error) {
// return ht.trie.NodeIterator(startKey)
//}
//
//func (ht *HashTrie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error {
// return ht.trie.Prove(key, proofDb)
//}

@ -161,7 +161,7 @@ func (s *StateDB) DumpToCollector(c DumpCollector, conf *DumpConfig) (nextKey []
address = &addr
account.Address = address
}
obj := newObject(s, addr, &data)
obj := newObject(s, addr, &data, InvalidSateObjectVersion)
if !conf.SkipCode {
account.Code = obj.Code()
}

@ -232,6 +232,7 @@ func pruneAll(maindb ethdb.Database, g *core.Genesis) error {
}
log.Info("Database compaction finished", "elapsed", common.PrettyDuration(time.Since(cstart)))
}
// the pruner should not be used with the version db
statedb, _ := state.New(common.Hash{}, state.NewDatabase(maindb), nil)
for addr, account := range g.Alloc {
statedb.AddBalance(addr, uint256.MustFromBig(account.Balance))

@ -24,6 +24,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/metrics"
@ -63,6 +64,7 @@ func (s Storage) Copy() Storage {
// - Finally, call commit to return the changes of storage trie and update account data.
type stateObject struct {
db *StateDB
version int64
address common.Address // address of ethereum account
addrHash common.Hash // hash of ethereum address of the account
origin *types.StateAccount // Account original data without any change applied, nil means it was not existent
@ -99,7 +101,7 @@ func (s *stateObject) empty() bool {
}
// newObject creates a state object.
func newObject(db *StateDB, address common.Address, acct *types.StateAccount) *stateObject {
func newObject(db *StateDB, address common.Address, acct *types.StateAccount, version int64) *stateObject {
var (
origin = acct
created = acct == nil // true if the account was not existent
@ -112,9 +114,9 @@ func newObject(db *StateDB, address common.Address, acct *types.StateAccount) *s
if db != nil && db.storagePool != nil {
storageMap = db.GetStorage(address)
}
return &stateObject{
db: db,
version: version,
address: address,
addrHash: crypto.Keccak256Hash(address[:]),
origin: origin,
@ -158,8 +160,18 @@ func (s *stateObject) getTrie() (Trie, error) {
// s.trie = s.db.prefetcher.trie(s.addrHash, s.data.Root)
// }
// if s.trie == nil {
tr, err := s.db.db.OpenStorageTrie(s.db.originalRoot, s.address, s.data.Root, s.db.trie)
var (
tr Trie
err error
)
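// If a versa version was recorded when the account was loaded, reuse it to
// open the storage tree at that version directly instead of searching the
// account tree again.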
if s.version == InvalidSateObjectVersion {
tr, err = s.db.db.OpenStorageTrie(s.db.originalRoot, s.address, s.data.Root, s.db.trie)
} else {
tr, err = s.db.db.(*cachingVersaDB).openStorageTreeWithVersion(s.version, s.db.originalRoot, s.address, s.data.Root)
}
if err != nil {
panic(fmt.Sprintf("open storage storage failed, root version: %d, storage version: %d, addrss: %s, storage root: %s, error: %s", s.db.db.GetVersion(), s.version, s.address.String(), s.data.Root.String(), err.Error()))
return nil, err
}
s.trie = tr
@ -229,6 +241,7 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash {
value common.Hash
)
if s.db.snap != nil {
panic("snap is not nil")
start := time.Now()
enc, err = s.db.snap.Storage(s.addrHash, crypto.Keccak256Hash(key.Bytes()))
if metrics.EnabledExpensive {
@ -302,6 +315,30 @@ func (s *stateObject) finalise(prefetch bool) {
}
}
func (s *stateObject) IsContractAccount() bool {
return s.data.Root.Cmp(types.EmptyRootHash) != 0 ||
bytes.Compare(s.data.CodeHash, types.EmptyCodeHash.Bytes()) != 0
}
func (s *stateObject) IsAccountChanged() bool {
if s.origin == nil {
return true
}
if s.data.Nonce != s.origin.Nonce {
return true
}
if s.data.Balance.Cmp(s.origin.Balance) != 0 {
return true
}
if s.data.Root.Cmp(s.origin.Root) != 0 {
return true
}
if bytes.Compare(s.data.CodeHash, s.origin.CodeHash) != 0 {
return true
}
return false
}
// updateTrie is responsible for persisting cached storage changes into the
// object's storage trie. In case the storage trie is not yet loaded, this
// function will load the trie automatically. If any issues arise during the
@ -312,10 +349,28 @@ func (s *stateObject) updateTrie() (Trie, error) {
// Make sure all dirty slots are finalized into the pending storage area
s.finalise(false)
// Short circuit if nothing changed, don't bother with hashing anything
if len(s.pendingStorage) == 0 {
return s.trie, nil
// Fix for the issue at block 33740: a balance was added to the 1002 contract
// without updating its storage tree, so the account version no longer matched
// the storage tree version, which caused an error when opening the 1002
// storage tree at block 53409.
if s.db.db.Scheme() == rawdb.VersionScheme {
if len(s.pendingStorage) == 0 {
// When transferring balance to a contract or upgrading its code without
// updating any storage key, a commit is still required to increment the
// version number of the storage tree.
if !s.IsContractAccount() {
return s.trie, nil
}
//if !s.IsAccountChanged() {
// return s.trie, nil
//}
}
} else {
// Short circuit if nothing changed, don't bother with hashing anything
if len(s.pendingStorage) == 0 {
return s.trie, nil
}
}
// Track the amount of time wasted on updating the storage trie
if metrics.EnabledExpensive {
defer func(start time.Time) {
@ -511,6 +566,7 @@ func (s *stateObject) setBalance(amount *uint256.Int) {
}
func (s *stateObject) deepCopy(db *StateDB) *stateObject {
// TODO:: debug code, will be deleted in the future
obj := &stateObject{
db: db,
address: s.address,

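A minimal sketch (not part of the diff) of the updateTrie short-circuit rule introduced above: under the version scheme a contract account still commits its storage tree even when no slots changed, so the tree's version stays in step with the account. The helper name is hypothetical.

package state

import "github.com/ethereum/go-ethereum/core/rawdb"

// needsStorageCommit reports whether updateTrie should proceed even though no
// pending storage slots exist. Hash/path schemes keep the old short circuit;
// the version scheme must still bump the storage-tree version for contracts.
func needsStorageCommit(scheme string, pendingSlots int, isContract bool) bool {
	if pendingSlots > 0 {
		return true
	}
	return scheme == rawdb.VersionScheme && isContract
}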
@ -166,12 +166,18 @@ func NewWithSharedPool(root common.Hash, db Database, snaps *snapshot.Tree) (*St
if err != nil {
return nil, err
}
statedb.storagePool = NewStoragePool()
//statedb.storagePool = NewStoragePool()
return statedb, nil
}
// New creates a new state from a given trie.
func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) {
if db.Scheme() == rawdb.VersionScheme && snaps != nil {
panic("statedb snapshot must be nil in version db.")
}
// clean up previous traces
db.Reset()
sdb := &StateDB{
db: db,
originalRoot: root,
@ -196,6 +202,7 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error)
sdb.snap = sdb.snaps.Snapshot(root)
}
// Only one account trie should be opened
tr, err := db.OpenTrie(root)
// Return the error when opening the trie failed and either the snap is nil, or the snap exists and has finished verification
if err != nil && (sdb.snap == nil || sdb.snap.Verified()) {
@ -222,6 +229,9 @@ func (s *StateDB) TransferPrefetcher(prev *StateDB) {
prev.prefetcherLock.Lock()
fetcher = prev.prefetcher
prev.prefetcher = nil
if fetcher != nil {
panic("TransferPrefetcher is not nil")
}
prev.prefetcherLock.Unlock()
s.prefetcherLock.Lock()
@ -243,6 +253,8 @@ func (s *StateDB) StartPrefetcher(namespace string) {
s.prefetcher = nil
}
if s.snap != nil {
// TODO:: debug code, will be deleted in the future
panic("snapshot is not nil, prefetcher is about to start")
parent := s.snap.Parent()
if parent != nil {
s.prefetcher = newTriePrefetcher(s.db, s.originalRoot, parent.Root(), namespace)
@ -305,6 +317,7 @@ func (s *StateDB) EnablePipeCommit() {
if s.snap != nil && s.snaps.Layers() > 1 {
// after big merge, disable pipeCommit for now,
// because `s.db.TrieDB().Update` should be called after `s.trie.Commit(true)`
panic("snapshot is not nil")
s.pipeCommit = false
}
}
@ -323,6 +336,8 @@ func (s *StateDB) MarkFullProcessed() {
func (s *StateDB) setError(err error) {
if s.dbErr == nil {
s.dbErr = err
} else {
s.dbErr = fmt.Errorf("%v, %v", s.dbErr, err)
}
}
@ -338,6 +353,8 @@ func (s *StateDB) Error() error {
// Not thread safe
func (s *StateDB) Trie() (Trie, error) {
if s.trie == nil {
// TODO:: debug code, will be deleted in the future.
panic("state get trie is nil")
err := s.WaitPipeVerification()
if err != nil {
return nil, err
@ -719,6 +736,7 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
// If no live objects are available, attempt to use snapshots
var data *types.StateAccount
if s.snap != nil {
panic("snapshot is not nil")
start := time.Now()
acc, err := s.snap.Account(crypto.HashData(s.hasher, addr.Bytes()))
if metrics.EnabledExpensive {
@ -743,9 +761,12 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
}
}
version := InvalidSateObjectVersion
// If snapshot unavailable or reading from it failed, load from the database
if data == nil {
if s.trie == nil {
// TODO:: debug code, will be deleted in the future.
panic("getDeletedStateObject get trie is nil")
tr, err := s.db.OpenTrie(s.originalRoot)
if err != nil {
s.setError(errors.New("failed to open trie tree"))
@ -755,7 +776,12 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
}
start := time.Now()
var err error
data, err = s.trie.GetAccount(addr)
if vtr, ok := s.trie.(*VersaTree); ok {
version, data, err = vtr.getAccountWithVersion(addr)
} else {
data, err = s.trie.GetAccount(addr)
}
if metrics.EnabledExpensive {
s.AccountReads += time.Since(start)
}
@ -768,7 +794,7 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
}
}
// Insert into the live set
obj := newObject(s, addr, data)
obj := newObject(s, addr, data, version)
s.setStateObject(obj)
return obj
}
@ -790,7 +816,7 @@ func (s *StateDB) getOrNewStateObject(addr common.Address) *stateObject {
// the given address, it is overwritten and returned as the second return value.
func (s *StateDB) createObject(addr common.Address) (newobj, prev *stateObject) {
prev = s.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that!
newobj = newObject(s, addr, nil)
newobj = newObject(s, addr, nil, InvalidSateObjectVersion)
if prev == nil {
s.journal.append(createObjectChange{account: &addr})
} else {
@ -859,10 +885,12 @@ func (s *StateDB) CopyDoPrefetch() *StateDB {
// If doPrefetch is true, it tries to reuse the prefetcher, the copied StateDB will do active trie prefetch.
// otherwise, just do inactive copy trie prefetcher.
func (s *StateDB) copyInternal(doPrefetch bool) *StateDB {
db := s.db.Copy()
tr := db.CopyTrie(s.trie)
// Copy all the basic fields, initialize the memory ones
state := &StateDB{
db: s.db,
trie: s.db.CopyTrie(s.trie),
db: db,
trie: tr,
// noTrie:s.noTrie,
// expectedRoot: s.expectedRoot,
// stateRoot: s.stateRoot,
@ -1003,6 +1031,7 @@ func (s *StateDB) GetRefund() uint64 {
func (s *StateDB) WaitPipeVerification() error {
// Need to wait for the parent trie to commit
if s.snap != nil {
panic("snapshot is not nil")
if valid := s.snap.WaitAndGetVerifyRes(); !valid {
return errors.New("verification on parent snap failed")
}
@ -1108,6 +1137,7 @@ func (s *StateDB) PopulateSnapAccountAndStorage() {
for addr := range s.stateObjectsPending {
if obj := s.stateObjects[addr]; !obj.deleted {
if s.snap != nil {
panic("snapshot is not nil")
s.populateSnapStorage(obj)
s.accounts[obj.addrHash] = types.SlimAccountRLP(obj.data)
}
@ -1210,6 +1240,8 @@ func (s *StateDB) StateIntermediateRoot() common.Hash {
}
}
if s.trie == nil {
// TODO:: debug code, will be deleted in the future.
panic("StateIntermediateRoot get trie is nil")
tr, err := s.db.OpenTrie(s.originalRoot)
if err != nil {
panic(fmt.Sprintf("failed to open trie tree %s", s.originalRoot))
@ -1369,6 +1401,7 @@ func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root
// generated, or it's internally corrupted. Fallback to the slow
// one just in case.
if s.snap != nil {
panic("snapshot is not nil")
aborted, size, slots, nodes, err = s.fastDeleteStorage(addrHash, root)
}
if s.snap == nil || err != nil {
@ -1422,7 +1455,8 @@ func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) (map[common.A
// considerable time and storage deletion isn't supported in hash mode, thus
// preemptively avoiding unnecessary expenses.
incomplete := make(map[common.Address]struct{})
if s.db.TrieDB().Scheme() == rawdb.HashScheme {
// Only the path-based scheme needs to handle incomplete storage trie destruction
if s.db.TrieDB().Scheme() != rawdb.PathScheme {
return incomplete, nil
}
for addr, prev := range s.stateObjectsDestruct {
@ -1498,6 +1532,7 @@ func (s *StateDB) Commit(block uint64, failPostCommitFunc func(), postCommitFunc
)
if s.snap != nil {
panic("snapshot is not nil")
diffLayer = &types.DiffLayer{}
}
if s.pipeCommit {
@ -1613,18 +1648,33 @@ func (s *StateDB) Commit(block uint64, failPostCommitFunc func(), postCommitFunc
origin = types.EmptyRootHash
}
if root != origin {
if s.db.Scheme() == rawdb.VersionScheme {
// flush and release will occur regardless of whether the root changes
start := time.Now()
set := triestate.New(s.accountsOrigin, s.storagesOrigin, incomplete)
if err := s.db.TrieDB().Update(root, origin, block, nodes, set); err != nil {
if err := s.db.Flush(); err != nil {
return err
}
if err := s.db.Release(); err != nil {
return err
}
s.originalRoot = root
if metrics.EnabledExpensive {
s.TrieDBCommits += time.Since(start)
}
if s.onCommit != nil {
s.onCommit(set)
} else {
if root != origin {
start := time.Now()
set := triestate.New(s.accountsOrigin, s.storagesOrigin, incomplete)
if err := s.db.TrieDB().Update(root, origin, block, nodes, set); err != nil {
return err
}
s.originalRoot = root
if metrics.EnabledExpensive {
s.TrieDBCommits += time.Since(start)
}
if s.onCommit != nil {
s.onCommit(set)
}
}
}
}
@ -1663,8 +1713,21 @@ func (s *StateDB) Commit(block uint64, failPostCommitFunc func(), postCommitFunc
// Write any contract code associated with the state object
if obj.code != nil && obj.dirtyCode {
rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code)
//switch d := s.db.(type) {
//case *cachingVersaDB:
// if d.debug != nil {
// d.debug.OnUpdateCode(obj.address, common.BytesToHash(obj.CodeHash()))
// }
//case *cachingDB:
// if d.debug != nil {
// d.debug.OnUpdateCode(obj.address, common.BytesToHash(obj.CodeHash()))
// }
//default:
// panic("caching db type error")
//}
obj.dirtyCode = false
if s.snap != nil {
panic("snapshot is not nil")
diffLayer.Codes = append(diffLayer.Codes, types.DiffCode{
Hash: common.BytesToHash(obj.CodeHash()),
Code: obj.code,
@ -1690,6 +1753,7 @@ func (s *StateDB) Commit(block uint64, failPostCommitFunc func(), postCommitFunc
func() error {
// If snapshotting is enabled, update the snapshot tree with this new version
if s.snap != nil {
panic("snapshot is not nil")
if metrics.EnabledExpensive {
defer func(start time.Time) { s.SnapshotCommits += time.Since(start) }(time.Now())
}

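To make the new Commit branch easier to follow, here is a condensed sketch of the flush path, assuming the Database interface exposes Scheme/Flush/Release/TrieDB exactly as used in this diff; the function name is hypothetical.

package state

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/trie/trienode"
	"github.com/ethereum/go-ethereum/trie/triestate"
)

// flushTrieChanges mirrors the branch above: the version scheme flushes and
// releases its state handle on every block, while hash/path schemes only push
// an update into the trie database when the root actually changed.
func flushTrieChanges(db Database, root, origin common.Hash, block uint64,
	nodes *trienode.MergedNodeSet, set *triestate.Set) error {
	if db.Scheme() == rawdb.VersionScheme {
		if err := db.Flush(); err != nil {
			return err
		}
		return db.Release()
	}
	if root != origin {
		return db.TrieDB().Update(root, origin, block, nodes, set)
	}
	return nil
}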
@ -0,0 +1,207 @@
package state
import (
"encoding/json"
"fmt"
"github.com/ethereum/go-ethereum/common"
)
var (
VersionState = "version"
HashState = "hash"
)
type DebugStateDiff struct {
DiffUpdateAccount map[string][]*VersaAccountInfo
DiffDeleteAccount map[string][]common.Address
DiffUpdateStorage map[string][]*VersaStorageInfo
DiffDeleteStorage map[string][]*VersaStorageInfo
DiffCalcHash map[string]map[common.Address]common.Hash
OwnerMap map[common.Address]common.Hash
DiffErrs map[string][]string
}
func (df *DebugStateDiff) diffUpdateAccount(vs []*VersaAccountInfo, hs []*VersaAccountInfo) {
count := len(vs)
if count > len(hs) {
count = len(hs)
}
idx := 0
for ; idx < count; idx++ {
if vs[idx].Address.Cmp(hs[idx].Address) != 0 {
break
}
if vs[idx].Account.Nonce != hs[idx].Account.Nonce {
break
}
if vs[idx].Account.Balance.Cmp(hs[idx].Account.Balance) != 0 {
break
}
if vs[idx].Account.Root.Cmp(hs[idx].Account.Root) != 0 {
break
}
if common.BytesToHash(vs[idx].Account.CodeHash).Cmp(common.BytesToHash(hs[idx].Account.CodeHash)) != 0 {
break
}
}
if idx < len(vs) {
df.DiffUpdateAccount[VersionState] = vs[idx:]
}
if idx < len(hs) {
df.DiffUpdateAccount[HashState] = hs[idx:]
}
return
}
func (df *DebugStateDiff) diffDeleteAccount(vs []common.Address, hs []common.Address) {
count := len(vs)
if count > len(hs) {
count = len(hs)
}
idx := 0
for ; idx < count; idx++ {
if vs[idx].Cmp(hs[idx]) != 0 {
break
}
}
if idx < len(vs) {
df.DiffDeleteAccount[VersionState] = vs[idx:]
}
if idx < len(hs) {
df.DiffDeleteAccount[HashState] = hs[idx:]
}
return
}
func (df *DebugStateDiff) diffUpdateStorage(vs []*VersaStorageInfo, hs []*VersaStorageInfo) {
count := len(vs)
if count > len(hs) {
count = len(hs)
}
idx := 0
for ; idx < count; idx++ {
if vs[idx].Address.Cmp(hs[idx].Address) != 0 {
break
}
if vs[idx].Key != hs[idx].Key {
break
}
if vs[idx].Val != hs[idx].Val {
break
}
}
if idx < len(vs) {
df.DiffUpdateStorage[VersionState] = vs[idx:]
}
if idx < len(hs) {
df.DiffUpdateStorage[HashState] = hs[idx:]
}
return
}
func (df *DebugStateDiff) diffDeleteStorage(vs []*VersaStorageInfo, hs []*VersaStorageInfo) {
count := len(vs)
if count > len(hs) {
count = len(hs)
}
idx := 0
for ; idx < count; idx++ {
if vs[idx].Address.Cmp(hs[idx].Address) != 0 {
break
}
if vs[idx].Key != hs[idx].Key {
break
}
}
if idx < len(vs) {
df.DiffDeleteStorage[VersionState] = vs[idx:]
}
if idx < len(hs) {
df.DiffDeleteStorage[HashState] = hs[idx:]
}
return
}
func (df *DebugStateDiff) diffCalcHash(vs map[common.Address]common.Hash, hs map[common.Address]common.Hash) {
record := make(map[common.Address]struct{})
for address, vch := range vs {
record[address] = struct{}{}
hch, ok := hs[address]
if !ok {
df.DiffCalcHash[VersionState][address] = vch
continue
}
if vch.Cmp(hch) != 0 {
df.DiffCalcHash[VersionState][address] = vch
df.DiffCalcHash[HashState][address] = hch
}
}
for address := range record {
delete(vs, address)
delete(hs, address)
}
for address, hash := range vs {
df.DiffCalcHash[VersionState][address] = hash
}
for address, hash := range hs {
df.DiffCalcHash[HashState][address] = hash
}
}
func GenerateDebugStateDiff(vs *DebugVersionState, hs *DebugHashState) string {
diff := &DebugStateDiff{
DiffUpdateAccount: make(map[string][]*VersaAccountInfo),
DiffDeleteAccount: make(map[string][]common.Address),
DiffUpdateStorage: make(map[string][]*VersaStorageInfo),
DiffDeleteStorage: make(map[string][]*VersaStorageInfo),
DiffCalcHash: make(map[string]map[common.Address]common.Hash),
OwnerMap: make(map[common.Address]common.Hash),
DiffErrs: make(map[string][]string),
}
diff.DiffUpdateAccount[VersionState] = make([]*VersaAccountInfo, 0)
diff.DiffUpdateAccount[HashState] = make([]*VersaAccountInfo, 0)
diff.DiffDeleteAccount[VersionState] = make([]common.Address, 0)
diff.DiffDeleteAccount[HashState] = make([]common.Address, 0)
diff.DiffUpdateStorage[VersionState] = make([]*VersaStorageInfo, 0)
diff.DiffUpdateStorage[HashState] = make([]*VersaStorageInfo, 0)
diff.DiffDeleteStorage[VersionState] = make([]*VersaStorageInfo, 0)
diff.DiffDeleteStorage[HashState] = make([]*VersaStorageInfo, 0)
diff.DiffCalcHash[VersionState] = make(map[common.Address]common.Hash)
diff.DiffCalcHash[HashState] = make(map[common.Address]common.Hash)
diff.DiffErrs[VersionState] = make([]string, 0)
diff.DiffErrs[HashState] = make([]string, 0)
diff.diffUpdateAccount(vs.UpdateAccounts, hs.UpdateAccounts)
diff.diffDeleteAccount(vs.DeleteAccounts, hs.DeleteAccounts)
diff.diffUpdateStorage(vs.UpdateStorage, hs.UpdateStorage)
diff.diffDeleteStorage(vs.DeleteStorage, hs.DeleteStorage)
diff.diffCalcHash(vs.CalcHash, hs.CalcHash)
for address := range diff.DiffCalcHash[HashState] {
diff.OwnerMap[address] = hs.StorageAddr2Owner[address]
}
for address := range diff.DiffCalcHash[VersionState] {
diff.OwnerMap[address] = vs.StorageAddr2Owner[address]
}
if len(vs.Errs) != 0 || len(hs.Errs) != 0 {
diff.DiffErrs[VersionState] = vs.Errs
diff.DiffErrs[HashState] = hs.Errs
}
data, err := json.Marshal(diff)
if err != nil {
panic(fmt.Sprintf("failed to json encode debug info, err: %s", err.Error()))
}
return string(data)
}
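The four diff helpers above all follow the same pattern: walk both sorted slices until the first mismatch and report the remaining tails of each side. A minimal generic sketch of that comparison strategy, illustrative only and not part of the diff:

// divergence compares a and b element by element with eq and returns the
// suffixes starting at the first position where they differ (or where the
// shorter slice ends). Empty results mean the two recordings agree.
func divergence[T any](a, b []T, eq func(x, y T) bool) (restA, restB []T) {
	n := len(a)
	if len(b) < n {
		n = len(b)
	}
	i := 0
	for ; i < n; i++ {
		if !eq(a[i], b[i]) {
			break
		}
	}
	return a[i:], b[i:]
}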

@ -0,0 +1,216 @@
package state
import (
"encoding/json"
"fmt"
"sort"
"strconv"
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
)
type DebugHashState struct {
disk ethdb.KeyValueStore
lock sync.Mutex
Version int64
AccessTrees map[common.Address][]common.Hash
CommitTrees map[common.Address][]common.Hash
CalcHash map[common.Address]common.Hash
GetAccounts []*VersaAccountInfo
UpdateAccounts []*VersaAccountInfo
DeleteAccounts []common.Address
GetStorage []*VersaStorageInfo
UpdateStorage []*VersaStorageInfo
DeleteStorage []*VersaStorageInfo
StorageAddr2Owner map[common.Address]common.Hash
GetCode map[common.Address][]common.Hash
UpdateCode map[common.Address][]common.Hash
Errs []string
}
func NewDebugHashState(disk ethdb.KeyValueStore) *DebugHashState {
return &DebugHashState{
disk: disk,
AccessTrees: make(map[common.Address][]common.Hash),
CommitTrees: make(map[common.Address][]common.Hash),
CalcHash: make(map[common.Address]common.Hash),
GetAccounts: make([]*VersaAccountInfo, 0),
UpdateAccounts: make([]*VersaAccountInfo, 0),
DeleteAccounts: make([]common.Address, 0),
GetStorage: make([]*VersaStorageInfo, 0),
UpdateStorage: make([]*VersaStorageInfo, 0),
DeleteStorage: make([]*VersaStorageInfo, 0),
StorageAddr2Owner: make(map[common.Address]common.Hash),
GetCode: make(map[common.Address][]common.Hash),
UpdateCode: make(map[common.Address][]common.Hash),
Errs: make([]string, 0),
}
}
func (hs *DebugHashState) OnOpenTree(root common.Hash, owner common.Hash, address common.Address) {
hs.lock.Lock()
defer hs.lock.Unlock()
if _, ok := hs.AccessTrees[address]; !ok {
hs.AccessTrees[address] = make([]common.Hash, 0)
}
hs.AccessTrees[address] = append(hs.AccessTrees[address], root)
hs.StorageAddr2Owner[address] = owner
}
func (hs *DebugHashState) OnGetAccount(addr common.Address, acc *types.StateAccount) {
hs.lock.Lock()
defer hs.lock.Unlock()
hs.GetAccounts = append(hs.GetAccounts, &VersaAccountInfo{
Address: addr,
Account: acc,
})
}
func (hs *DebugHashState) OnUpdateAccount(addr common.Address, acc *types.StateAccount) {
hs.lock.Lock()
defer hs.lock.Unlock()
hs.UpdateAccounts = append(hs.UpdateAccounts, &VersaAccountInfo{
Address: addr,
Account: acc,
})
}
func (hs *DebugHashState) OnDeleteAccount(address common.Address) {
hs.lock.Lock()
defer hs.lock.Unlock()
hs.DeleteAccounts = append(hs.DeleteAccounts, address)
}
func (hs *DebugHashState) OnGetStorage(address common.Address, key []byte, val []byte) {
hs.lock.Lock()
defer hs.lock.Unlock()
hs.GetStorage = append(hs.GetStorage, &VersaStorageInfo{
Address: address,
Key: common.Bytes2Hex(key),
Val: common.Bytes2Hex(val),
})
}
func (hs *DebugHashState) OnUpdateStorage(address common.Address, key []byte, val []byte) {
hs.lock.Lock()
defer hs.lock.Unlock()
hs.UpdateStorage = append(hs.UpdateStorage, &VersaStorageInfo{
Address: address,
Key: common.Bytes2Hex(key),
Val: common.Bytes2Hex(val),
})
}
func (hs *DebugHashState) OnDeleteStorage(address common.Address, key []byte) {
hs.lock.Lock()
defer hs.lock.Unlock()
hs.DeleteStorage = append(hs.DeleteStorage, &VersaStorageInfo{
Address: address,
Key: common.Bytes2Hex(key),
})
}
func (hs *DebugHashState) OnGetCode(addr common.Address, codeHash common.Hash) {
hs.lock.Lock()
defer hs.lock.Unlock()
if _, ok := hs.GetCode[addr]; !ok {
hs.GetCode[addr] = make([]common.Hash, 0)
}
hs.GetCode[addr] = append(hs.GetCode[addr], codeHash)
}
func (hs *DebugHashState) OnUpdateCode(addr common.Address, codeHash common.Hash) {
hs.lock.Lock()
defer hs.lock.Unlock()
if _, ok := hs.UpdateCode[addr]; !ok {
hs.UpdateCode[addr] = make([]common.Hash, 0)
}
hs.UpdateCode[addr] = append(hs.UpdateCode[addr], codeHash)
}
func (hs *DebugHashState) OnCalcHash(addr common.Address, root common.Hash) {
hs.lock.Lock()
defer hs.lock.Unlock()
hs.CalcHash[addr] = root
}
func (hs *DebugHashState) OnCommitTree(addr common.Address, root common.Hash) {
hs.lock.Lock()
defer hs.lock.Unlock()
if _, ok := hs.CommitTrees[addr]; !ok {
hs.CommitTrees[addr] = make([]common.Hash, 0)
}
hs.CommitTrees[addr] = append(hs.CommitTrees[addr], root)
}
func (hs *DebugHashState) OnError(err error) {
hs.lock.Lock()
defer hs.lock.Unlock()
hs.Errs = append(hs.Errs, err.Error())
}
func (hs *DebugHashState) flush() {
hs.lock.Lock()
defer hs.lock.Unlock()
hs.sortItems()
data, err := json.Marshal(hs)
if err != nil {
panic(fmt.Sprintf("failed to json encode debug info, err: %s", err.Error()))
}
err = hs.disk.Put(DebugHashStateKey(hs.Version), data)
if err != nil {
panic(fmt.Sprintf("failed to put debug version state into disk, err: %s", err.Error()))
}
}
func (hs *DebugHashState) sortItems() {
sort.Slice(hs.GetAccounts, func(i, j int) bool {
return hs.GetAccounts[i].Address.Cmp(hs.GetAccounts[j].Address) < 0
})
sort.Slice(hs.UpdateAccounts, func(i, j int) bool {
return hs.UpdateAccounts[i].Address.Cmp(hs.UpdateAccounts[j].Address) < 0
})
sort.Slice(hs.DeleteAccounts, func(i, j int) bool {
return hs.DeleteAccounts[i].Cmp(hs.DeleteAccounts[j]) < 0
})
sort.Slice(hs.GetStorage, func(i, j int) bool {
if hs.GetStorage[i].Address.Cmp(hs.GetStorage[j].Address) == 0 {
return hs.GetStorage[i].Key < hs.GetStorage[j].Key
}
return hs.GetStorage[i].Address.Cmp(hs.GetStorage[j].Address) < 0
})
sort.Slice(hs.UpdateStorage, func(i, j int) bool {
if hs.UpdateStorage[i].Address.Cmp(hs.UpdateStorage[j].Address) == 0 {
return hs.UpdateStorage[i].Key < hs.UpdateStorage[j].Key
}
return hs.UpdateStorage[i].Address.Cmp(hs.UpdateStorage[j].Address) < 0
})
sort.Slice(hs.DeleteStorage, func(i, j int) bool {
if hs.DeleteStorage[i].Address.Cmp(hs.DeleteStorage[j].Address) == 0 {
return hs.DeleteStorage[i].Key < hs.DeleteStorage[j].Key
}
return hs.DeleteStorage[i].Address.Cmp(hs.DeleteStorage[j].Address) < 0
})
}
func DebugHashStateKey(version int64) []byte {
key := "debug_hash_prefix" + strconv.FormatInt(version, 10)
return []byte(key)
}
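Records written by DebugHashState.flush can be read back with the same key scheme. A sketch, assuming the caller already holds the key-value store the node wrote to; the package and function names are hypothetical.

package statedebug

import (
	"encoding/json"

	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/ethdb"
)

// loadHashDebugRecord fetches and decodes the per-block record stored under
// DebugHashStateKey(block).
func loadHashDebugRecord(disk ethdb.KeyValueStore, block int64) (*state.DebugHashState, error) {
	data, err := disk.Get(state.DebugHashStateKey(block))
	if err != nil {
		return nil, err
	}
	rec := new(state.DebugHashState)
	if err := json.Unmarshal(data, rec); err != nil {
		return nil, err
	}
	return rec, nil
}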

@ -0,0 +1,290 @@
package state
import (
"encoding/json"
"fmt"
"sort"
"strconv"
"sync"
versa "github.com/bnb-chain/versioned-state-database"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
)
var (
DiffVersionCount = 0
DiskVersionCount = 0
)
type VersaAccountInfo struct {
Address common.Address
Account *types.StateAccount
}
type VersaStorageInfo struct {
Handler versa.TreeHandler
Address common.Address
Key string
Val string
}
type DebugVersionState struct {
disk ethdb.KeyValueStore
versionDB versa.Database
lock sync.Mutex
Version int64
PreState *versa.StateInfo
PostState *versa.StateInfo
AccessTrees map[common.Address][]*versa.TreeInfo
CommitTrees map[common.Address][]*versa.TreeInfo
CalcHash map[common.Address]common.Hash
GetAccounts []*VersaAccountInfo
UpdateAccounts []*VersaAccountInfo
DeleteAccounts []common.Address
GetStorage []*VersaStorageInfo
UpdateStorage []*VersaStorageInfo
DeleteStorage []*VersaStorageInfo
StorageAddr2Owner map[common.Address]common.Hash
GetCode map[common.Address][]common.Hash
UpdateCode map[common.Address][]common.Hash
Errs []string
}
func NewDebugVersionState(disk ethdb.KeyValueStore, versionDB versa.Database) *DebugVersionState {
return &DebugVersionState{
disk: disk,
versionDB: versionDB,
AccessTrees: make(map[common.Address][]*versa.TreeInfo, 0),
CommitTrees: make(map[common.Address][]*versa.TreeInfo, 0),
CalcHash: make(map[common.Address]common.Hash),
GetAccounts: make([]*VersaAccountInfo, 0),
UpdateAccounts: make([]*VersaAccountInfo, 0),
DeleteAccounts: make([]common.Address, 0),
GetStorage: make([]*VersaStorageInfo, 0),
UpdateStorage: make([]*VersaStorageInfo, 0),
DeleteStorage: make([]*VersaStorageInfo, 0),
StorageAddr2Owner: make(map[common.Address]common.Hash),
GetCode: make(map[common.Address][]common.Hash, 0),
UpdateCode: make(map[common.Address][]common.Hash, 0),
Errs: make([]string, 0),
}
}
func (ds *DebugVersionState) SetVersion(version int64) {
ds.lock.Lock()
defer ds.lock.Unlock()
ds.Version = version
}
func (ds *DebugVersionState) OnOpenState(handler versa.StateHandler) {
ds.lock.Lock()
defer ds.lock.Unlock()
stateInfo, err := ds.versionDB.GetStateInfo(handler)
if err != nil {
panic(fmt.Sprintf("failed to get state info on open state, err: %s", err.Error()))
}
ds.PreState = stateInfo
}
func (ds *DebugVersionState) OnOpenTree(handler versa.TreeHandler, owner common.Hash, address common.Address) {
ds.lock.Lock()
defer ds.lock.Unlock()
treeInfo, err := ds.versionDB.GetTreeInfo(handler)
if err != nil {
panic(fmt.Sprintf("failed to get tree info on open tree, err: %s", err.Error()))
}
if _, ok := ds.AccessTrees[address]; !ok {
ds.AccessTrees[address] = make([]*versa.TreeInfo, 0)
}
ds.AccessTrees[address] = append(ds.AccessTrees[address], treeInfo)
ds.StorageAddr2Owner[address] = owner
}
func (ds *DebugVersionState) OnGetAccount(addr common.Address, acc *types.StateAccount) {
ds.lock.Lock()
defer ds.lock.Unlock()
ds.GetAccounts = append(ds.GetAccounts, &VersaAccountInfo{
Address: addr,
Account: acc,
})
}
func (ds *DebugVersionState) OnUpdateAccount(addr common.Address, acc *types.StateAccount) {
ds.lock.Lock()
defer ds.lock.Unlock()
ds.UpdateAccounts = append(ds.UpdateAccounts, &VersaAccountInfo{
Address: addr,
Account: acc,
})
}
func (ds *DebugVersionState) OnDeleteAccount(address common.Address) {
ds.lock.Lock()
defer ds.lock.Unlock()
ds.DeleteAccounts = append(ds.DeleteAccounts, address)
}
func (ds *DebugVersionState) OnGetStorage(handler versa.TreeHandler, address common.Address, key []byte, val []byte) {
ds.lock.Lock()
defer ds.lock.Unlock()
ds.GetStorage = append(ds.GetStorage, &VersaStorageInfo{
Handler: handler,
Address: address,
Key: common.Bytes2Hex(key),
Val: common.Bytes2Hex(val),
})
}
func (ds *DebugVersionState) OnUpdateStorage(handler versa.TreeHandler, address common.Address, key []byte, val []byte) {
ds.lock.Lock()
defer ds.lock.Unlock()
ds.UpdateStorage = append(ds.UpdateStorage, &VersaStorageInfo{
Handler: handler,
Address: address,
Key: common.Bytes2Hex(key),
Val: common.Bytes2Hex(val),
})
}
func (ds *DebugVersionState) OnDeleteStorage(handler versa.TreeHandler, address common.Address, key []byte) {
ds.lock.Lock()
defer ds.lock.Unlock()
ds.DeleteStorage = append(ds.DeleteStorage, &VersaStorageInfo{
Handler: handler,
Address: address,
Key: common.Bytes2Hex(key),
})
}
func (ds *DebugVersionState) OnGetCode(addr common.Address, codeHash common.Hash) {
ds.lock.Lock()
defer ds.lock.Unlock()
if _, ok := ds.GetCode[addr]; !ok {
ds.GetCode[addr] = make([]common.Hash, 0)
}
ds.GetCode[addr] = append(ds.GetCode[addr], codeHash)
}
func (ds *DebugVersionState) OnUpdateCode(addr common.Address, codeHash common.Hash) {
ds.lock.Lock()
defer ds.lock.Unlock()
if _, ok := ds.UpdateCode[addr]; !ok {
ds.UpdateCode[addr] = make([]common.Hash, 0)
}
ds.UpdateCode[addr] = append(ds.UpdateCode[addr], codeHash)
}
func (ds *DebugVersionState) OnCalcHash(addr common.Address, root common.Hash) {
ds.lock.Lock()
defer ds.lock.Unlock()
ds.CalcHash[addr] = root
}
func (ds *DebugVersionState) OnCommitTree(addr common.Address, handler versa.TreeHandler) {
ds.lock.Lock()
defer ds.lock.Unlock()
treeInfo, err := ds.versionDB.GetTreeInfo(handler)
if err != nil {
panic(fmt.Sprintf("failed to get tree info on commit tree, err: %s", err.Error()))
}
if _, ok := ds.CommitTrees[addr]; !ok {
ds.CommitTrees[addr] = make([]*versa.TreeInfo, 0)
}
ds.CommitTrees[addr] = append(ds.CommitTrees[addr], treeInfo)
}
func (ds *DebugVersionState) OnError(err error) {
ds.lock.Lock()
defer ds.lock.Unlock()
ds.Errs = append(ds.Errs, err.Error())
}
func (ds *DebugVersionState) OnCloseState(handler versa.StateHandler) {
ds.lock.Lock()
defer ds.lock.Unlock()
stateInfo, err := ds.versionDB.GetStateInfo(handler)
if err != nil {
panic(fmt.Sprintf("failed to get state info on close state, err: %s", err.Error()))
}
ds.PostState = stateInfo
oldDiskVersionCount := DiskVersionCount
if ds.PreState.Root.Cmp(ds.PostState.Root) != 0 {
DiffVersionCount++
if ds.PostState.IsDiskVersion {
DiskVersionCount++
}
}
if ds.Version%1000 == 0 || oldDiskVersionCount != DiskVersionCount {
log.Info("version state info", "current block", ds.Version, "diff version count", DiffVersionCount, "disk version count", DiskVersionCount)
}
ds.sortItems()
data, err := json.Marshal(ds)
if err != nil {
panic(fmt.Sprintf("failed to json encode debug info, err: %s", err.Error()))
}
err = ds.disk.Put(DebugVersionStateKey(ds.Version), data)
if err != nil {
panic(fmt.Sprintf("failed to put debug version state into disk, err: %s", err.Error()))
}
if len(ds.Errs) != 0 {
log.Info("version state occurs error", "debug info", string(data))
log.Crit("exit....")
}
}
func (ds *DebugVersionState) sortItems() {
sort.Slice(ds.GetAccounts, func(i, j int) bool {
return ds.GetAccounts[i].Address.Cmp(ds.GetAccounts[j].Address) < 0
})
sort.Slice(ds.UpdateAccounts, func(i, j int) bool {
return ds.UpdateAccounts[i].Address.Cmp(ds.UpdateAccounts[j].Address) < 0
})
sort.Slice(ds.DeleteAccounts, func(i, j int) bool {
return ds.DeleteAccounts[i].Cmp(ds.DeleteAccounts[j]) < 0
})
sort.Slice(ds.GetStorage, func(i, j int) bool {
if ds.GetStorage[i].Address.Cmp(ds.GetStorage[j].Address) == 0 {
return ds.GetStorage[i].Key < ds.GetStorage[j].Key
}
return ds.GetStorage[i].Address.Cmp(ds.GetStorage[j].Address) < 0
})
sort.Slice(ds.UpdateStorage, func(i, j int) bool {
if ds.UpdateStorage[i].Address.Cmp(ds.UpdateStorage[j].Address) == 0 {
return ds.UpdateStorage[i].Key < ds.UpdateStorage[j].Key
}
return ds.UpdateStorage[i].Address.Cmp(ds.UpdateStorage[j].Address) < 0
})
sort.Slice(ds.DeleteStorage, func(i, j int) bool {
if ds.DeleteStorage[i].Address.Cmp(ds.DeleteStorage[j].Address) == 0 {
return ds.DeleteStorage[i].Key < ds.DeleteStorage[j].Key
}
return ds.DeleteStorage[i].Address.Cmp(ds.DeleteStorage[j].Address) < 0
})
}
func DebugVersionStateKey(version int64) []byte {
key := "debug_version_prefix" + strconv.FormatInt(version, 10)
return []byte(key)
}
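Putting the two recorders together: the stored version-scheme and hash-scheme records for one block can be decoded and fed to GenerateDebugStateDiff to locate the first divergence. A sketch with hypothetical package, function, and store names.

package statedebug

import (
	"encoding/json"

	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/ethdb"
)

// diffBlock loads both debug records for a block and returns the JSON diff
// produced by GenerateDebugStateDiff.
func diffBlock(versionDisk, hashDisk ethdb.KeyValueStore, block int64) (string, error) {
	vdata, err := versionDisk.Get(state.DebugVersionStateKey(block))
	if err != nil {
		return "", err
	}
	hdata, err := hashDisk.Get(state.DebugHashStateKey(block))
	if err != nil {
		return "", err
	}
	var vs state.DebugVersionState
	var hs state.DebugHashState
	if err := json.Unmarshal(vdata, &vs); err != nil {
		return "", err
	}
	if err := json.Unmarshal(hdata, &hs); err != nil {
		return "", err
	}
	return state.GenerateDebugStateDiff(&vs, &hs), nil
}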

@ -81,6 +81,7 @@ type triePrefetcher struct {
// newTriePrefetcher
func newTriePrefetcher(db Database, root, rootParent common.Hash, namespace string) *triePrefetcher {
panic("prefetcher not support")
prefix := triePrefetchMetricsPrefix + namespace
p := &triePrefetcher{
db: db,

@ -49,6 +49,7 @@ func NewStatePrefetcher(config *params.ChainConfig, bc *BlockChain, engine conse
// the transaction messages using the statedb, but any changes are discarded. The
// only goal is to pre-cache transaction signatures and state trie nodes.
func (p *statePrefetcher) Prefetch(block *types.Block, statedb *state.StateDB, cfg *vm.Config, interruptCh <-chan struct{}) {
panic("prefetcher not support")
var (
header = block.Header()
signer = types.MakeSigner(p.config, header.Number, header.Time)
@ -101,6 +102,7 @@ func (p *statePrefetcher) Prefetch(block *types.Block, statedb *state.StateDB, c
// the transaction messages using the statedb, but any changes are discarded. The
// only goal is to pre-cache transaction signatures and snapshot clean state. Only used for mining stage
func (p *statePrefetcher) PrefetchMining(txs TransactionsByPriceAndNonce, header *types.Header, gasLimit uint64, statedb *state.StateDB, cfg vm.Config, interruptCh <-chan struct{}, txCurr **types.Transaction) {
panic("prefetcher not support")
var signer = types.MakeSigner(p.config, header.Number, header.Time)
txCh := make(chan *types.Transaction, 2*prefetchThread)

@ -18,6 +18,7 @@ package types
import (
"bytes"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rlp"
@ -58,6 +59,11 @@ func (acct *StateAccount) Copy() *StateAccount {
}
}
func (acct *StateAccount) String() string {
return fmt.Sprintf("nonce: %d, balance: %d, root: %s, codeHash: %s",
acct.Nonce, acct.Balance, acct.Root.String(), common.Bytes2Hex(acct.CodeHash))
}
// SlimAccount is a modified version of an Account, where the root is replaced
// with a byte slice. This format can be used to represent full-consensus format
// or slim format which replaces the empty root and code hash as nil byte slice.

@ -123,6 +123,18 @@ type Ethereum struct {
// New creates a new Ethereum object (including the
// initialisation of the common Ethereum object)
func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
onlyFullSync := false
if config.StateScheme == rawdb.VersionScheme {
config.SnapshotCache = 0
onlyFullSync = true
config.SyncMode = downloader.FullSync
}
// TODO:: debug code
config.SnapshotCache = 0
onlyFullSync = true
config.SyncMode = downloader.FullSync
// Ensure configuration values are compatible and sane
if config.SyncMode == downloader.LightSync {
return nil, errors.New("can't run eth.Ethereum in light sync mode, light mode has been deprecated")
@ -328,6 +340,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
DirectBroadcast: config.DirectBroadcast,
DisablePeerTxBroadcast: config.DisablePeerTxBroadcast,
PeerSet: peers,
OnlyFullSync: onlyFullSync,
}); err != nil {
return nil, err
}

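The backend wiring above can be summarized as a small normalization step: when the version scheme is selected, snapshots are switched off and the node is pinned to full sync, and the result is forwarded to the handler as OnlyFullSync. A sketch with a hypothetical function name:

package eth

import (
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/eth/downloader"
	"github.com/ethereum/go-ethereum/eth/ethconfig"
)

// normalizeForVersionScheme applies the same overrides as New above and reports
// whether the handler should be restricted to full sync.
func normalizeForVersionScheme(cfg *ethconfig.Config) bool {
	if cfg.StateScheme != rawdb.VersionScheme {
		return false
	}
	cfg.SnapshotCache = 0
	cfg.SyncMode = downloader.FullSync
	return true
}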
@ -124,6 +124,7 @@ type handlerConfig struct {
DirectBroadcast bool
DisablePeerTxBroadcast bool
PeerSet *peerSet
OnlyFullSync bool
}
type handler struct {
@ -202,35 +203,37 @@ func newHandler(config *handlerConfig) (*handler, error) {
handlerStartCh: make(chan struct{}),
stopCh: make(chan struct{}),
}
if config.Sync == downloader.FullSync {
// The database seems empty as the current block is the genesis. Yet the snap
// block is ahead, so snap sync was enabled for this node at a certain point.
// The scenarios where this can happen is
// * if the user manually (or via a bad block) rolled back a snap sync node
// below the sync point.
// * the last snap sync is not finished while user specifies a full sync this
// time. But we don't have any recent state for full sync.
// In these cases however it's safe to reenable snap sync.
fullBlock, snapBlock := h.chain.CurrentBlock(), h.chain.CurrentSnapBlock()
if fullBlock.Number.Uint64() == 0 && snapBlock.Number.Uint64() > 0 {
if rawdb.ReadAncientType(h.database) == rawdb.PruneFreezerType {
log.Crit("Fast Sync not finish, can't enable pruneancient mode")
if !config.OnlyFullSync {
if config.Sync == downloader.FullSync {
// The database seems empty as the current block is the genesis. Yet the snap
// block is ahead, so snap sync was enabled for this node at a certain point.
// The scenarios where this can happen are
// * if the user manually (or via a bad block) rolled back a snap sync node
// below the sync point.
// * the last snap sync is not finished while user specifies a full sync this
// time. But we don't have any recent state for full sync.
// In these cases however it's safe to reenable snap sync.
fullBlock, snapBlock := h.chain.CurrentBlock(), h.chain.CurrentSnapBlock()
if fullBlock.Number.Uint64() == 0 && snapBlock.Number.Uint64() > 0 {
if rawdb.ReadAncientType(h.database) == rawdb.PruneFreezerType {
log.Crit("Fast Sync not finish, can't enable pruneancient mode")
}
h.snapSync.Store(true)
log.Warn("Switch sync mode from full sync to snap sync", "reason", "snap sync incomplete")
} else if !h.chain.NoTries() && !h.chain.HasState(fullBlock.Root) {
h.snapSync.Store(true)
log.Warn("Switch sync mode from full sync to snap sync", "reason", "head state missing")
}
h.snapSync.Store(true)
log.Warn("Switch sync mode from full sync to snap sync", "reason", "snap sync incomplete")
} else if !h.chain.NoTries() && !h.chain.HasState(fullBlock.Root) {
h.snapSync.Store(true)
log.Warn("Switch sync mode from full sync to snap sync", "reason", "head state missing")
}
} else {
head := h.chain.CurrentBlock()
if head.Number.Uint64() > 0 && h.chain.HasState(head.Root) {
// Print warning log if database is not empty to run snap sync.
log.Warn("Switch sync mode from snap sync to full sync", "reason", "snap sync complete")
} else {
// If snap sync was requested and our database is empty, grant it
h.snapSync.Store(true)
log.Info("Enabled snap sync", "head", head.Number, "hash", head.Hash())
head := h.chain.CurrentBlock()
if head.Number.Uint64() > 0 && h.chain.HasState(head.Root) {
// Print warning log if database is not empty to run snap sync.
log.Warn("Switch sync mode from snap sync to full sync", "reason", "snap sync complete")
} else {
// If snap sync was requested and our database is empty, grant it
h.snapSync.Store(true)
log.Info("Enabled snap sync", "head", head.Number, "hash", head.Hash())
}
}
}
// If snap sync is requested but snapshots are disabled, fail loudly

@ -71,7 +71,7 @@ func (eth *Ethereum) hashState(ctx context.Context, block *types.Block, reexec u
// the internal junks created by tracing will be persisted into the disk.
// TODO(rjl493456442), clean cache is disabled to prevent memory leak,
// please re-enable it for better performance.
database = state.NewDatabaseWithConfig(eth.chainDb, triedb.HashDefaults)
database = state.NewDatabaseWithConfig(eth.chainDb, triedb.HashDefaults, false)
if statedb, err = state.New(block.Root(), database, nil); err == nil {
log.Info("Found disk backend for state trie", "root", block.Root(), "number", block.Number())
return statedb, noopReleaser, nil
@ -92,7 +92,7 @@ func (eth *Ethereum) hashState(ctx context.Context, block *types.Block, reexec u
// TODO(rjl493456442), clean cache is disabled to prevent memory leak,
// please re-enable it for better performance.
tdb = triedb.NewDatabase(eth.chainDb, triedb.HashDefaults)
database = state.NewDatabaseWithNodeDB(eth.chainDb, tdb)
database = state.NewDatabaseWithNodeDB(eth.chainDb, tdb, false)
// If we didn't check the live database, do check state over ephemeral database,
// otherwise we would rewind past a persisted block (specific corner case is
@ -221,6 +221,7 @@ func (eth *Ethereum) stateAtBlock(ctx context.Context, block *types.Block, reexe
if eth.blockchain.TrieDB().Scheme() == rawdb.HashScheme {
return eth.hashState(ctx, block, reexec, base, readOnly, preferDisk)
}
// The path and version schemes share the same interface
return eth.pathState(block)
}

@ -191,33 +191,40 @@ func peerToSyncOp(mode downloader.SyncMode, p *eth.Peer) *chainSyncOp {
}
func (cs *chainSyncer) modeAndLocalHead() (downloader.SyncMode, *big.Int) {
// If we're in snap sync mode, return that directly
if cs.handler.snapSync.Load() {
block := cs.handler.chain.CurrentSnapBlock()
td := cs.handler.chain.GetTd(block.Hash(), block.Number.Uint64())
return downloader.SnapSync, td
}
// We are probably in full sync, but we might have rewound to before the
// snap sync pivot, check if we should re-enable snap sync.
head := cs.handler.chain.CurrentBlock()
if pivot := rawdb.ReadLastPivotNumber(cs.handler.database); pivot != nil {
if head.Number.Uint64() < *pivot {
if rawdb.ReadAncientType(cs.handler.database) == rawdb.PruneFreezerType {
log.Crit("Current rewound to before the fast sync pivot, can't enable pruneancient mode", "current block number", head.Number.Uint64(), "pivot", *pivot)
}
if cs.handler.chain.TrieDB().Scheme() != rawdb.VersionScheme {
// If we're in snap sync mode, return that directly
if cs.handler.snapSync.Load() {
block := cs.handler.chain.CurrentSnapBlock()
td := cs.handler.chain.GetTd(block.Hash(), block.Number.Uint64())
return downloader.SnapSync, td
}
}
// We are in a full sync, but the associated head state is missing. To complete
// the head state, forcefully rerun the snap sync. Note it doesn't mean the
// persistent state is corrupted, just mismatch with the head block.
if !cs.handler.chain.NoTries() && !cs.handler.chain.HasState(head.Root) {
block := cs.handler.chain.CurrentSnapBlock()
td := cs.handler.chain.GetTd(block.Hash(), block.Number.Uint64())
log.Info("Reenabled snap sync as chain is stateless")
return downloader.SnapSync, td
// We are probably in full sync, but we might have rewound to before the
// snap sync pivot, check if we should re-enable snap sync.
head = cs.handler.chain.CurrentBlock()
if pivot := rawdb.ReadLastPivotNumber(cs.handler.database); pivot != nil {
if head.Number.Uint64() < *pivot {
if rawdb.ReadAncientType(cs.handler.database) == rawdb.PruneFreezerType {
log.Crit("Current rewound to before the fast sync pivot, can't enable pruneancient mode", "current block number", head.Number.Uint64(), "pivot", *pivot)
}
block := cs.handler.chain.CurrentSnapBlock()
td := cs.handler.chain.GetTd(block.Hash(), block.Number.Uint64())
return downloader.SnapSync, td
}
}
// We are in a full sync, but the associated head state is missing. To complete
// the head state, forcefully rerun the snap sync. Note it doesn't mean the
// persistent state is corrupted, just mismatch with the head block.
if !cs.handler.chain.NoTries() && !cs.handler.chain.HasState(head.Root) {
block := cs.handler.chain.CurrentSnapBlock()
td := cs.handler.chain.GetTd(block.Hash(), block.Number.Uint64())
log.Info("Reenabled snap sync as chain is stateless")
return downloader.SnapSync, td
}
} else {
if !cs.handler.chain.HasState(head.Root) {
panic("version db not support snap sync")
}
}
// Nope, we're really full syncing
td := cs.handler.chain.GetTd(head.Hash(), head.Number.Uint64())

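The sync.go change encodes one invariant: a version-scheme node never re-enables snap sync, so its head state must already be present. A tiny guard expressing that rule (hypothetical name, not the production code path):

package eth

import (
	"errors"

	"github.com/ethereum/go-ethereum/core/rawdb"
)

// checkVersionSchemeSync returns an error instead of panicking when a
// version-scheme node is missing its head state, since snap sync cannot be
// used to recover it.
func checkVersionSchemeSync(scheme string, hasHeadState bool) error {
	if scheme == rawdb.VersionScheme && !hasHeadState {
		return errors.New("version db does not support snap sync and head state is missing")
	}
	return nil
}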
go.mod (25 lines changed)

@ -12,6 +12,7 @@ require (
github.com/aws/aws-sdk-go-v2/service/route53 v1.30.2
github.com/bnb-chain/fastssz v0.1.2
github.com/bnb-chain/ics23 v0.1.0
github.com/bnb-chain/versioned-state-database v0.0.0-00010101000000-000000000000
github.com/btcsuite/btcd/btcec/v2 v2.3.2
github.com/cespare/cp v1.1.1
github.com/cloudflare/cloudflare-go v0.79.0
@ -44,7 +45,7 @@ require (
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d
github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4
github.com/holiman/bloomfilter/v2 v2.0.3
github.com/holiman/uint256 v1.2.4
github.com/holiman/uint256 v1.3.0
github.com/huin/goupnp v1.3.0
github.com/influxdata/influxdb-client-go/v2 v2.4.0
github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c
@ -67,7 +68,7 @@ require (
github.com/rs/cors v1.8.2
github.com/shirou/gopsutil v3.21.11+incompatible
github.com/status-im/keycard-go v0.2.0
github.com/stretchr/testify v1.8.4
github.com/stretchr/testify v1.9.0
github.com/supranational/blst v0.3.11
github.com/syndtr/goleveldb v1.0.1
github.com/tendermint/go-amino v0.14.1
@ -79,13 +80,13 @@ require (
github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4 v1.1.3
github.com/willf/bitset v1.1.3
go.uber.org/automaxprocs v1.5.2
golang.org/x/crypto v0.21.0
golang.org/x/crypto v0.25.0
golang.org/x/exp v0.0.0-20240213143201-ec583247a57a
golang.org/x/sync v0.6.0
golang.org/x/sys v0.20.0
golang.org/x/text v0.14.0
golang.org/x/sync v0.7.0
golang.org/x/sys v0.22.0
golang.org/x/text v0.16.0
golang.org/x/time v0.5.0
golang.org/x/tools v0.18.0
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d
gopkg.in/natefinch/lumberjack.v2 v2.0.0
gopkg.in/yaml.v3 v3.0.1
)
@ -111,7 +112,7 @@ require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.11.0 // indirect
github.com/cespare/xxhash v1.1.0 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/chzyer/readline v1.5.1 // indirect
github.com/cockroachdb/errors v1.11.1 // indirect
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
@ -251,6 +252,7 @@ require (
github.com/spf13/afero v1.10.0 // indirect
github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect
github.com/thomaso-mirodin/intmath v0.0.0-20160323211736-5dc6d854e46e // indirect
github.com/tidwall/btree v1.7.0 // indirect
github.com/tidwall/gjson v1.10.2 // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.0 // indirect
@ -271,10 +273,10 @@ require (
go.uber.org/mock v0.4.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/mod v0.15.0 // indirect
golang.org/x/net v0.23.0 // indirect
golang.org/x/mod v0.17.0 // indirect
golang.org/x/net v0.25.0 // indirect
golang.org/x/oauth2 v0.16.0 // indirect
golang.org/x/term v0.18.0 // indirect
golang.org/x/term v0.22.0 // indirect
google.golang.org/api v0.44.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b // indirect
@ -295,6 +297,7 @@ require (
)
replace (
github.com/bnb-chain/versioned-state-database => ../versioned-state-database
github.com/cometbft/cometbft => github.com/bnb-chain/greenfield-cometbft v1.3.1
github.com/grpc-ecosystem/grpc-gateway/v2 => github.com/prysmaticlabs/grpc-gateway/v2 v2.3.1-0.20210702154020-550e1cd83ec1
github.com/syndtr/goleveldb v1.0.1 => github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7

go.sum (49 lines changed)

@ -185,8 +185,9 @@ github.com/cespare/cp v1.1.1/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY=
github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM=
@ -326,7 +327,6 @@ github.com/ethereum/c-kzg-4844 v0.4.0 h1:3MS1s4JtA868KpJxroZoepdV0ZKBp3u/O5HcZ7R
github.com/ethereum/c-kzg-4844 v0.4.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
@ -614,8 +614,8 @@ github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 h1:X4egAf/gcS1zATw6w
github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/holiman/uint256 v1.3.0 h1:4wdcm/tnd0xXdu7iS3ruNvxkWwrb4aeBQv19ayYn8F4=
github.com/holiman/uint256 v1.3.0/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
@ -1121,8 +1121,9 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@ -1133,8 +1134,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4=
github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
@ -1151,6 +1152,8 @@ github.com/tendermint/iavl v0.12.0 h1:xcaFAr+ycqCj7WN1RzL2EfcBioRDOHcU1oWcg83K02
github.com/tendermint/iavl v0.12.0/go.mod h1:EoKMMv++tDOL5qKKVnoIqtVPshRrEPeJ0WsgDOLAauM=
github.com/thomaso-mirodin/intmath v0.0.0-20160323211736-5dc6d854e46e h1:cR8/SYRgyQCt5cNCMniB/ZScMkhI9nk8U5C7SbISXjo=
github.com/thomaso-mirodin/intmath v0.0.0-20160323211736-5dc6d854e46e/go.mod h1:Tu4lItkATkonrYuvtVjG0/rhy15qrNGNTjPdaphtZ/8=
github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI=
github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY=
github.com/tidwall/gjson v1.10.2 h1:APbLGOM0rrEkd8WBw9C24nllro4ajFuJu0Sc9hRz8Bo=
github.com/tidwall/gjson v1.10.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
@ -1286,8 +1289,8 @@ golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -1326,8 +1329,8 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8=
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -1379,8 +1382,8 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@ -1411,8 +1414,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -1503,13 +1506,13 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk=
golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -1520,8 +1523,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -1595,8 +1598,8 @@ golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ=
golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

@ -20,6 +20,7 @@ import (
"errors"
"strings"
versa "github.com/bnb-chain/versioned-state-database"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
@ -30,6 +31,7 @@ import (
"github.com/ethereum/go-ethereum/triedb/database"
"github.com/ethereum/go-ethereum/triedb/hashdb"
"github.com/ethereum/go-ethereum/triedb/pathdb"
"github.com/ethereum/go-ethereum/triedb/versiondb"
)
// Config defines all necessary options for database.
@ -37,7 +39,9 @@ type Config struct {
Preimages bool // Flag whether the preimage of node key is recorded
Cache int
NoTries bool
IsVerkle bool // Flag whether the db is holding a verkle tree
IsVerkle bool // Flag whether the db is holding a verkle tree
IsVersion bool              // Flag whether the db is backed by the versioned state database
VersionDB *versiondb.Config // Configs for version-based scheme
HashDB *hashdb.Config // Configs for hash-based scheme
PathDB *pathdb.Config // Configs for experimental path-based scheme
}
@ -92,6 +96,16 @@ type Database struct {
// NewDatabase initializes the trie database with default settings, note
// the legacy hash-based scheme is used by default.
func NewDatabase(diskdb ethdb.Database, config *Config) *Database {
if config != nil && config.IsVersion {
// TODO:: Wait for debugging to stabilize, and then consider initialization compatibility with other databases
db := &Database{
config: config,
diskdb: diskdb,
backend: versiondb.New(config.VersionDB),
}
return db
}
// Sanitize the config and use the default one if it's not specified.
var triediskdb ethdb.Database
if diskdb != nil && diskdb.StateStore() != nil {
@ -409,3 +423,12 @@ func (db *Database) GetAllRooHash() [][]string {
func (db *Database) IsVerkle() bool {
return db.config.IsVerkle
}
// VersaDB returns the versioned database instance; it is only supported by the version scheme, not by hashdb or pathdb
func (db *Database) VersaDB() versa.Database {
vdb, ok := db.backend.(*versiondb.VersionDB)
if !ok {
log.Crit("only version db support")
}
return vdb.VersaDB()
}
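Callers that need the raw versioned backend should gate on the scheme first, since VersaDB crashes on hashdb/pathdb. A usage sketch; the package and function names are hypothetical.

package versautil

import (
	"fmt"

	versa "github.com/bnb-chain/versioned-state-database"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/triedb"
)

// versaBackend returns the versioned backend of db, or an error when db was
// initialized with the hash or path scheme.
func versaBackend(db *triedb.Database) (versa.Database, error) {
	if db.Scheme() != rawdb.VersionScheme {
		return nil, fmt.Errorf("scheme %q does not expose a versioned backend", db.Scheme())
	}
	return db.VersaDB(), nil
}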

@ -139,6 +139,7 @@ func New(diskdb ethdb.Database, config *Config, resolver ChildResolver) *Databas
if config.CleanCacheSize > 0 {
cleans = fastcache.New(config.CleanCacheSize)
}
log.Info("success to init hash mode triedb")
return &Database{
diskdb: diskdb,
resolver: resolver,

@ -226,6 +226,7 @@ func New(diskdb ethdb.Database, config *Config) *Database {
log.Crit("Failed to disable database", "err", err) // impossible to happen
}
}
log.Info("success to init path mode triedb")
return db
}

@ -0,0 +1,77 @@
package versiondb
import (
versa "github.com/bnb-chain/versioned-state-database"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/trie/trienode"
"github.com/ethereum/go-ethereum/trie/triestate"
)
type Config struct {
Path string
FlushInterval int64
MaxStatesInMem int
}
type VersionDB struct {
db versa.Database
}
func New(config *Config) *VersionDB {
var (
cfg *versa.VersaDBConfig
path = "./node/version_db" // TODO:: debug code
)
if config != nil {
path = config.Path
cfg = &versa.VersaDBConfig{
FlushInterval: config.FlushInterval,
MaxStatesInMem: config.MaxStatesInMem,
}
}
db, err := versa.NewVersaDB(path, cfg)
if err != nil {
log.Crit("failed to new version db", "error", err)
}
v := &VersionDB{
db: db,
}
log.Info("success to init version mode triedb")
return v
}
func (v *VersionDB) Scheme() string {
return rawdb.VersionScheme
}
func (v *VersionDB) Initialized(genesisRoot common.Hash) bool {
version, _ := v.db.LatestStoreDiskVersionInfo()
return version >= 0
}
func (v *VersionDB) Size() (common.StorageSize, common.StorageSize, common.StorageSize) {
// TODO:: waiting for versa db support
return 0, 0, 0
}
func (v *VersionDB) Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error {
// TODO:: debug code, will be changed to return an error.
panic("version db not supported")
}
func (v *VersionDB) Commit(root common.Hash, report bool) error {
// TODO:: debug code, will be changed to return an error.
panic("version db not supported")
}
func (v *VersionDB) Close() error {
return v.db.Close()
}
func (v *VersionDB) VersaDB() versa.Database {
return v.db
}
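Finally, a short usage sketch for the new versiondb backend itself; the path and tuning values are placeholders, not recommendations.

package main

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/triedb/versiondb"
)

func main() {
	cfg := &versiondb.Config{
		Path:           "./node/version_db", // placeholder location
		FlushInterval:  1000,
		MaxStatesInMem: 128,
	}
	vdb := versiondb.New(cfg) // calls log.Crit and exits if the directory cannot be opened
	defer vdb.Close()

	// Scheme is always rawdb.VersionScheme; Initialized reports whether a
	// disk version already exists.
	log.Info("version db opened", "scheme", vdb.Scheme(), "initialized", vdb.Initialized(common.Hash{}))
}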