feat: support separate trie database (#2021)

* feat: support separate database for state data

parent 89c4ab2a05
commit def3512fd8
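At a high level, the change moves trie/state data into its own key-value store that is attached to the main chain database and reached through the new StateStore/StateStoreReader accessors. The sketch below is illustrative only (the helper name is hypothetical; the accessor names come from this diff):

package example

import "github.com/ethereum/go-ethereum/ethdb"

// attachStateStore shows the wiring pattern introduced by this PR: the trie
// data lives in its own ethdb.Database, attached to the chain database, and
// readers reach it via StateStore()/StateStoreReader().
func attachStateStore(chaindb, statedb ethdb.Database) ethdb.Database {
    chaindb.SetStateStore(statedb) // trie reads/writes are routed to statedb
    return chaindb
}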
@@ -237,6 +237,15 @@ func initGenesis(ctx *cli.Context) error {
 	}
 	defer chaindb.Close()
 
+	// if the trie data dir has been set, new trie db with a new state database
+	if ctx.IsSet(utils.SeparateDBFlag.Name) {
+		statediskdb, dbErr := stack.OpenDatabaseWithFreezer(name+"/state", 0, 0, "", "", false, false, false, false)
+		if dbErr != nil {
+			utils.Fatalf("Failed to open separate trie database: %v", dbErr)
+		}
+		chaindb.SetStateStore(statediskdb)
+	}
+
 	triedb := utils.MakeTrieDatabase(ctx, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false, genesis.IsVerkle())
 	defer triedb.Close()
@@ -600,7 +609,6 @@ func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, eth
 	}
 
 	db := utils.MakeChainDatabase(ctx, stack, true, false)
 	defer db.Close()
 	scheme, err := rawdb.ParseStateScheme(ctx.String(utils.StateSchemeFlag.Name), db)
 	if err != nil {
 		return nil, nil, common.Hash{}, err
@@ -609,7 +617,7 @@ func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, eth
 		fmt.Println("You are using geth dump in path mode, please use `geth dump-roothash` command to get all available blocks.")
 	}
 
-	var header *types.Header
+	header := &types.Header{}
 	if ctx.NArg() == 1 {
 		arg := ctx.Args().First()
 		if hashish(arg) {
@@ -203,6 +203,9 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
 		v := ctx.Uint64(utils.OverrideFeynman.Name)
 		cfg.Eth.OverrideFeynman = &v
 	}
+	if ctx.IsSet(utils.SeparateDBFlag.Name) && !stack.IsSeparatedDB() {
+		utils.Fatalf("Failed to locate separate database subdirectory when separatedb parameter has been set")
+	}
 	backend, eth := utils.RegisterEthService(stack, &cfg.Eth)
 
 	// Create gauge with geth system and build information
@@ -381,7 +381,6 @@ func inspectTrie(ctx *cli.Context) error {
 
 	db := utils.MakeChainDatabase(ctx, stack, true, false)
 	defer db.Close()
 
 	var headerBlockHash common.Hash
 	if ctx.NArg() >= 1 {
 		if ctx.Args().Get(0) == "latest" {
@@ -562,6 +561,11 @@ func dbStats(ctx *cli.Context) error {
 	defer db.Close()
 
 	showLeveldbStats(db)
+	if db.StateStore() != nil {
+		fmt.Println("show stats of state store")
+		showLeveldbStats(db.StateStore())
+	}
 
 	return nil
 }
 
@@ -575,13 +579,31 @@ func dbCompact(ctx *cli.Context) error {
 	log.Info("Stats before compaction")
 	showLeveldbStats(db)
 
+	statediskdb := db.StateStore()
+	if statediskdb != nil {
+		fmt.Println("show stats of state store")
+		showLeveldbStats(statediskdb)
+	}
+
 	log.Info("Triggering compaction")
 	if err := db.Compact(nil, nil); err != nil {
-		log.Info("Compact err", "error", err)
+		log.Error("Compact err", "error", err)
 		return err
 	}
 
+	if statediskdb != nil {
+		if err := statediskdb.Compact(nil, nil); err != nil {
+			log.Error("Compact err", "error", err)
+			return err
+		}
+	}
+
 	log.Info("Stats after compaction")
 	showLeveldbStats(db)
+	if statediskdb != nil {
+		fmt.Println("show stats of state store after compaction")
+		showLeveldbStats(statediskdb)
+	}
 	return nil
 }
 
@@ -602,8 +624,17 @@ func dbGet(ctx *cli.Context) error {
 		return err
 	}
 
+	statediskdb := db.StateStore()
 	data, err := db.Get(key)
 	if err != nil {
+		// if separate trie db exist, try to get it from separate db
+		if statediskdb != nil {
+			statedata, dberr := statediskdb.Get(key)
+			if dberr == nil {
+				fmt.Printf("key %#x: %#x\n", key, statedata)
+				return nil
+			}
+		}
 		log.Info("Get operation failed", "key", fmt.Sprintf("%#x", key), "error", err)
 		return err
 	}
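The fallback added to dbGet generalizes to a small read helper. A hedged sketch (the helper is hypothetical, not part of the diff), following the same order as dbGet: main database first, then the separate state store:

package example

import "github.com/ethereum/go-ethereum/ethdb"

// getWithStateFallback mirrors dbGet's behaviour: look the key up in the chain
// database first and, if that misses and a separate state store is attached,
// retry there before returning the original error.
func getWithStateFallback(db ethdb.Database, key []byte) ([]byte, error) {
    data, err := db.Get(key)
    if err == nil {
        return data, nil
    }
    if state := db.StateStore(); state != nil {
        if sdata, serr := state.Get(key); serr == nil {
            return sdata, nil
        }
    }
    return nil, err
}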
@@ -619,8 +650,14 @@ func dbTrieGet(ctx *cli.Context) error {
 	stack, _ := makeConfigNode(ctx)
 	defer stack.Close()
 
-	db := utils.MakeChainDatabase(ctx, stack, false, false)
-	defer db.Close()
+	var db ethdb.Database
+	chaindb := utils.MakeChainDatabase(ctx, stack, true, false)
+	if chaindb.StateStore() != nil {
+		db = chaindb.StateStore()
+	} else {
+		db = chaindb
+	}
+	defer chaindb.Close()
 
 	scheme := ctx.String(utils.StateSchemeFlag.Name)
 	if scheme == "" {
@@ -685,8 +722,14 @@ func dbTrieDelete(ctx *cli.Context) error {
 	stack, _ := makeConfigNode(ctx)
 	defer stack.Close()
 
-	db := utils.MakeChainDatabase(ctx, stack, false, false)
-	defer db.Close()
+	var db ethdb.Database
+	chaindb := utils.MakeChainDatabase(ctx, stack, true, false)
+	if chaindb.StateStore() != nil {
+		db = chaindb.StateStore()
+	} else {
+		db = chaindb
+	}
+	defer chaindb.Close()
 
 	scheme := ctx.String(utils.StateSchemeFlag.Name)
 	if scheme == "" {
@@ -1076,10 +1119,16 @@ func hbss2pbss(ctx *cli.Context) error {
 
 	db := utils.MakeChainDatabase(ctx, stack, false, false)
 	db.Sync()
+	stateDiskDb := db.StateStore()
 	defer db.Close()
 
 	// convert hbss trie node to pbss trie node
-	lastStateID := rawdb.ReadPersistentStateID(db)
+	var lastStateID uint64
+	if stateDiskDb != nil {
+		lastStateID = rawdb.ReadPersistentStateID(stateDiskDb)
+	} else {
+		lastStateID = rawdb.ReadPersistentStateID(db)
+	}
 	if lastStateID == 0 || force {
 		config := trie.HashDefaults
 		triedb := trie.NewDatabase(db, config)
@@ -1131,18 +1180,34 @@ func hbss2pbss(ctx *cli.Context) error {
 	}
 
 	// repair state ancient offset
-	lastStateID = rawdb.ReadPersistentStateID(db)
+	if stateDiskDb != nil {
+		lastStateID = rawdb.ReadPersistentStateID(stateDiskDb)
+	} else {
+		lastStateID = rawdb.ReadPersistentStateID(db)
+	}
+
 	if lastStateID == 0 {
 		log.Error("Convert hbss to pbss trie node error. The last state id is still 0")
 	}
-	ancient := stack.ResolveAncient("chaindata", ctx.String(utils.AncientFlag.Name))
+
+	var ancient string
+	if db.StateStore() != nil {
+		dirName := filepath.Join(stack.ResolvePath("chaindata"), "state")
+		ancient = filepath.Join(dirName, "ancient")
+	} else {
+		ancient = stack.ResolveAncient("chaindata", ctx.String(utils.AncientFlag.Name))
+	}
 	err = rawdb.ResetStateFreezerTableOffset(ancient, lastStateID)
 	if err != nil {
 		log.Error("Reset state freezer table offset failed", "error", err)
 		return err
 	}
 	// prune hbss trie node
-	err = rawdb.PruneHashTrieNodeInDataBase(db)
+	if stateDiskDb != nil {
+		err = rawdb.PruneHashTrieNodeInDataBase(stateDiskDb)
+	} else {
+		err = rawdb.PruneHashTrieNodeInDataBase(db)
+	}
 	if err != nil {
 		log.Error("Prune Hash trie node in database failed", "error", err)
 		return err
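When the trie data is separated, its freezer lives under chaindata/state/ancient rather than next to the chain freezer, which is why hbss2pbss recomputes the path before resetting the state freezer table offset. A worked illustration (the datadir value is hypothetical; geth normally resolves "chaindata" under <datadir>/geth):

package example

import (
    "fmt"
    "path/filepath"
)

func main() {
    chaindata := filepath.Join("/data/bsc", "geth", "chaindata") // hypothetical datadir layout
    stateAncient := filepath.Join(chaindata, "state", "ancient")
    fmt.Println(stateAncient) // /data/bsc/geth/chaindata/state/ancient
}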
@@ -436,13 +436,15 @@ func pruneState(ctx *cli.Context) error {
 	chaindb := utils.MakeChainDatabase(ctx, stack, false, false)
 	defer chaindb.Close()
 
+	if rawdb.ReadStateScheme(chaindb) != rawdb.HashScheme {
+		log.Crit("Offline pruning is not required for path scheme")
+	}
 	prunerconfig := pruner.Config{
 		Datadir:   stack.ResolvePath(""),
 		BloomSize: ctx.Uint64(utils.BloomFilterSizeFlag.Name),
 	}
 
-	if rawdb.ReadStateScheme(chaindb) != rawdb.HashScheme {
-		log.Crit("Offline pruning is not required for path scheme")
-	}
-
 	pruner, err := pruner.NewPruner(chaindb, prunerconfig, ctx.Uint64(utils.TriesInMemoryFlag.Name))
 	if err != nil {
 		log.Error("Failed to open snapshot tree", "err", err)
@@ -93,6 +93,12 @@ var (
 		Value:    flags.DirectoryString(node.DefaultDataDir()),
 		Category: flags.EthCategory,
 	}
+	SeparateDBFlag = &cli.BoolFlag{
+		Name: "separatedb",
+		Usage: "Enable a separated trie database, it will be created within a subdirectory called state, " +
+			"Users can copy this state directory to another directory or disk, and then create a symbolic link to the state directory under the chaindata",
+		Category: flags.EthCategory,
+	}
 	DirectBroadcastFlag = &cli.BoolFlag{
 		Name:     "directbroadcast",
 		Usage:    "Enable directly broadcast mined block to all peers",
@@ -1112,6 +1118,7 @@ var (
 		DBEngineFlag,
 		StateSchemeFlag,
 		HttpHeaderFlag,
+		SeparateDBFlag,
 	}
 )
 
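The flag's usage text describes the intended layout: the separate store is simply a "state" subdirectory under chaindata, which operators may relocate and replace with a symbolic link. A hedged standalone version of the directory check (the real check is node.(*Node).IsSeparatedDB later in this diff; this helper is only illustrative):

package example

import (
    "os"
    "path/filepath"
)

// hasSeparateStateDir reports whether a "state" subdirectory exists under the
// given chaindata directory. os.Stat follows symlinks, so a state directory
// that is a symlink to another disk still counts, matching the flag's intent.
func hasSeparateStateDir(chaindataDir string) bool {
    info, err := os.Stat(filepath.Join(chaindataDir, "state"))
    return err == nil && info.IsDir()
}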
@@ -2314,6 +2321,11 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node, readonly, disableFree
 		chainDb, err = stack.OpenDatabase("lightchaindata", cache, handles, "", readonly)
 	default:
 		chainDb, err = stack.OpenDatabaseWithFreezer("chaindata", cache, handles, ctx.String(AncientFlag.Name), "", readonly, disableFreeze, false, false)
+		// set the separate state database
+		if stack.IsSeparatedDB() && err == nil {
+			stateDiskDb := MakeStateDataBase(ctx, stack, readonly, false)
+			chainDb.SetStateStore(stateDiskDb)
+		}
 	}
 	if err != nil {
 		Fatalf("Could not open database: %v", err)
@@ -2321,6 +2333,17 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node, readonly, disableFree
 	return chainDb
 }
 
+// MakeStateDataBase open a separate state database using the flags passed to the client and will hard crash if it fails.
+func MakeStateDataBase(ctx *cli.Context, stack *node.Node, readonly, disableFreeze bool) ethdb.Database {
+	cache := ctx.Int(CacheFlag.Name) * ctx.Int(CacheDatabaseFlag.Name) / 100
+	handles := MakeDatabaseHandles(ctx.Int(FDLimitFlag.Name)) / 2
+	statediskdb, err := stack.OpenDatabaseWithFreezer("chaindata/state", cache, handles, "", "", readonly, disableFreeze, false, false)
+	if err != nil {
+		Fatalf("Failed to open separate trie database: %v", err)
+	}
+	return statediskdb
+}
+
 // tryMakeReadOnlyDatabase try to open the chain database in read-only mode,
 // or fallback to write mode if the database is not initialized.
 //
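MakeStateDataBase sizes its cache from the same --cache and --cache.database percentages as the chain database and claims half of the file-descriptor allowance. A quick worked example with illustrative flag values (not defaults taken from this diff):

package example

import "fmt"

func main() {
    // Hypothetical values: --cache=1024 (MB), --cache.database=50 (percent),
    // and an fd allowance of 1024 as returned by MakeDatabaseHandles.
    cacheFlag := 1024
    cacheDatabaseFlag := 50
    fdAllowance := 1024

    stateCache := cacheFlag * cacheDatabaseFlag / 100 // 512 MB for the state database
    stateHandles := fdAllowance / 2                   // 512 fds for the state database
    fmt.Println(stateCache, stateHandles)
}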
@@ -288,13 +288,13 @@ func DeleteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, has
 // if the state is not present in database.
 func ReadStateScheme(db ethdb.Reader) string {
 	// Check if state in path-based scheme is present
-	blob, _ := ReadAccountTrieNode(db, nil)
+	blob, _ := ReadAccountTrieNode(db.StateStoreReader(), nil)
 	if len(blob) != 0 {
 		return PathScheme
 	}
 	// The root node might be deleted during the initial snap sync, check
 	// the persistent state id then.
-	if id := ReadPersistentStateID(db); id != 0 {
+	if id := ReadPersistentStateID(db.StateStoreReader()); id != 0 {
 		return PathScheme
 	}
 	// In a hash-based scheme, the genesis state is consistently stored
@@ -304,7 +304,7 @@ func ReadStateScheme(db ethdb.Reader) string {
 	if header == nil {
 		return "" // empty datadir
 	}
-	blob = ReadLegacyTrieNode(db, header.Root)
+	blob = ReadLegacyTrieNode(db.StateStoreReader(), header.Root)
 	if len(blob) == 0 {
 		return "" // no state in disk
 	}
 
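ReadStateScheme now resolves the store that actually holds trie data through StateStoreReader(), which falls back to the database itself when no separate store is attached, so existing callers keep working for both layouts. A minimal hedged illustration of the same routing (the helper is hypothetical):

package example

import (
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/ethdb"
)

// readAccountTrieRoot reads the path-scheme account trie root node via
// StateStoreReader, so it works whether or not a separate state database
// is attached to db.
func readAccountTrieRoot(db ethdb.Database) []byte {
    blob, _ := rawdb.ReadAccountTrieNode(db.StateStoreReader(), nil)
    return blob
}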
@@ -91,7 +91,7 @@ func inspectFreezers(db ethdb.Database) ([]freezerInfo, error) {
 			infos = append(infos, info)
 
 		case StateFreezerName:
-			if ReadStateScheme(db) != PathScheme {
+			if ReadStateScheme(db) != PathScheme || db.StateStore() != nil {
 				continue
 			}
 			datadir, err := db.AncientDatadir()
@@ -41,7 +41,15 @@ type freezerdb struct {
 	ancientRoot string
 	ethdb.KeyValueStore
 	ethdb.AncientStore
-	diffStore ethdb.KeyValueStore
+	diffStore  ethdb.KeyValueStore
+	stateStore ethdb.Database
 }
 
+func (frdb *freezerdb) StateStoreReader() ethdb.Reader {
+	if frdb.stateStore == nil {
+		return frdb
+	}
+	return frdb.stateStore
+}
+
 // AncientDatadir returns the path of root ancient directory.
@@ -64,6 +72,11 @@ func (frdb *freezerdb) Close() error {
 			errs = append(errs, err)
 		}
 	}
+	if frdb.stateStore != nil {
+		if err := frdb.stateStore.Close(); err != nil {
+			errs = append(errs, err)
+		}
+	}
 	if len(errs) != 0 {
 		return fmt.Errorf("%v", errs)
 	}
@@ -81,6 +94,17 @@ func (frdb *freezerdb) SetDiffStore(diff ethdb.KeyValueStore) {
 	frdb.diffStore = diff
 }
 
+func (frdb *freezerdb) StateStore() ethdb.Database {
+	return frdb.stateStore
+}
+
+func (frdb *freezerdb) SetStateStore(state ethdb.Database) {
+	if frdb.stateStore != nil {
+		frdb.stateStore.Close()
+	}
+	frdb.stateStore = state
+}
+
 // Freeze is a helper method used for external testing to trigger and block until
 // a freeze cycle completes, without having to sleep for a minute to trigger the
 // automatic background run.
@@ -104,7 +128,8 @@ func (frdb *freezerdb) Freeze(threshold uint64) error {
 // nofreezedb is a database wrapper that disables freezer data retrievals.
 type nofreezedb struct {
 	ethdb.KeyValueStore
-	diffStore ethdb.KeyValueStore
+	diffStore  ethdb.KeyValueStore
+	stateStore ethdb.Database
 }
 
 // HasAncient returns an error as we don't have a backing chain freezer.
@@ -170,6 +195,21 @@ func (db *nofreezedb) SetDiffStore(diff ethdb.KeyValueStore) {
 	db.diffStore = diff
 }
 
+func (db *nofreezedb) StateStore() ethdb.Database {
+	return db.stateStore
+}
+
+func (db *nofreezedb) SetStateStore(state ethdb.Database) {
+	db.stateStore = state
+}
+
+func (db *nofreezedb) StateStoreReader() ethdb.Reader {
+	if db.stateStore != nil {
+		return db.stateStore
+	}
+	return db
+}
+
 func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err error) {
 	// Unlike other ancient-related methods, this method does not return
 	// errNotSupported when invoked.
@@ -609,6 +649,11 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
 	it := db.NewIterator(keyPrefix, keyStart)
 	defer it.Release()
 
+	var trieIter ethdb.Iterator
+	if db.StateStore() != nil {
+		trieIter = db.StateStore().NewIterator(keyPrefix, nil)
+		defer trieIter.Release()
+	}
 	var (
 		count int64
 		start = time.Now()
@@ -659,14 +704,14 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
 			bodies.Add(size)
 		case bytes.HasPrefix(key, blockReceiptsPrefix) && len(key) == (len(blockReceiptsPrefix)+8+common.HashLength):
 			receipts.Add(size)
-		case IsLegacyTrieNode(key, it.Value()):
-			legacyTries.Add(size)
 		case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerTDSuffix):
 			tds.Add(size)
 		case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerHashSuffix):
 			numHashPairings.Add(size)
 		case bytes.HasPrefix(key, headerNumberPrefix) && len(key) == (len(headerNumberPrefix)+common.HashLength):
 			hashNumPairings.Add(size)
+		case IsLegacyTrieNode(key, it.Value()):
+			legacyTries.Add(size)
 		case bytes.HasPrefix(key, stateIDPrefix) && len(key) == len(stateIDPrefix)+common.HashLength:
 			stateLookups.Add(size)
 		case IsAccountTrieNode(key):
@@ -728,6 +773,46 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
 			logged = time.Now()
 		}
 	}
+	// inspect separate trie db
+	if trieIter != nil {
+		count = 0
+		logged = time.Now()
+		for trieIter.Next() {
+			var (
+				key   = trieIter.Key()
+				value = trieIter.Value()
+				size  = common.StorageSize(len(key) + len(value))
+			)
+
+			switch {
+			case IsLegacyTrieNode(key, value):
+				legacyTries.Add(size)
+			case bytes.HasPrefix(key, stateIDPrefix) && len(key) == len(stateIDPrefix)+common.HashLength:
+				stateLookups.Add(size)
+			case IsAccountTrieNode(key):
+				accountTries.Add(size)
+			case IsStorageTrieNode(key):
+				storageTries.Add(size)
+			default:
+				var accounted bool
+				for _, meta := range [][]byte{
+					fastTrieProgressKey, persistentStateIDKey, trieJournalKey} {
+					if bytes.Equal(key, meta) {
+						metadata.Add(size)
+						break
+					}
+				}
+				if !accounted {
+					unaccounted.Add(size)
+				}
+			}
+			count++
+			if count%1000 == 0 && time.Since(logged) > 8*time.Second {
+				log.Info("Inspecting separate state database", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
+				logged = time.Now()
+			}
+		}
+	}
 	// Display the database statistic of key-value store.
 	stats := [][]string{
 		{"Key-Value store", "Headers", headers.Size(), headers.Count()},
@@ -768,6 +853,28 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
 		}
 		total += ancient.size()
 	}
+
+	// inspect ancient state in separate trie db if exist
+	if trieIter != nil {
+		stateAncients, err := inspectFreezers(db.StateStore())
+		if err != nil {
+			return err
+		}
+		for _, ancient := range stateAncients {
+			for _, table := range ancient.sizes {
+				if ancient.name == "chain" {
+					break
+				}
+				stats = append(stats, []string{
+					fmt.Sprintf("Ancient store (%s)", strings.Title(ancient.name)),
+					strings.Title(table.name),
+					table.size.String(),
+					fmt.Sprintf("%d", ancient.count()),
+				})
+			}
+			total += ancient.size()
+		}
+	}
 	table := tablewriter.NewWriter(os.Stdout)
 	table.SetHeader([]string{"Database", "Category", "Size", "Items"})
 	table.SetFooter([]string{"", "Total", total.String(), " "})
@@ -213,6 +213,18 @@ func (t *table) SetDiffStore(diff ethdb.KeyValueStore) {
 	panic("not implement")
 }
 
+func (t *table) StateStore() ethdb.Database {
+	return nil
+}
+
+func (t *table) SetStateStore(state ethdb.Database) {
+	panic("not implement")
+}
+
+func (t *table) StateStoreReader() ethdb.Reader {
+	return nil
+}
+
 // NewBatchWithSize creates a write-only database batch with pre-allocated buffer.
 func (t *table) NewBatchWithSize(size int) ethdb.Batch {
 	return &tableBatch{t.db.NewBatchWithSize(size), t.prefix}
@@ -158,13 +158,19 @@ func (p *Pruner) PruneAll(genesis *core.Genesis) error {
 }
 
 func pruneAll(maindb ethdb.Database, g *core.Genesis) error {
+	var pruneDB ethdb.Database
+	if maindb != nil && maindb.StateStore() != nil {
+		pruneDB = maindb.StateStore()
+	} else {
+		pruneDB = maindb
+	}
 	var (
 		count  int
 		size   common.StorageSize
 		pstart = time.Now()
 		logged = time.Now()
-		batch  = maindb.NewBatch()
-		iter   = maindb.NewIterator(nil, nil)
+		batch  = pruneDB.NewBatch()
+		iter   = pruneDB.NewIterator(nil, nil)
 	)
 	start := time.Now()
 	for iter.Next() {
@@ -194,7 +200,7 @@ func pruneAll(maindb ethdb.Database, g *core.Genesis) error {
 				batch.Reset()
 
 				iter.Release()
-				iter = maindb.NewIterator(nil, key)
+				iter = pruneDB.NewIterator(nil, key)
 			}
 		}
 	}
@@ -218,7 +224,7 @@ func pruneAll(maindb ethdb.Database, g *core.Genesis) error {
 			end = nil
 		}
 		log.Info("Compacting database", "range", fmt.Sprintf("%#x-%#x", start, end), "elapsed", common.PrettyDuration(time.Since(cstart)))
-		if err := maindb.Compact(start, end); err != nil {
+		if err := pruneDB.Compact(start, end); err != nil {
 			log.Error("Database compaction failed", "error", err)
 			return err
 		}
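The same select-the-trie-store pattern appears in pruneAll, prune, and Pruner.Prune below. A hypothetical helper (not in the diff) that captures the intent:

package example

import "github.com/ethereum/go-ethereum/ethdb"

// stateStoreOrSelf returns the separate state store when one is attached,
// otherwise the main chain database, mirroring the pruneDB/trienodedb
// selection done inline in the pruner.
func stateStoreOrSelf(maindb ethdb.Database) ethdb.Database {
    if maindb != nil && maindb.StateStore() != nil {
        return maindb.StateStore()
    }
    return maindb
}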
@@ -249,13 +255,19 @@ func prune(snaptree *snapshot.Tree, root common.Hash, maindb ethdb.Database, sta
 	// that the false-positive is low enough(~0.05%). The probablity of the
 	// dangling node is the state root is super low. So the dangling nodes in
 	// theory will never ever be visited again.
+	var pruneDB ethdb.Database
+	if maindb != nil && maindb.StateStore() != nil {
+		pruneDB = maindb.StateStore()
+	} else {
+		pruneDB = maindb
+	}
 	var (
 		skipped, count int
 		size           common.StorageSize
 		pstart         = time.Now()
 		logged         = time.Now()
-		batch          = maindb.NewBatch()
-		iter           = maindb.NewIterator(nil, nil)
+		batch          = pruneDB.NewBatch()
+		iter           = pruneDB.NewIterator(nil, nil)
 	)
 	for iter.Next() {
 		key := iter.Key()
@@ -302,7 +314,7 @@ func prune(snaptree *snapshot.Tree, root common.Hash, maindb ethdb.Database, sta
 				batch.Reset()
 
 				iter.Release()
-				iter = maindb.NewIterator(nil, key)
+				iter = pruneDB.NewIterator(nil, key)
 			}
 		}
 	}
@@ -347,7 +359,7 @@ func prune(snaptree *snapshot.Tree, root common.Hash, maindb ethdb.Database, sta
 			end = nil
 		}
 		log.Info("Compacting database", "range", fmt.Sprintf("%#x-%#x", start, end), "elapsed", common.PrettyDuration(time.Since(cstart)))
-		if err := maindb.Compact(start, end); err != nil {
+		if err := pruneDB.Compact(start, end); err != nil {
 			log.Error("Database compaction failed", "error", err)
 			return err
 		}
@@ -585,10 +597,17 @@ func (p *Pruner) Prune(root common.Hash) error {
 		// Use the bottom-most diff layer as the target
 		root = layers[len(layers)-1].Root()
 	}
+	// if the separated state db has been set, use this db to prune data
+	var trienodedb ethdb.Database
+	if p.db != nil && p.db.StateStore() != nil {
+		trienodedb = p.db.StateStore()
+	} else {
+		trienodedb = p.db
+	}
 	// Ensure the root is really present. The weak assumption
 	// is the presence of root can indicate the presence of the
 	// entire trie.
-	if !rawdb.HasLegacyTrieNode(p.db, root) {
+	if !rawdb.HasLegacyTrieNode(trienodedb, root) {
 		// The special case is for clique based networks(goerli
 		// and some other private networks), it's possible that two
 		// consecutive blocks will have same root. In this case snapshot
@@ -602,7 +621,7 @@ func (p *Pruner) Prune(root common.Hash) error {
 		// as the pruning target.
 		var found bool
 		for i := len(layers) - 2; i >= 2; i-- {
-			if rawdb.HasLegacyTrieNode(p.db, layers[i].Root()) {
+			if rawdb.HasLegacyTrieNode(trienodedb, layers[i].Root()) {
 				root = layers[i].Root()
 				found = true
 				log.Info("Selecting middle-layer as the pruning target", "root", root, "depth", i)
@@ -610,7 +629,7 @@ func (p *Pruner) Prune(root common.Hash) error {
 			}
 		}
 		if !found {
-			if blob := rawdb.ReadLegacyTrieNode(p.db, p.snaptree.DiskRoot()); len(blob) != 0 {
+			if blob := rawdb.ReadLegacyTrieNode(trienodedb, p.snaptree.DiskRoot()); len(blob) != 0 {
 				root = p.snaptree.DiskRoot()
 				found = true
 				log.Info("Selecting disk-layer as the pruning target", "root", root)
@@ -67,6 +67,10 @@ import (
 	"github.com/ethereum/go-ethereum/trie/triedb/pathdb"
 )
 
+const (
+	ChainDBNamespace = "eth/db/chaindata/"
+)
+
 // Config contains the configuration options of the ETH protocol.
 // Deprecated: use ethconfig.Config instead.
 type Config = ethconfig.Config
@@ -134,7 +138,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
 
 	// Assemble the Ethereum object
 	chainDb, err := stack.OpenAndMergeDatabase("chaindata", config.DatabaseCache, config.DatabaseHandles,
-		config.DatabaseFreezer, config.DatabaseDiff, "eth/db/chaindata/", false, config.PersistDiff, config.PruneAncientData)
+		config.DatabaseFreezer, config.DatabaseDiff, ChainDBNamespace, false, config.PersistDiff, config.PruneAncientData)
 	if err != nil {
 		return nil, err
 	}
@@ -155,11 +155,16 @@ type AncientStater interface {
 	AncientDatadir() (string, error)
 }
 
+type StateStoreReader interface {
+	StateStoreReader() Reader
+}
+
 // Reader contains the methods required to read data from both key-value as well as
 // immutable ancient data.
 type Reader interface {
 	KeyValueReader
 	AncientReader
+	StateStoreReader
 }
 
 // Writer contains the methods required to write data to both key-value as well as
@@ -189,12 +194,18 @@ type DiffStore interface {
 	SetDiffStore(diff KeyValueStore)
 }
 
+type StateStore interface {
+	StateStore() Database
+	SetStateStore(state Database)
+}
+
 // Database contains all the methods required by the high level database to not
 // only access the key-value data store but also the chain freezer.
 type Database interface {
 	Reader
 	Writer
 	DiffStore
+	StateStore
 	Batcher
 	Iteratee
 	Stater
@@ -94,6 +94,18 @@ func (db *Database) SetDiffStore(diff ethdb.KeyValueStore) {
 	panic("not supported")
 }
 
+func (db *Database) StateStore() ethdb.Database {
+	panic("not supported")
+}
+
+func (db *Database) SetStateStore(state ethdb.Database) {
+	panic("not supported")
+}
+
+func (db *Database) StateStoreReader() ethdb.Reader {
+	return db
+}
+
 func (db *Database) ReadAncients(fn func(op ethdb.AncientReaderOp) error) (err error) {
 	return fn(db)
 }
@@ -183,7 +183,6 @@ func makeGenesis(faucets []*ecdsa.PrivateKey, sealers []*ecdsa.PrivateKey) *core
 func makeSealer(genesis *core.Genesis) (*node.Node, *eth.Ethereum, error) {
 	// Define the basic configurations for the Ethereum node
 	datadir, _ := os.MkdirTemp("", "")
 
 	config := &node.Config{
 		Name:    "geth",
 		Version: params.Version,
node/node.go
@@ -784,10 +784,30 @@ func (n *Node) OpenAndMergeDatabase(name string, cache, handles int, freezer, di
 	if persistDiff {
 		chainDataHandles = handles * chainDataHandlesPercentage / 100
 	}
+	var statediskdb ethdb.Database
+	var err error
+	// Open the separated state database if the state directory exists
+	if n.IsSeparatedDB() {
+		// Allocate half of the handles and cache to this separate state data database
+		statediskdb, err = n.OpenDatabaseWithFreezer(name+"/state", cache/2, chainDataHandles/2, "", "eth/db/statedata/", readonly, false, false, pruneAncientData)
+		if err != nil {
+			return nil, err
+		}
+
+		// Reduce the handles and cache to this separate database because it is not a complete database with no trie data storing in it.
+		cache = int(float64(cache) * 0.6)
+		chainDataHandles = int(float64(chainDataHandles) * 0.6)
+	}
+
 	chainDB, err := n.OpenDatabaseWithFreezer(name, cache, chainDataHandles, freezer, namespace, readonly, false, false, pruneAncientData)
 	if err != nil {
 		return nil, err
 	}
 
+	if statediskdb != nil {
+		chainDB.SetStateStore(statediskdb)
+	}
+
 	if persistDiff {
 		diffStore, err := n.OpenDiffDatabase(name, handles-chainDataHandles, diff, namespace, readonly)
 		if err != nil {
@@ -835,6 +855,16 @@ func (n *Node) OpenDatabaseWithFreezer(name string, cache, handles int, ancient,
 	return db, err
 }
 
+// IsSeparatedDB check the state subdirectory of db, if subdirectory exists, return true
+func (n *Node) IsSeparatedDB() bool {
+	separateDir := filepath.Join(n.ResolvePath("chaindata"), "state")
+	fileInfo, err := os.Stat(separateDir)
+	if os.IsNotExist(err) {
+		return false
+	}
+	return fileInfo.IsDir()
+}
+
 func (n *Node) OpenDiffDatabase(name string, handles int, diff, namespace string, readonly bool) (*leveldb.Database, error) {
 	n.lock.Lock()
 	defer n.lock.Unlock()
@@ -91,6 +91,12 @@ type Database struct {
 // the legacy hash-based scheme is used by default.
 func NewDatabase(diskdb ethdb.Database, config *Config) *Database {
 	// Sanitize the config and use the default one if it's not specified.
+	var triediskdb ethdb.Database
+	if diskdb != nil && diskdb.StateStore() != nil {
+		triediskdb = diskdb.StateStore()
+	} else {
+		triediskdb = diskdb
+	}
 	dbScheme := rawdb.ReadStateScheme(diskdb)
 	if config == nil {
 		if dbScheme == rawdb.PathScheme {
@@ -110,11 +116,11 @@ func NewDatabase(diskdb ethdb.Database, config *Config) *Database {
 	}
 	var preimages *preimageStore
 	if config.Preimages {
-		preimages = newPreimageStore(diskdb)
+		preimages = newPreimageStore(triediskdb)
 	}
 	db := &Database{
 		config:    config,
-		diskdb:    diskdb,
+		diskdb:    triediskdb,
 		preimages: preimages,
 	}
 	/*
@@ -123,25 +129,25 @@ func NewDatabase(diskdb ethdb.Database, config *Config) *Database {
 	 * 3. Last, use the default scheme, namely hash scheme
 	 */
 	if config.HashDB != nil {
-		if rawdb.ReadStateScheme(diskdb) == rawdb.PathScheme {
+		if rawdb.ReadStateScheme(triediskdb) == rawdb.PathScheme {
 			log.Warn("incompatible state scheme", "old", rawdb.PathScheme, "new", rawdb.HashScheme)
 		}
-		db.backend = hashdb.New(diskdb, config.HashDB, mptResolver{})
+		db.backend = hashdb.New(triediskdb, config.HashDB, mptResolver{})
 	} else if config.PathDB != nil {
-		if rawdb.ReadStateScheme(diskdb) == rawdb.HashScheme {
+		if rawdb.ReadStateScheme(triediskdb) == rawdb.HashScheme {
 			log.Warn("incompatible state scheme", "old", rawdb.HashScheme, "new", rawdb.PathScheme)
 		}
-		db.backend = pathdb.New(diskdb, config.PathDB)
+		db.backend = pathdb.New(triediskdb, config.PathDB)
 	} else if strings.Compare(dbScheme, rawdb.PathScheme) == 0 {
 		if config.PathDB == nil {
 			config.PathDB = pathdb.Defaults
 		}
-		db.backend = pathdb.New(diskdb, config.PathDB)
+		db.backend = pathdb.New(triediskdb, config.PathDB)
 	} else {
 		if config.HashDB == nil {
 			config.HashDB = hashdb.Defaults
 		}
-		db.backend = hashdb.New(diskdb, config.HashDB, mptResolver{})
+		db.backend = hashdb.New(triediskdb, config.HashDB, mptResolver{})
 	}
 	return db
 }
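Call sites keep passing the combined chain database to trie.NewDatabase; the constructor now resolves the actual trie disk store itself via StateStore(). A hedged usage sketch (the in-memory database is just a stand-in):

package example

import (
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/trie"
)

// newTrieDB builds a trie database exactly as callers did before this change;
// if the chain database had a separate state store attached, NewDatabase would
// route the hashdb/pathdb backend and preimages to that store internally.
func newTrieDB() *trie.Database {
    chaindb := rawdb.NewMemoryDatabase() // stand-in for the real chain database
    return trie.NewDatabase(chaindb, nil)
}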