fix: state history hasn't been written

Fynn 2023-09-27 15:01:49 +08:00
parent b8bad314ed
commit 4259f4c1f8
7 changed files with 150 additions and 71 deletions

@@ -74,7 +74,6 @@ Remove blockchain and state databases`,
         // dbMigrateFreezerCmd,
         dbCheckStateContentCmd,
         dbHbss2PbssCmd,
-        dbPruneHashTrieCmd,
         dbTrieGetCmd,
         dbTrieDeleteCmd,
     },
@@ -106,6 +105,8 @@ a data corruption.`,
     Flags: []cli.Flag{
         utils.DataDirFlag,
         utils.SyncModeFlag,
+        utils.ForceFlag,
+        utils.AncientFlag,
     },
     Usage: "Convert Hash-Base to Path-Base trie node.",
     Description: `This command iterates the entire trie node database and convert the hash-base node to path-base node.`,
@@ -136,17 +137,6 @@ a data corruption.`,
     },
     Description: "This command delete the specify trie node from the database.",
 }
-dbPruneHashTrieCmd = &cli.Command{
-    Action:    pruneHashTrie,
-    Name:      "prune-hash-trie",
-    ArgsUsage: "",
-    Flags: []cli.Flag{
-        utils.DataDirFlag,
-        utils.SyncModeFlag,
-    },
-    Usage:       "[Caution]Prune all the hash trie node in diskdb",
-    Description: `This command iterates the entrie kv in leveldb and delete all the hash trie node.`,
-}
 dbStatCmd = &cli.Command{
     Action: dbStats,
     Name:   "stats",
@@ -941,6 +931,8 @@ func hbss2pbss(ctx *cli.Context) error {
         jobnum = 1000
     }
 
+    force := ctx.Bool(utils.ForceFlag.Name)
+
     stack, _ := makeConfigNode(ctx)
     defer stack.Close()
@@ -948,6 +940,9 @@ func hbss2pbss(ctx *cli.Context) error {
     db.Sync()
     defer db.Close()
 
+    // convert hbss trie node to pbss trie node
+    lastStateID := rawdb.ReadPersistentStateID(db)
+    if lastStateID == 0 || force {
     config := trie.HashDefaults
     triedb := trie.NewDatabase(db, config)
     triedb.Cap(0)
@@ -993,20 +988,26 @@ func hbss2pbss(ctx *cli.Context) error {
             return err
         }
         h2p.Run()
-    }
-    return nil
-}
-
-func pruneHashTrie(ctx *cli.Context) error {
-    if ctx.NArg() != 0 {
-        return fmt.Errorf("required none argument")
-    }
-
-    stack, _ := makeConfigNode(ctx)
-    defer stack.Close()
-
-    db := utils.MakeChainDatabase(ctx, stack, false, false)
-    defer db.Close()
-
-    return rawdb.PruneHashTrieNodeInDataBase(db)
+    } else {
+        log.Info("Convert hbss to pbss success. Nothing to do.")
+    }
+
+    // repair state ancient offset
+    lastStateID = rawdb.ReadPersistentStateID(db)
+    if lastStateID == 0 {
+        log.Error("Convert hbss to pbss trie node error. The last state id is still 0")
+    }
+    ancient := stack.ResolveAncient("chaindata", ctx.String(utils.AncientFlag.Name))
+    err = rawdb.ResetStateFreezerTableOffset(ancient, lastStateID)
+    if err != nil {
+        log.Error("Reset state freezer table offset failed", "error", err)
+        return err
+    }
+    // prune hbss trie node
+    err = rawdb.PruneHashTrieNodeInDataBase(db)
+    if err != nil {
+        log.Error("Prune Hash trie node in database failed", "error", err)
+        return err
+    }
+    return nil
 }
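For context, the hunk above is the new post-conversion repair path: hbss2pbss now reads back the persistent state id, rebases the state freezer's table offsets to it, and then prunes the obsolete hash-based trie nodes (the standalone prune-hash-trie command is removed). Below is a condensed sketch of that sequence, not the commit's code: repairStateAncient is a hypothetical helper name, and unlike the command above it returns an error instead of merely logging when the persistent state id is still zero.

// Package repairsketch is illustrative only; the real logic lives in the
// hbss2pbss command shown in the diff above.
package repairsketch

import (
    "fmt"

    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/ethdb"
)

// repairStateAncient mirrors the repair sequence added to hbss2pbss: anchor the
// state freezer's offset to the persisted state id, then drop the leftover
// hash-based trie nodes.
func repairStateAncient(db ethdb.Database, ancient string) error {
    lastStateID := rawdb.ReadPersistentStateID(db)
    if lastStateID == 0 {
        // The conversion should have persisted a state id; without it there is
        // nothing to anchor the freezer offset to.
        return fmt.Errorf("persistent state id is still 0 after conversion")
    }
    // Align the state freezer index with the converted state history range.
    if err := rawdb.ResetStateFreezerTableOffset(ancient, lastStateID); err != nil {
        return err
    }
    // Hash-based trie nodes are obsolete once the path-based scheme is in place.
    return rawdb.PruneHashTrieNodeInDataBase(db)
}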

@@ -207,7 +207,12 @@ var (
     Usage:    "Exits after block synchronisation completes",
     Category: flags.EthCategory,
 }
+// hbss2pbss command options
+ForceFlag = &cli.BoolFlag{
+    Name:  "force",
+    Usage: "Force convert hbss trie node to pbss trie node. Ignore any metadata",
+    Value: false,
+}
 // Dump command options.
 IterativeOutputFlag = &cli.BoolFlag{
     Name: "iterative",

@@ -256,11 +256,31 @@ func ReadStateHistory(db ethdb.AncientReaderOp, id uint64) ([]byte, []byte, []by
 // history starts from one(zero for initial state).
 func WriteStateHistory(db ethdb.AncientWriter, id uint64, meta []byte, accountIndex []byte, storageIndex []byte, accounts []byte, storages []byte) {
     db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
-        op.AppendRaw(stateHistoryMeta, id-1, meta)
-        op.AppendRaw(stateHistoryAccountIndex, id-1, accountIndex)
-        op.AppendRaw(stateHistoryStorageIndex, id-1, storageIndex)
-        op.AppendRaw(stateHistoryAccountData, id-1, accounts)
-        op.AppendRaw(stateHistoryStorageData, id-1, storages)
+        err := op.AppendRaw(stateHistoryMeta, id-1, meta)
+        if err != nil {
+            log.Error("WriteStateHistory failed", "err", err)
+            return err
+        }
+        err = op.AppendRaw(stateHistoryAccountIndex, id-1, accountIndex)
+        if err != nil {
+            log.Error("WriteStateHistory failed", "err", err)
+            return err
+        }
+        err = op.AppendRaw(stateHistoryStorageIndex, id-1, storageIndex)
+        if err != nil {
+            log.Error("WriteStateHistory failed", "err", err)
+            return err
+        }
+        err = op.AppendRaw(stateHistoryAccountData, id-1, accounts)
+        if err != nil {
+            log.Error("WriteStateHistory failed", "err", err)
+            return err
+        }
+        err = op.AppendRaw(stateHistoryStorageData, id-1, storages)
+        if err != nil {
+            log.Error("WriteStateHistory failed", "err", err)
+            return err
+        }
         return nil
     })
 }
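This hunk is the core of the fix named in the commit title: the previous code dropped the error returned by each op.AppendRaw call (and WriteStateHistory itself still ignores the result of ModifyAncients), so a failed freezer write left no trace and the state history silently went missing. For context only, a more compact way to propagate the first failure is sketched below; it is not the commit's code, writeStateHistorySketch is a hypothetical name, and it assumes it sits in core/rawdb next to WriteStateHistory so that the stateHistory* table names and the ethdb import are in scope.

// writeStateHistorySketch appends one state history entry to the freezer and
// surfaces the first append error to the caller instead of logging it per table.
func writeStateHistorySketch(db ethdb.AncientWriter, id uint64, meta, accountIndex, storageIndex, accounts, storages []byte) error {
    _, err := db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
        for _, item := range []struct {
            table string
            data  []byte
        }{
            {stateHistoryMeta, meta},
            {stateHistoryAccountIndex, accountIndex},
            {stateHistoryStorageIndex, storageIndex},
            {stateHistoryAccountData, accounts},
            {stateHistoryStorageData, storages},
        } {
            // Abort on the first failed append so the whole batch is reported as failed.
            if err := op.AppendRaw(item.table, id-1, item.data); err != nil {
                return err
            }
        }
        return nil
    })
    return err
}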

@@ -18,9 +18,12 @@ package rawdb
 import (
     "fmt"
+    "path/filepath"
 
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/ethdb"
+    "github.com/ethereum/go-ethereum/log"
+    "github.com/ethereum/go-ethereum/metrics"
 )
 
 type tableSize struct {
@@ -144,3 +147,23 @@ func InspectFreezerTable(ancient string, freezerName string, tableName string, s
     table.dumpIndexStdout(start, end)
     return nil
 }
+
+func ResetStateFreezerTableOffset(ancient string, virtualTail uint64) error {
+    path, tables := filepath.Join(ancient, stateFreezerName), stateFreezerNoSnappy
+
+    for name, disableSnappy := range tables {
+        log.Info("Handle table", "name", name, "disableSnappy", disableSnappy)
+        table, err := newTable(path, name, metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, freezerTableSize, disableSnappy, false)
+        if err != nil {
+            log.Error("New table failed", "error", err)
+            return err
+        }
+        // Reset the metadata of the freezer table
+        err = table.ResetItemsOffset(virtualTail)
+        if err != nil {
+            log.Error("Reset items offset of the table", "name", name, "error", err)
+            return err
+        }
+    }
+    return nil
+}
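The new rawdb.ResetStateFreezerTableOffset helper walks every table of the state freezer and rewrites its first index entry so that item numbering starts at the given virtual tail. A hypothetical standalone usage sketch follows; the ancient path and the state id are placeholders (the command above resolves them via stack.ResolveAncient and rawdb.ReadPersistentStateID), and since the helper opens the freezer files directly it should only be run while geth is stopped.

package main

import (
    "log"

    "github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
    // Assumed locations/values, for illustration only.
    ancient := "/data/geth/chaindata/ancient"
    var lastStateID uint64 = 12345

    if err := rawdb.ResetStateFreezerTableOffset(ancient, lastStateID); err != nil {
        log.Fatalf("reset state freezer table offset: %v", err)
    }
    log.Printf("state freezer tables rebased to id %d", lastStateID)
}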

@@ -593,13 +593,13 @@ func PruneHashTrieNodeInDataBase(db ethdb.Database) error {
             db.Delete(key)
             total_num++
             if total_num%100000 == 0 {
-                log.Info("Pruning ", "Complete progress: ", total_num, "hash-base trie nodes")
+                log.Info("Pruning hash-base state trie nodes", "Complete progress: ", total_num)
             }
         default:
             continue
         }
     }
-    log.Info("Pruning ", "Complete progress", total_num, "hash-base trie nodes")
+    log.Info("Pruning hash-base state trie nodes", "Complete progress", total_num)
     return nil
 }
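This hunk and the Hbss2Pbss.Run change at the end of the diff fix the same misuse: go-ethereum's logger is not printf-style, it takes a fixed message followed by alternating key/value pairs, so the old calls printed the format verbs verbatim and paired the trailing arguments up as context. A minimal illustration, assuming the pre-slog log15-style API this version of the codebase uses (the handler setup is only there so the example prints something):

package main

import (
    "os"

    "github.com/ethereum/go-ethereum/log"
)

func main() {
    // Route log output to stderr; geth normally wires this up from CLI flags.
    log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(false))))

    totalNum := 1_234_567

    // Misuse: the %v verbs are printed literally and the trailing arguments are
    // interpreted as key/value context, not substituted into the message.
    log.Info("Pruned %v hash-base trie nodes in %v", totalNum, "10m")

    // Intended use: fixed message, then alternating key/value pairs.
    log.Info("Pruning hash-base state trie nodes", "complete", totalNum)
}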

@@ -949,3 +949,33 @@ func (t *freezerTable) dumpIndex(w io.Writer, start, stop int64) {
     }
     fmt.Fprintf(w, "|--------------------------|\n")
 }
+
+func (t *freezerTable) ResetItemsOffset(virtualTail uint64) error {
+    stat, err := t.index.Stat()
+    if err != nil {
+        return err
+    }
+
+    if stat.Size() == 0 {
+        return fmt.Errorf("Stat size is zero when ResetVirtualTail.")
+    }
+
+    var firstIndex indexEntry
+
+    buffer := make([]byte, indexEntrySize)
+    t.index.ReadAt(buffer, 0)
+    firstIndex.unmarshalBinary(buffer)
+
+    firstIndex.offset = uint32(virtualTail)
+    t.index.WriteAt(firstIndex.append(nil), 0)
+
+    var firstIndex2 indexEntry
+    buffer2 := make([]byte, indexEntrySize)
+    t.index.ReadAt(buffer2, 0)
+    firstIndex2.unmarshalBinary(buffer2)
+
+    log.Info("Reset Index", "filenum", t.index.Name(), "offset", firstIndex2.offset)
+
+    return nil
+}
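ResetItemsOffset works because each freezer table's index file is a sequence of fixed-size entries, and the freezer treats the offset field of the very first entry as the number of items preceding the table's tail, so rewriting it to the last state id rebases all item ids. The sketch below mirrors that 6-byte layout as an assumption (uint16 file number plus uint32 offset, big-endian), with locally defined types so it stands alone; it is not the repository's indexEntry implementation.

package main

import (
    "encoding/binary"
    "fmt"
)

// indexEntry mirrors the assumed on-disk layout of one freezer index record:
// 2 bytes of file number followed by 4 bytes of offset.
type indexEntry struct {
    filenum uint32 // stored on disk as a uint16
    offset  uint32
}

const indexEntrySize = 6

func (e indexEntry) append(b []byte) []byte {
    start := len(b)
    out := append(b, make([]byte, indexEntrySize)...)
    binary.BigEndian.PutUint16(out[start:], uint16(e.filenum))
    binary.BigEndian.PutUint32(out[start+2:], e.offset)
    return out
}

func (e *indexEntry) unmarshalBinary(b []byte) {
    e.filenum = uint32(binary.BigEndian.Uint16(b[:2]))
    e.offset = binary.BigEndian.Uint32(b[2:6])
}

func main() {
    // Pretend the hbss-to-pbss conversion ended at state id 42: writing 42 into
    // the first entry's offset makes the table report 42 items before its tail.
    head := indexEntry{filenum: 0, offset: 42}
    buf := head.append(nil)

    var back indexEntry
    back.unmarshalBinary(buf)
    fmt.Printf("encoded=%x filenum=%d virtual-tail=%d\n", buf, back.filenum, back.offset)
}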

@@ -83,7 +83,7 @@ func (h2p *Hbss2Pbss) Run() {
     h2p.ConcurrentTraversal(h2p.trie, h2p.root, []byte{})
     h2p.wg.Wait()
 
-    log.Info("Total complete: %v, go routines Num: %v, h2p concurrentQueue: %v\n", h2p.totalNum, runtime.NumGoroutine(), len(h2p.concurrentQueue))
+    log.Info("Total", "complete", h2p.totalNum, "go routines Num", runtime.NumGoroutine(), "h2p concurrentQueue", len(h2p.concurrentQueue))
 
     rawdb.WritePersistentStateID(h2p.db.diskdb, h2p.blocknum)
     rawdb.WriteStateID(h2p.db.diskdb, h2p.stateRootHash, h2p.blocknum)