fix: state history isn't written

Fynn 2023-09-27 15:01:49 +08:00
parent b8bad314ed
commit 4259f4c1f8
7 changed files with 150 additions and 71 deletions
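For context, part of what the hunks below address: errors returned by AppendRaw inside WriteStateHistory's ModifyAncients callback were dropped, so a failed append left the state history silently unwritten. The old pattern looked like the fragment sketched here (same db, id, and meta identifiers as the WriteStateHistory hunk further down); the hunk replaces it with an explicit check on every append.

// Old pattern: the error from AppendRaw is discarded, so a failed append
// leaves the state history entry unwritten without any log or error.
db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
	op.AppendRaw(stateHistoryMeta, id-1, meta)
	// ... four more appends, equally unchecked ...
	return nil
})

The hbss2pbss hunks additionally fold the old prune-hash-trie step into the conversion command and repair the state freezer's item offset afterwards, so that subsequent state history writes land at the expected index.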

@ -74,7 +74,6 @@ Remove blockchain and state databases`,
// dbMigrateFreezerCmd,
dbCheckStateContentCmd,
dbHbss2PbssCmd,
dbPruneHashTrieCmd,
dbTrieGetCmd,
dbTrieDeleteCmd,
},
@ -106,6 +105,8 @@ a data corruption.`,
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.ForceFlag,
utils.AncientFlag,
},
Usage: "Convert Hash-Base to Path-Base trie node.",
Description: `This command iterates the entire trie node database and convert the hash-base node to path-base node.`,
@ -136,17 +137,6 @@ a data corruption.`,
},
Description: "This command delete the specify trie node from the database.",
}
dbPruneHashTrieCmd = &cli.Command{
Action: pruneHashTrie,
Name: "prune-hash-trie",
ArgsUsage: "",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
},
Usage: "[Caution]Prune all the hash trie node in diskdb",
Description: `This command iterates the entrie kv in leveldb and delete all the hash trie node.`,
}
dbStatCmd = &cli.Command{
Action: dbStats,
Name: "stats",
@ -941,6 +931,8 @@ func hbss2pbss(ctx *cli.Context) error {
jobnum = 1000
}
force := ctx.Bool(utils.ForceFlag.Name)
stack, _ := makeConfigNode(ctx)
defer stack.Close()
@ -948,65 +940,74 @@ func hbss2pbss(ctx *cli.Context) error {
db.Sync()
defer db.Close()
config := trie.HashDefaults
triedb := trie.NewDatabase(db, config)
triedb.Cap(0)
log.Info("hbss2pbss triedb", "scheme", triedb.Scheme())
defer triedb.Close()
// convert hbss trie node to pbss trie node
lastStateID := rawdb.ReadPersistentStateID(db)
if lastStateID == 0 || force {
config := trie.HashDefaults
triedb := trie.NewDatabase(db, config)
triedb.Cap(0)
log.Info("hbss2pbss triedb", "scheme", triedb.Scheme())
defer triedb.Close()
headerHash := rawdb.ReadHeadHeaderHash(db)
blockNumber := rawdb.ReadHeaderNumber(db, headerHash)
if blockNumber == nil {
log.Error("read header number failed.")
return fmt.Errorf("read header number failed")
}
log.Info("hbss2pbss converting", "HeaderHash: ", headerHash.String(), ", blockNumber: ", *blockNumber)
var headerBlockHash common.Hash
var trieRootHash common.Hash
if *blockNumber != math.MaxUint64 {
headerBlockHash = rawdb.ReadCanonicalHash(db, *blockNumber)
if headerBlockHash == (common.Hash{}) {
return fmt.Errorf("ReadHeadBlockHash empty hash")
headerHash := rawdb.ReadHeadHeaderHash(db)
blockNumber := rawdb.ReadHeaderNumber(db, headerHash)
if blockNumber == nil {
log.Error("read header number failed.")
return fmt.Errorf("read header number failed")
}
blockHeader := rawdb.ReadHeader(db, headerBlockHash, *blockNumber)
trieRootHash = blockHeader.Root
fmt.Println("Canonical Hash: ", headerBlockHash.String(), ", TrieRootHash: ", trieRootHash.String())
}
if (trieRootHash == common.Hash{}) {
log.Error("Empty root hash")
return fmt.Errorf("Empty root hash.")
log.Info("hbss2pbss converting", "HeaderHash: ", headerHash.String(), ", blockNumber: ", *blockNumber)
var headerBlockHash common.Hash
var trieRootHash common.Hash
if *blockNumber != math.MaxUint64 {
headerBlockHash = rawdb.ReadCanonicalHash(db, *blockNumber)
if headerBlockHash == (common.Hash{}) {
return fmt.Errorf("ReadHeadBlockHash empty hash")
}
blockHeader := rawdb.ReadHeader(db, headerBlockHash, *blockNumber)
trieRootHash = blockHeader.Root
fmt.Println("Canonical Hash: ", headerBlockHash.String(), ", TrieRootHash: ", trieRootHash.String())
}
if (trieRootHash == common.Hash{}) {
log.Error("Empty root hash")
return fmt.Errorf("Empty root hash.")
}
id := trie.StateTrieID(trieRootHash)
theTrie, err := trie.New(id, triedb)
if err != nil {
log.Error("fail to new trie tree", "err", err, "rootHash", err, trieRootHash.String())
return err
}
h2p, err := trie.NewHbss2Pbss(theTrie, triedb, trieRootHash, *blockNumber, jobnum)
if err != nil {
log.Error("fail to new hash2pbss", "err", err, "rootHash", err, trieRootHash.String())
return err
}
h2p.Run()
} else {
log.Info("Convert hbss to pbss success. Nothing to do.")
}
id := trie.StateTrieID(trieRootHash)
theTrie, err := trie.New(id, triedb)
// repair state ancient offset
lastStateID = rawdb.ReadPersistentStateID(db)
if lastStateID == 0 {
log.Error("Convert hbss to pbss trie node error. The last state id is still 0")
}
ancient := stack.ResolveAncient("chaindata", ctx.String(utils.AncientFlag.Name))
err = rawdb.ResetStateFreezerTableOffset(ancient, lastStateID)
if err != nil {
log.Error("fail to new trie tree", "err", err, "rootHash", err, trieRootHash.String())
log.Error("Reset state freezer table offset failed", "error", err)
return err
}
h2p, err := trie.NewHbss2Pbss(theTrie, triedb, trieRootHash, *blockNumber, jobnum)
// prune hbss trie node
err = rawdb.PruneHashTrieNodeInDataBase(db)
if err != nil {
log.Error("fail to new hash2pbss", "err", err, "rootHash", err, trieRootHash.String())
log.Error("Prune Hash trie node in database failed", "error", err)
return err
}
h2p.Run()
return nil
}
func pruneHashTrie(ctx *cli.Context) error {
if ctx.NArg() != 0 {
return fmt.Errorf("required none argument")
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, false, false)
defer db.Close()
return rawdb.PruneHashTrieNodeInDataBase(db)
}

@ -207,7 +207,12 @@ var (
Usage: "Exits after block synchronisation completes",
Category: flags.EthCategory,
}
// hbss2pbss command options
ForceFlag = &cli.BoolFlag{
Name: "force",
Usage: "Force convert hbss trie node to pbss trie node. Ingore any metadata",
Value: false,
}
// Dump command options.
IterativeOutputFlag = &cli.BoolFlag{
Name: "iterative",

@ -256,11 +256,31 @@ func ReadStateHistory(db ethdb.AncientReaderOp, id uint64) ([]byte, []byte, []by
// history starts from one (zero for initial state).
func WriteStateHistory(db ethdb.AncientWriter, id uint64, meta []byte, accountIndex []byte, storageIndex []byte, accounts []byte, storages []byte) {
db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
op.AppendRaw(stateHistoryMeta, id-1, meta)
op.AppendRaw(stateHistoryAccountIndex, id-1, accountIndex)
op.AppendRaw(stateHistoryStorageIndex, id-1, storageIndex)
op.AppendRaw(stateHistoryAccountData, id-1, accounts)
op.AppendRaw(stateHistoryStorageData, id-1, storages)
err := op.AppendRaw(stateHistoryMeta, id-1, meta)
if err != nil {
log.Error("WriteStateHistory: failed to append meta", "err", err)
return err
}
err = op.AppendRaw(stateHistoryAccountIndex, id-1, accountIndex)
if err != nil {
log.Error("WriteStateHistory: failed to append account index", "err", err)
return err
}
err = op.AppendRaw(stateHistoryStorageIndex, id-1, storageIndex)
if err != nil {
log.Error("WriteStateHistory: failed to append storage index", "err", err)
return err
}
err = op.AppendRaw(stateHistoryAccountData, id-1, accounts)
if err != nil {
log.Error("WriteStateHistory: failed to append account data", "err", err)
return err
}
err = op.AppendRaw(stateHistoryStorageData, id-1, storages)
if err != nil {
log.Error("WriteStateHistory: failed to append storage data", "err", err)
return err
}
return nil
})
}
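Since the five appends differ only in the table constant and the payload, the same checks could also be written as a loop. A minimal sketch, not the committed implementation, using the same ethdb.AncientWriteOp interface and table constants shown above; note that WriteStateHistory still discards the error returned by ModifyAncients itself.

db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
	// Table/payload pairs, appended in the same order as above.
	for _, item := range []struct {
		table string
		blob  []byte
	}{
		{stateHistoryMeta, meta},
		{stateHistoryAccountIndex, accountIndex},
		{stateHistoryStorageIndex, storageIndex},
		{stateHistoryAccountData, accounts},
		{stateHistoryStorageData, storages},
	} {
		if err := op.AppendRaw(item.table, id-1, item.blob); err != nil {
			log.Error("WriteStateHistory failed", "table", item.table, "err", err)
			return err
		}
	}
	return nil
})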

@ -18,9 +18,12 @@ package rawdb
import (
"fmt"
"path/filepath"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
)
type tableSize struct {
@ -144,3 +147,23 @@ func InspectFreezerTable(ancient string, freezerName string, tableName string, s
table.dumpIndexStdout(start, end)
return nil
}
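// ResetStateFreezerTableOffset resets the virtual tail (items offset) of every
// state freezer table to the given value, so that state history written
// afterwards lines up with the persistent state id.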
func ResetStateFreezerTableOffset(ancient string, virtualTail uint64) error {
path, tables := filepath.Join(ancient, stateFreezerName), stateFreezerNoSnappy
for name, disableSnappy := range tables {
log.Info("Handle table", "name", name, "disableSnappy", disableSnappy)
table, err := newTable(path, name, metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, freezerTableSize, disableSnappy, false)
if err != nil {
log.Error("New table failed", "error", err)
return err
}
// Reset the metadata of the freezer table
err = table.ResetItemsOffset(virtualTail)
if err != nil {
log.Error("Reset items offset of the table", "name", name, "error", err)
return err
}
}
return nil
}
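As a rough sanity check, a state freezer table can be re-opened afterwards and its index dumped with the existing dumpIndexStdout helper (the same one InspectFreezerTable uses above); the first entry should then report the new offset. A minimal sketch inside package rawdb; the helper name and the [0, 5) range are illustrative, and the trailing newTable arguments mirror the call above.

// dumpStateTableIndex is a hypothetical debugging helper: it re-opens one
// state freezer table and prints its first index entries, which should now
// carry the reset offset.
func dumpStateTableIndex(ancient string, name string) error {
	path := filepath.Join(ancient, stateFreezerName)
	table, err := newTable(path, name, metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, freezerTableSize, stateFreezerNoSnappy[name], false)
	if err != nil {
		return err
	}
	defer table.Close()
	table.dumpIndexStdout(0, 5)
	return nil
}

For example, dumpStateTableIndex(ancient, stateHistoryMeta) run after the reset should show the first index entry carrying the offset passed as virtualTail.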

@ -593,13 +593,13 @@ func PruneHashTrieNodeInDataBase(db ethdb.Database) error {
db.Delete(key)
total_num++
if total_num%100000 == 0 {
log.Info("Pruning ", "Complete progress: ", total_num, "hash-base trie nodes")
log.Info("Pruning hash-base state trie nodes", "Complete progress: ", total_num)
}
default:
continue
}
}
log.Info("Pruning ", "Complete progress", total_num, "hash-base trie nodes")
log.Info("Pruning hash-base state trie nodes", "Complete progress", total_num)
return nil
}

@ -949,3 +949,33 @@ func (t *freezerTable) dumpIndex(w io.Writer, start, stop int64) {
}
fmt.Fprintf(w, "|--------------------------|\n")
}
// ResetItemsOffset rewrites the first index entry of the table so that its
// offset field reports the given virtual tail, i.e. the number of items
// treated as already deleted from the table.
func (t *freezerTable) ResetItemsOffset(virtualTail uint64) error {
stat, err := t.index.Stat()
if err != nil {
return err
}
if stat.Size() == 0 {
return fmt.Errorf("index size is zero when resetting items offset")
}
// The first index entry is special: its offset field stores the number of
// items removed below the tail, so overwriting it moves the virtual tail.
var firstIndex indexEntry
buffer := make([]byte, indexEntrySize)
if _, err := t.index.ReadAt(buffer, 0); err != nil {
return err
}
firstIndex.unmarshalBinary(buffer)
firstIndex.offset = uint32(virtualTail)
if _, err := t.index.WriteAt(firstIndex.append(nil), 0); err != nil {
return err
}
// Read the entry back so the applied offset shows up in the logs.
var firstIndex2 indexEntry
buffer2 := make([]byte, indexEntrySize)
if _, err := t.index.ReadAt(buffer2, 0); err != nil {
return err
}
firstIndex2.unmarshalBinary(buffer2)
log.Info("Reset freezer table offset", "file", t.index.Name(), "offset", firstIndex2.offset)
return nil
}

@ -83,7 +83,7 @@ func (h2p *Hbss2Pbss) Run() {
h2p.ConcurrentTraversal(h2p.trie, h2p.root, []byte{})
h2p.wg.Wait()
log.Info("Total complete: %v, go routines Num: %v, h2p concurrentQueue: %v\n", h2p.totalNum, runtime.NumGoroutine(), len(h2p.concurrentQueue))
log.Info("Total", "complete", h2p.totalNum, "go routines Num", runtime.NumGoroutine, "h2p concurrentQueue", len(h2p.concurrentQueue))
rawdb.WritePersistentStateID(h2p.db.diskdb, h2p.blocknum)
rawdb.WriteStateID(h2p.db.diskdb, h2p.stateRootHash, h2p.blocknum)