fix: keep 90k blocks in ancient db when pruning blocks (#2481)

Chris Li 2024-05-29 15:11:52 +08:00 committed by GitHub
parent 05543e558d
commit 63e7eac394
17 changed files with 164 additions and 89 deletions
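
Both halves of the change anchor on the same window: the offline pruner must now reserve at least this many blocks in the ancient store, and a single-database node's freezer keeps this many recent blocks in the key-value store. A minimal reference snippet for the constant (it lives in upstream go-ethereum's params package):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// The retention window both the pruner and the freezer now use: 90,000 blocks.
	fmt.Println(params.FullImmutabilityThreshold) // 90000
}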

@ -75,7 +75,7 @@ func NewLevelDBDatabaseWithFreezer(file string, cache int, handles int, ancient
if err != nil {
return nil, err
}
frdb, err := rawdb.NewDatabaseWithFreezer(kvdb, ancient, namespace, readonly, disableFreeze, isLastOffset, pruneAncientData)
frdb, err := rawdb.NewDatabaseWithFreezer(kvdb, ancient, namespace, readonly, disableFreeze, isLastOffset, pruneAncientData, false)
if err != nil {
kvdb.Close()
return nil, err
@ -178,11 +178,10 @@ func BlockchainCreator(t *testing.T, chaindbPath, AncientPath string, blockRemai
// Force run a freeze cycle
type freezer interface {
Freeze() error
Freeze(threshold uint64) error
Ancients() (uint64, error)
}
blockchain.SetFinalized(blocks[len(blocks)-1].Header())
db.(freezer).Freeze()
db.(freezer).Freeze(10)
frozen, err := db.Ancients()
// make sure there are frozen items

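Tests that force a freeze cycle now state the retention threshold explicitly instead of relying on the freezer's internal default. A sketch of the updated pattern, reusing the populated db and testing handle from the surrounding test:

// Force a synchronous freeze cycle that keeps only the 10 newest blocks in the
// key-value store, then check that the ancient store actually received items.
type freezer interface {
	Freeze(threshold uint64) error
	Ancients() (uint64, error)
}
if err := db.(freezer).Freeze(10); err != nil {
	t.Fatalf("forced freeze failed: %v", err)
}
if frozen, _ := db.Ancients(); frozen == 0 {
	t.Fatal("expected frozen items in the ancient store")
}
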
@ -43,9 +43,11 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/triedb"
"github.com/ethereum/go-ethereum/triedb/pathdb"
cli "github.com/urfave/cli/v2"
)
@ -245,7 +247,16 @@ func accessDb(ctx *cli.Context, stack *node.Node) (ethdb.Database, error) {
NoBuild: true,
AsyncBuild: false,
}
snaptree, err := snapshot.New(snapconfig, chaindb, triedb.NewDatabase(chaindb, nil), headBlock.Root(), TriesInMemory, false)
dbScheme := rawdb.ReadStateScheme(chaindb)
var config *triedb.Config
if dbScheme == rawdb.PathScheme {
config = &triedb.Config{
PathDB: utils.PathDBConfigAddJournalFilePath(stack, pathdb.ReadOnly),
}
} else if dbScheme == rawdb.HashScheme {
config = triedb.HashDefaults
}
snaptree, err := snapshot.New(snapconfig, chaindb, triedb.NewDatabase(chaindb, config), headBlock.Root(), TriesInMemory, false)
if err != nil {
log.Error("snaptree error", "err", err)
return nil, err // The relevant snapshot(s) might not exist
@ -333,6 +344,9 @@ func pruneBlock(ctx *cli.Context) error {
stack, config = makeConfigNode(ctx)
defer stack.Close()
blockAmountReserved = ctx.Uint64(utils.BlockAmountReserved.Name)
if blockAmountReserved < params.FullImmutabilityThreshold {
return fmt.Errorf("block-amount-reserved must be greater than or equal to %d", params.FullImmutabilityThreshold)
}
chaindb, err = accessDb(ctx, stack)
if err != nil {
return err

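The new lower bound keeps offline pruning consistent with the freezer's retention window: a prune can never leave fewer than params.FullImmutabilityThreshold blocks behind. A fragment with hypothetical numbers showing roughly what remains afterwards:

// Hypothetical head height; the reservation uses the new flag default.
blockAmountReserved := uint64(params.FullImmutabilityThreshold) // 90,000
head := uint64(1_250_000)
// Roughly speaking, only the newest blockAmountReserved blocks survive the prune.
fmt.Printf("ancient store keeps roughly blocks %d..%d\n", head-blockAmountReserved+1, head)
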
@ -1082,6 +1082,7 @@ Please note that --` + MetricsHTTPFlag.Name + ` must be set to start the server.
Name: "block-amount-reserved",
Usage: "Sets the expected number of blocks to remain for offline block pruning",
Category: flags.BlockHistoryCategory,
Value: params.FullImmutabilityThreshold,
}
CheckSnapshotWithMPT = &cli.BoolFlag{

@ -163,7 +163,7 @@ func TestHistoryImportAndExport(t *testing.T) {
// Now import Era.
freezer := t.TempDir()
db2, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false, false, false, false)
db2, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false, false, false, false, false)
if err != nil {
panic(err)
}

@ -1832,14 +1832,10 @@ func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme s
}
// Force run a freeze cycle
type freezer interface {
Freeze() error
Freeze(threshold uint64) error
Ancients() (uint64, error)
}
if tt.freezeThreshold < uint64(tt.canonicalBlocks) {
final := uint64(tt.canonicalBlocks) - tt.freezeThreshold
chain.SetFinalized(canonblocks[int(final)-1].Header())
}
db.(freezer).Freeze()
db.(freezer).Freeze(tt.freezeThreshold)
// Set the simulated pivot block
if tt.pivotBlock != nil {

@ -2045,14 +2045,10 @@ func testSetHeadWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme
// Force run a freeze cycle
type freezer interface {
Freeze() error
Freeze(threshold uint64) error
Ancients() (uint64, error)
}
if tt.freezeThreshold < uint64(tt.canonicalBlocks) {
final := uint64(tt.canonicalBlocks) - tt.freezeThreshold
chain.SetFinalized(canonblocks[int(final)-1].Header())
}
db.(freezer).Freeze()
db.(freezer).Freeze(tt.freezeThreshold)
// Set the simulated pivot block
if tt.pivotBlock != nil {

@ -974,7 +974,7 @@ func testFastVsFullChains(t *testing.T, scheme string) {
t.Fatalf("failed to insert receipt %d: %v", n, err)
}
// Freezer style fast import the chain.
ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false)
ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
if err != nil {
t.Fatalf("failed to create temp freezer db: %v", err)
}
@ -1069,7 +1069,7 @@ func testLightVsFastVsFullChainHeads(t *testing.T, scheme string) {
// makeDb creates a db instance for testing.
makeDb := func() ethdb.Database {
db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false)
db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
if err != nil {
t.Fatalf("failed to create temp freezer db: %v", err)
}
@ -1957,7 +1957,7 @@ func testLargeReorgTrieGC(t *testing.T, scheme string) {
competitor, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*TriesInMemory+1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{3}) })
// Import the shared chain and the original canonical one
db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false)
db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
defer db.Close()
chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil)
@ -2026,7 +2026,7 @@ func testBlockchainRecovery(t *testing.T, scheme string) {
_, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), int(height), nil)
// Import the chain as an ancient-first node and ensure all pointers are updated
ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false)
ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
if err != nil {
t.Fatalf("failed to create temp freezer db: %v", err)
}
@ -2097,7 +2097,7 @@ func testInsertReceiptChainRollback(t *testing.T, scheme string) {
}
// Set up a BlockChain that uses the ancient store.
ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false)
ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
if err != nil {
t.Fatalf("failed to create temp freezer db: %v", err)
}
@ -2167,7 +2167,7 @@ func testLowDiffLongChain(t *testing.T, scheme string) {
})
// Import the canonical chain
diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false)
diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
defer diskdb.Close()
chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil)
@ -2384,7 +2384,7 @@ func testInsertKnownChainData(t *testing.T, typ string, scheme string) {
b.OffsetTime(-9) // A higher difficulty
})
// Import the shared chain and the original canonical one
chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false)
chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
if err != nil {
t.Fatalf("failed to create temp freezer db: %v", err)
}
@ -2555,7 +2555,7 @@ func testInsertKnownChainDataWithMerging(t *testing.T, typ string, mergeHeight i
}
})
// Import the shared chain and the original canonical one
chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false)
chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
if err != nil {
t.Fatalf("failed to create temp freezer db: %v", err)
}
@ -3858,7 +3858,7 @@ func testSetCanonical(t *testing.T, scheme string) {
}
gen.AddTx(tx)
})
diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false)
diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
defer diskdb.Close()
chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
@ -4483,7 +4483,7 @@ func (c *mockParlia) CalcDifficulty(chain consensus.ChainHeaderReader, time uint
func TestParliaBlobFeeReward(t *testing.T) {
// Have N headers in the freezer
frdir := t.TempDir()
db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false, false)
db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false, false, false)
if err != nil {
t.Fatalf("failed to create database with ancient backend")
}

@ -518,7 +518,7 @@ func checkBlobSidecarsRLP(have, want types.BlobSidecars) error {
func TestAncientStorage(t *testing.T) {
// Freezer style fast import the chain.
frdir := t.TempDir()
db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false, false, false, false)
db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false, false, false, false, false)
if err != nil {
t.Fatalf("failed to create database with ancient backend")
}
@ -657,7 +657,7 @@ func TestHashesInRange(t *testing.T) {
func BenchmarkWriteAncientBlocks(b *testing.B) {
// Open freezer database.
frdir := b.TempDir()
db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false, false, false, false)
db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false, false, false, false, false)
if err != nil {
b.Fatalf("failed to create database with ancient backend")
}
@ -1001,7 +1001,7 @@ func TestHeadersRLPStorage(t *testing.T) {
// Have N headers in the freezer
frdir := t.TempDir()
db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false, false, false, false)
db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false, false, false, false, false)
if err != nil {
t.Fatalf("failed to create database with ancient backend")
}

@ -24,12 +24,12 @@ import (
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
)
const (
@ -51,25 +51,31 @@ var (
// The background thread will keep moving ancient chain segments from key-value
// database to flat files for saving space on live database.
type chainFreezer struct {
threshold atomic.Uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests)
*Freezer
quit chan struct{}
wg sync.WaitGroup
trigger chan chan struct{} // Manual blocking freeze trigger, test determinism
freezeEnv atomic.Value
multiDatabase bool
}
// newChainFreezer initializes the freezer for ancient chain data.
func newChainFreezer(datadir string, namespace string, readonly bool, offset uint64) (*chainFreezer, error) {
func newChainFreezer(datadir string, namespace string, readonly bool, offset uint64, multiDatabase bool) (*chainFreezer, error) {
freezer, err := NewChainFreezer(datadir, namespace, readonly, offset)
if err != nil {
return nil, err
}
return &chainFreezer{
cf := chainFreezer{
Freezer: freezer,
quit: make(chan struct{}),
trigger: make(chan chan struct{}),
}, nil
}
cf.threshold.Store(params.FullImmutabilityThreshold)
return &cf, nil
}
// Close closes the chain freezer instance and terminates the background thread.
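
The constructor seeds the new threshold with the immutability window, so a freshly opened single-database node keeps the last 90,000 blocks in the key-value store unless a test overrides it through Freeze. A minimal sketch of that default from inside package rawdb (path and namespace are placeholders):

// Hypothetical check of the constructor default; not a test in this commit.
cf, err := newChainFreezer("/data/bsc/geth/chaindata/ancient/chain", "eth/db/chaindata/", false, 0, false)
if err != nil {
	log.Crit("failed to open chain freezer", "err", err)
}
fmt.Println(cf.threshold.Load() == params.FullImmutabilityThreshold) // true
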
@ -185,13 +191,26 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) {
continue
}
threshold, err := f.freezeThreshold(nfdb)
var (
frozen uint64
threshold uint64
first uint64 // the first block to freeze
last uint64 // the last block to freeze
hash common.Hash
number *uint64
head *types.Header
)
// The finalized block is only used as the freeze indicator for the multiDatabase feature;
// when multiDatabase is false, keep the most recent 90k blocks (params.FullImmutabilityThreshold) in the key-value db.
if f.multiDatabase {
threshold, err = f.freezeThreshold(nfdb)
if err != nil {
backoff = true
log.Debug("Current full block not old enough to freeze", "err", err)
continue
}
frozen := f.frozen.Load()
frozen = f.frozen.Load()
// Short circuit if the blocks below threshold are already frozen.
if frozen != 0 && frozen-1 >= threshold {
@ -199,15 +218,74 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) {
log.Debug("Ancient blocks frozen already", "threshold", threshold, "frozen", frozen)
continue
}
// Seems we have data ready to be frozen, process in usable batches
var (
start = time.Now()
first = frozen // the first block to freeze
last = threshold // the last block to freeze
)
hash = ReadHeadBlockHash(nfdb)
if hash == (common.Hash{}) {
log.Debug("Current full block hash unavailable") // new chain, empty database
backoff = true
continue
}
number = ReadHeaderNumber(nfdb, hash)
if number == nil {
log.Error("Current full block number unavailable", "hash", hash)
backoff = true
continue
}
head = ReadHeader(nfdb, hash, *number)
if head == nil {
log.Error("Current full block unavailable", "number", *number, "hash", hash)
backoff = true
continue
}
first = frozen
last = threshold
if last-first+1 > freezerBatchLimit {
last = freezerBatchLimit + first - 1
}
} else {
// Retrieve the freezing threshold.
hash = ReadHeadBlockHash(nfdb)
if hash == (common.Hash{}) {
log.Debug("Current full block hash unavailable") // new chain, empty database
backoff = true
continue
}
number = ReadHeaderNumber(nfdb, hash)
threshold = f.threshold.Load()
frozen = f.frozen.Load()
switch {
case number == nil:
log.Error("Current full block number unavailable", "hash", hash)
backoff = true
continue
case *number < threshold:
log.Debug("Current full block not old enough to freeze", "number", *number, "hash", hash, "delay", threshold)
backoff = true
continue
case *number-threshold <= frozen:
log.Debug("Ancient blocks frozen already", "number", *number, "hash", hash, "frozen", frozen)
backoff = true
continue
}
head = ReadHeader(nfdb, hash, *number)
if head == nil {
log.Error("Current full block unavailable", "number", *number, "hash", hash)
backoff = true
continue
}
first, _ = f.Ancients()
last = *number - threshold
if last-first > freezerBatchLimit {
last = first + freezerBatchLimit
}
}
// Seems we have data ready to be frozen, process in usable batches
var (
start = time.Now()
)
ancients, err := f.freezeRangeWithBlobs(nfdb, first, last)
if err != nil {
@ -295,24 +373,6 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) {
log.Debug("Deep froze chain segment", context...)
env, _ := f.freezeEnv.Load().(*ethdb.FreezerEnv)
hash := ReadHeadBlockHash(nfdb)
if hash == (common.Hash{}) {
log.Debug("Current full block hash unavailable") // new chain, empty database
backoff = true
continue
}
number := ReadHeaderNumber(nfdb, hash)
if number == nil {
log.Error("Current full block number unavailable", "hash", hash)
backoff = true
continue
}
head := ReadHeader(nfdb, hash, *number)
if head == nil {
log.Error("Current full block unavailable", "number", *number, "hash", hash)
backoff = true
continue
}
// try to prune blob data after the Cancun fork
if isCancun(env, head.Number, head.Time) {
f.tryPruneBlobAncientTable(env, *number)

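The else branch restores upstream geth's head-anchored window: a block becomes eligible for freezing only once it is threshold blocks behind the current head, and each cycle moves at most freezerBatchLimit blocks. A worked fragment with hypothetical numbers:

// One freeze cycle on a single-database node, hypothetical numbers.
var (
	head      = uint64(1_250_000) // current full block number (*number above)
	threshold = uint64(90_000)    // f.threshold, params.FullImmutabilityThreshold by default
	frozen    = uint64(1_100_000) // items already in the ancient store, f.Ancients()
	batch     = uint64(30_000)    // freezerBatchLimit
)
first := frozen          // next block to freeze
last := head - threshold // 1_160_000: the newest 90k blocks stay in the key-value db
if last-first > batch {
	last = first + batch // this cycle stops at 1_130_000; the rest waits for the next run
}
fmt.Println("freezing blocks", first, "through", last)
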
@ -145,10 +145,15 @@ func (frdb *freezerdb) HasSeparateBlockStore() bool {
// Freeze is a helper method used for external testing to trigger and block until
// a freeze cycle completes, without having to sleep for a minute to trigger the
// automatic background run.
func (frdb *freezerdb) Freeze() error {
func (frdb *freezerdb) Freeze(threshold uint64) error {
if frdb.AncientStore.(*chainFreezer).readonly {
return errReadOnly
}
// Set the freezer threshold to a temporary value
defer func(old uint64) {
frdb.AncientStore.(*chainFreezer).threshold.Store(old)
}(frdb.AncientStore.(*chainFreezer).threshold.Load())
frdb.AncientStore.(*chainFreezer).threshold.Store(threshold)
// Trigger a freeze cycle and block until it's done
trigger := make(chan struct{}, 1)
frdb.AncientStore.(*chainFreezer).trigger <- trigger
@ -366,7 +371,7 @@ func resolveChainFreezerDir(ancient string) string {
// value data store with a freezer moving immutable chain segments into cold
// storage. The passed ancient indicates the path of root ancient directory
// where the chain freezer can be opened.
func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace string, readonly, disableFreeze, isLastOffset, pruneAncientData bool) (ethdb.Database, error) {
func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace string, readonly, disableFreeze, isLastOffset, pruneAncientData, multiDatabase bool) (ethdb.Database, error) {
var offset uint64
// The offset of ancientDB should be handled differently in different scenarios.
if isLastOffset {
@ -402,7 +407,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st
}
// Create the idle freezer instance
frdb, err := newChainFreezer(resolveChainFreezerDir(ancient), namespace, readonly, offset)
frdb, err := newChainFreezer(resolveChainFreezerDir(ancient), namespace, readonly, offset, multiDatabase)
if err != nil {
printChainMetadata(db)
return nil, err
@ -583,6 +588,8 @@ type OpenOptions struct {
// Ephemeral means that filesystem sync operations should be avoided: data integrity in the face of
// a crash is not important. This option should typically be used in tests.
Ephemeral bool
MultiDataBase bool
}
// openKeyValueDatabase opens a disk-based key-value database, e.g. leveldb or pebble.
@ -633,7 +640,7 @@ func Open(o OpenOptions) (ethdb.Database, error) {
if len(o.AncientsDirectory) == 0 {
return kvdb, nil
}
frdb, err := NewDatabaseWithFreezer(kvdb, o.AncientsDirectory, o.Namespace, o.ReadOnly, o.DisableFreeze, o.IsLastOffset, o.PruneAncientData)
frdb, err := NewDatabaseWithFreezer(kvdb, o.AncientsDirectory, o.Namespace, o.ReadOnly, o.DisableFreeze, o.IsLastOffset, o.PruneAncientData, o.MultiDataBase)
if err != nil {
kvdb.Close()
return nil, err
@ -777,7 +784,6 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
trieIter = db.StateStore().NewIterator(keyPrefix, nil)
defer trieIter.Release()
}
if db.HasSeparateBlockStore() {
blockIter = db.BlockStore().NewIterator(keyPrefix, nil)
defer blockIter.Release()

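Callers that open the chain database through rawdb.Open reach the new flag via OpenOptions rather than the long positional argument list. A minimal sketch, assuming the upstream option fields plus BSC's additions (paths and sizes are placeholders):

db, err := rawdb.Open(rawdb.OpenOptions{
	Type:              "pebble",
	Directory:         "/data/bsc/geth/chaindata",
	AncientsDirectory: "/data/bsc/geth/chaindata/ancient",
	Namespace:         "eth/db/chaindata/",
	Cache:             512,
	Handles:           1024,
	MultiDataBase:     false, // single database: freezer keeps the newest 90k blocks
})
if err != nil {
	log.Crit("failed to open chain database", "err", err)
}
defer db.Close()
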
@ -382,7 +382,7 @@ func (p *BlockPruner) backUpOldDb(name string, cache, handles int, namespace str
log.Info("chainDB opened successfully")
// Get the number of items in old ancient db.
itemsOfAncient, err := chainDb.ItemAmountInAncient()
itemsOfAncient, err := chainDb.BlockStore().ItemAmountInAncient()
log.Info("Number of items in ancientDB", "itemsOfAncient", itemsOfAncient)
// If we can't access the freezer or it's empty, abort.

@ -212,7 +212,7 @@ func TestTxIndexer(t *testing.T) {
}
for _, c := range cases {
frdir := t.TempDir()
db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false, false)
db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false, false, false)
rawdb.WriteAncientBlocks(db, append([]*types.Block{gspec.ToBlock()}, blocks...), append([]types.Receipts{{}}, receipts...), big.NewInt(0))
// Index the initial blocks from ancient store

@ -60,7 +60,7 @@ func newTester(t *testing.T) *downloadTester {
// newTester creates a new downloader test mocker.
func newTesterWithNotification(t *testing.T, success func()) *downloadTester {
freezer := t.TempDir()
db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false, false, false, false)
db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false, false, false, false, false)
if err != nil {
panic(err)
}

@ -258,7 +258,7 @@ func (c *mockParlia) CalcDifficulty(chain consensus.ChainHeaderReader, time uint
func newTestParliaHandlerAfterCancun(t *testing.T, config *params.ChainConfig, mode downloader.SyncMode, preCancunBlks, postCancunBlks uint64) *testHandler {
// Have N headers in the freezer
frdir := t.TempDir()
db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false, false)
db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false, false, false)
if err != nil {
t.Fatalf("failed to create database with ancient backend")
}

@ -779,6 +779,7 @@ func (n *Node) OpenDatabase(name string, cache, handles int, namespace string, r
Cache: cache,
Handles: handles,
ReadOnly: readonly,
MultiDataBase: n.CheckIfMultiDataBase(),
})
}

@ -98,7 +98,7 @@ type tester struct {
func newTester(t *testing.T, historyLimit uint64) *tester {
var (
disk, _ = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false)
disk, _ = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
db = New(disk, &Config{
StateHistory: historyLimit,
CleanCacheSize: 256 * 1024,

@ -152,12 +152,13 @@ func (kr *JournalKVReader) Close() {
}
func newJournalWriter(file string, db ethdb.Database, journalType JournalType) JournalWriter {
log.Info("New journal writer", "path", file, "journalType", journalType)
if journalType == JournalKVType {
log.Info("New journal writer for journal kv")
return &JournalKVWriter{
diskdb: db,
}
} else {
log.Info("New journal writer for journal file", "path", file)
fd, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
if err != nil {
return nil
@ -169,8 +170,8 @@ func newJournalWriter(file string, db ethdb.Database, journalType JournalType) J
}
func newJournalReader(file string, db ethdb.Database, journalType JournalType) (JournalReader, error) {
log.Info("New journal reader", "path", file, "journalType", journalType)
if journalType == JournalKVType {
log.Info("New journal reader for journal kv")
journal := rawdb.ReadTrieJournal(db)
if len(journal) == 0 {
return nil, errMissJournal
@ -179,6 +180,7 @@ func newJournalReader(file string, db ethdb.Database, journalType JournalType) (
journalBuf: bytes.NewBuffer(journal),
}, nil
} else {
log.Info("New journal reader for journal file", "path", file)
fd, err := os.Open(file)
if errors.Is(err, fs.ErrNotExist) {
return nil, errMissJournal