all: remove forkchoicer and reorgNeeded (#29179)

This PR changes how sidechains are handled. 

Before the merge, it was possible to import a chain with a lower total difficulty without setting it as canonical. After the merge, we expect every chain imported via InsertChain to become canonical. Non-canonical blocks can still be inserted
with InsertBlockWithoutSetHead.

If, during InsertChain, the previously canonical chain is no longer canonical, we mark it as a sidechain and send the ChainSideEvents as usual.
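For illustration only (not part of this diff), here is a minimal sketch of how the two insertion paths described above are meant to be used. It follows the post-PR signatures visible in this diff; the exact NewBlockChain and InsertBlockWithoutSetHead signatures can differ between go-ethereum versions, so treat the calls below as an assumption rather than a stable API.

// Sketch: canonical import vs. side-block import after this change.
package main

import (
    "github.com/ethereum/go-ethereum/consensus/ethash"
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/core/vm"
)

func insertExample(gspec *core.Genesis, canonical types.Blocks, side []*types.Block) error {
    // NewBlockChain no longer takes a shouldPreserve callback after this PR.
    chain, err := core.NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil,
        ethash.NewFaker(), vm.Config{}, nil)
    if err != nil {
        return err
    }
    defer chain.Stop()

    // InsertChain: every block imported this way is expected to end up
    // canonical; if its parent is not the current head, a reorg is performed
    // and the displaced blocks are announced via ChainSideEvents.
    if _, err := chain.InsertChain(canonical); err != nil {
        return err
    }
    // InsertBlockWithoutSetHead: store blocks without moving the head, which
    // is how non-canonical (side) blocks are inserted post-merge.
    for _, b := range side {
        if err := chain.InsertBlockWithoutSetHead(b); err != nil {
            return err
        }
    }
    return nil
}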
Author: Marius van der Wijden (committed by GitHub)
Date:   2024-09-04 15:03:06 +02:00
Parent: dfd33c7792
Commit: b0b67be0a2
36 changed files with 184 additions and 353 deletions

@@ -262,7 +262,6 @@ func ImportHistory(chain *core.BlockChain, db ethdb.Database, dir string, networ
         start    = time.Now()
         reported = time.Now()
         imported = 0
-        forker   = core.NewForkChoice(chain, nil)
         h        = sha256.New()
         buf      = bytes.NewBuffer(nil)
     )
@@ -305,7 +304,7 @@ func ImportHistory(chain *core.BlockChain, db ethdb.Database, dir string, networ
         if err != nil {
             return fmt.Errorf("error reading receipts %d: %w", it.Number(), err)
         }
-        if status, err := chain.HeaderChain().InsertHeaderChain([]*types.Header{block.Header()}, start, forker); err != nil {
+        if status, err := chain.HeaderChain().InsertHeaderChain([]*types.Header{block.Header()}, start); err != nil {
             return fmt.Errorf("error inserting header %d: %w", it.Number(), err)
         } else if status != core.CanonStatTy {
             return fmt.Errorf("error inserting header %d, not canon: %v", it.Number(), status)

@@ -2210,7 +2210,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh
         }
     }
     // Disable transaction indexing/unindexing by default.
-    chain, err := core.NewBlockChain(chainDb, cache, gspec, nil, engine, vmcfg, nil, nil)
+    chain, err := core.NewBlockChain(chainDb, cache, gspec, nil, engine, vmcfg, nil)
     if err != nil {
         Fatalf("Can't create BlockChain: %v", err)
     }

@@ -78,7 +78,7 @@ func TestHistoryImportAndExport(t *testing.T) {
     })
     // Initialize BlockChain.
-    chain, err := core.NewBlockChain(db, nil, genesis, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+    chain, err := core.NewBlockChain(db, nil, genesis, nil, ethash.NewFaker(), vm.Config{}, nil)
     if err != nil {
         t.Fatalf("unable to initialize chain: %v", err)
     }
@@ -171,7 +171,7 @@ func TestHistoryImportAndExport(t *testing.T) {
     })
     genesis.MustCommit(db2, triedb.NewDatabase(db, triedb.HashDefaults))
-    imported, err := core.NewBlockChain(db2, nil, genesis, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+    imported, err := core.NewBlockChain(db2, nil, genesis, nil, ethash.NewFaker(), vm.Config{}, nil)
     if err != nil {
         t.Fatalf("unable to initialize chain: %v", err)
     }

@@ -55,7 +55,7 @@ func TestReimportMirroredState(t *testing.T) {
     copy(genspec.ExtraData[extraVanity:], addr[:])
 
     // Generate a batch of blocks, each properly signed
-    chain, _ := core.NewBlockChain(rawdb.NewMemoryDatabase(), nil, genspec, nil, engine, vm.Config{}, nil, nil)
+    chain, _ := core.NewBlockChain(rawdb.NewMemoryDatabase(), nil, genspec, nil, engine, vm.Config{}, nil)
     defer chain.Stop()
 
     _, blocks, _ := core.GenerateChainWithGenesis(genspec, engine, 3, func(i int, block *core.BlockGen) {
@@ -87,7 +87,7 @@ func TestReimportMirroredState(t *testing.T) {
     }
     // Insert the first two blocks and make sure the chain is valid
     db = rawdb.NewMemoryDatabase()
-    chain, _ = core.NewBlockChain(db, nil, genspec, nil, engine, vm.Config{}, nil, nil)
+    chain, _ = core.NewBlockChain(db, nil, genspec, nil, engine, vm.Config{}, nil)
     defer chain.Stop()
 
     if _, err := chain.InsertChain(blocks[:2]); err != nil {
@@ -100,7 +100,7 @@ func TestReimportMirroredState(t *testing.T) {
     // Simulate a crash by creating a new chain on top of the database, without
     // flushing the dirty states out. Insert the last block, triggering a sidechain
     // reimport.
-    chain, _ = core.NewBlockChain(db, nil, genspec, nil, engine, vm.Config{}, nil, nil)
+    chain, _ = core.NewBlockChain(db, nil, genspec, nil, engine, vm.Config{}, nil)
     defer chain.Stop()
 
     if _, err := chain.InsertChain(blocks[2:]); err != nil {

@@ -458,7 +458,7 @@ func (tt *cliqueTest) run(t *testing.T) {
         batches[len(batches)-1] = append(batches[len(batches)-1], block)
     }
     // Pass all the headers through clique and ensure tallying succeeds
-    chain, err := core.NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil)
+    chain, err := core.NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil)
     if err != nil {
         t.Fatalf("failed to create test chain: %v", err)
     }

@@ -195,7 +195,7 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
     // Time the insertion of the new chain.
     // State and blocks are stored in the same DB.
-    chainman, _ := NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+    chainman, _ := NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
     defer chainman.Stop()
 
     b.ReportAllocs()
     b.ResetTimer()
@@ -312,7 +312,7 @@ func benchReadChain(b *testing.B, full bool, count uint64) {
     if err != nil {
         b.Fatalf("error opening database at %v: %v", dir, err)
     }
-    chain, err := NewBlockChain(db, &cacheConfig, genesis, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+    chain, err := NewBlockChain(db, &cacheConfig, genesis, nil, ethash.NewFaker(), vm.Config{}, nil)
     if err != nil {
         b.Fatalf("error creating chain: %v", err)
     }

@@ -50,7 +50,7 @@ func testHeaderVerification(t *testing.T, scheme string) {
         headers[i] = block.Header()
     }
     // Run the header checker for blocks one-by-one, checking for both valid and invalid nonces
-    chain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+    chain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
     defer chain.Stop()
 
     for i := 0; i < len(blocks); i++ {
@@ -160,7 +160,7 @@ func testHeaderVerificationForMerging(t *testing.T, isClique bool) {
         postHeaders[i] = block.Header()
     }
     // Run the header checker for blocks one-by-one, checking for both valid and invalid nonces
-    chain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil)
+    chain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil)
     defer chain.Stop()
 
     // Verify the blocks before the merging

@@ -259,7 +259,6 @@ type BlockChain struct {
     validator  Validator // Block and state validator interface
     prefetcher Prefetcher
     processor  Processor // Block transaction processor interface
-    forker     *ForkChoice
     vmConfig   vm.Config
     logger     *tracing.Hooks
 }
@@ -267,7 +266,7 @@ type BlockChain struct {
 // NewBlockChain returns a fully initialised block chain using information
 // available in the database. It initialises the default Ethereum Validator
 // and Processor.
-func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis, overrides *ChainOverrides, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(header *types.Header) bool, txLookupLimit *uint64) (*BlockChain, error) {
+func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis, overrides *ChainOverrides, engine consensus.Engine, vmConfig vm.Config, txLookupLimit *uint64) (*BlockChain, error) {
     if cacheConfig == nil {
         cacheConfig = defaultCacheConfig
     }
@@ -312,7 +311,6 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
         return nil, err
     }
     bc.flushInterval.Store(int64(cacheConfig.TrieTimeLimit))
-    bc.forker = NewForkChoice(bc, shouldPreserve)
     bc.stateCache = state.NewDatabaseWithNodeDB(bc.db, bc.triedb)
     bc.validator = NewBlockValidator(chainConfig, bc)
     bc.prefetcher = newStatePrefetcher(chainConfig, bc.hc)
@@ -1243,13 +1241,6 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
         // Rewind may have occurred, skip in that case.
         if bc.CurrentHeader().Number.Cmp(head.Number()) >= 0 {
-            reorg, err := bc.forker.ReorgNeeded(bc.CurrentSnapBlock(), head.Header())
-            if err != nil {
-                log.Warn("Reorg failed", "err", err)
-                return false
-            } else if !reorg {
-                return false
-            }
             rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
             bc.currentSnapBlock.Store(head.Header())
             headFastBlockGauge.Update(int64(head.NumberU64()))
@@ -1548,42 +1539,30 @@ func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types
         return NonStatTy, err
     }
     currentBlock := bc.CurrentBlock()
-    reorg, err := bc.forker.ReorgNeeded(currentBlock, block.Header())
-    if err != nil {
-        return NonStatTy, err
-    }
-    if reorg {
-        // Reorganise the chain if the parent is not the head block
-        if block.ParentHash() != currentBlock.Hash() {
-            if err := bc.reorg(currentBlock, block); err != nil {
-                return NonStatTy, err
-            }
-        }
-        status = CanonStatTy
-    } else {
-        status = SideStatTy
-    }
-    // Set new head.
-    if status == CanonStatTy {
-        bc.writeHeadBlock(block)
-    }
-    if status == CanonStatTy {
-        bc.chainFeed.Send(ChainEvent{Block: block, Hash: block.Hash(), Logs: logs})
-        if len(logs) > 0 {
-            bc.logsFeed.Send(logs)
-        }
-        // In theory, we should fire a ChainHeadEvent when we inject
-        // a canonical block, but sometimes we can insert a batch of
-        // canonical blocks. Avoid firing too many ChainHeadEvents,
-        // we will fire an accumulated ChainHeadEvent and disable fire
-        // event here.
-        if emitHeadEvent {
-            bc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
-        }
-    } else {
-        bc.chainSideFeed.Send(ChainSideEvent{Block: block})
-    }
-    return status, nil
+
+    // Reorganise the chain if the parent is not the head block
+    if block.ParentHash() != currentBlock.Hash() {
+        if err := bc.reorg(currentBlock, block); err != nil {
+            return NonStatTy, err
+        }
+    }
+
+    // Set new head.
+    bc.writeHeadBlock(block)
+
+    bc.chainFeed.Send(ChainEvent{Block: block, Hash: block.Hash(), Logs: logs})
+    if len(logs) > 0 {
+        bc.logsFeed.Send(logs)
+    }
+    // In theory, we should fire a ChainHeadEvent when we inject
+    // a canonical block, but sometimes we can insert a batch of
+    // canonical blocks. Avoid firing too many ChainHeadEvents,
+    // we will fire an accumulated ChainHeadEvent and disable fire
+    // event here.
+    if emitHeadEvent {
+        bc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
+    }
+    return CanonStatTy, nil
 }
 
 // InsertChain attempts to insert the given batch of blocks in to the canonical
@@ -1634,7 +1613,6 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
     if bc.insertStopped() {
         return 0, nil
     }
-
     // Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
     SenderCacher.RecoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number(), chain[0].Time()), chain)
@@ -1667,24 +1645,10 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
     // 2. The block is stored as a sidechain, and is lying about it's stateroot, and passes a stateroot
     // from the canonical chain, which has not been verified.
     // Skip all known blocks that are behind us.
-    var (
-        reorg   bool
-        current = bc.CurrentBlock()
-    )
+    current := bc.CurrentBlock()
     for block != nil && bc.skipBlock(err, it) {
-        reorg, err = bc.forker.ReorgNeeded(current, block.Header())
-        if err != nil {
-            return it.index, err
-        }
-        if reorg {
-            // Switch to import mode if the forker says the reorg is necessary
-            // and also the block is not on the canonical chain.
-            // In eth2 the forker always returns true for reorg decision (blindly trusting
-            // the external consensus engine), but in order to prevent the unnecessary
-            // reorgs when importing known blocks, the special case is handled here.
-            if block.NumberU64() > current.Number.Uint64() || bc.GetCanonicalHash(block.NumberU64()) != block.Hash() {
-                break
-            }
+        if block.NumberU64() > current.Number.Uint64() || bc.GetCanonicalHash(block.NumberU64()) != block.Hash() {
+            break
         }
         log.Debug("Ignoring already known block", "number", block.Number(), "hash", block.Hash())
         stats.ignored++
@@ -2006,9 +1970,8 @@ func (bc *BlockChain) processBlock(block *types.Block, statedb *state.StateDB, s
 // insertSideChain is only used pre-merge.
 func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (int, error) {
     var (
-        externTd  *big.Int
-        lastBlock = block
-        current   = bc.CurrentBlock()
+        externTd *big.Int
+        current  = bc.CurrentBlock()
     )
     // The first sidechain block error is already verified to be ErrPrunedAncestor.
     // Since we don't import them here, we expect ErrUnknownAncestor for the remaining
@@ -2059,22 +2022,6 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
                 "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
                 "root", block.Root())
         }
-        lastBlock = block
-    }
-    // At this point, we've written all sidechain blocks to database. Loop ended
-    // either on some other error or all were processed. If there was some other
-    // error, we can ignore the rest of those blocks.
-    //
-    // If the externTd was larger than our local TD, we now need to reimport the previous
-    // blocks to regenerate the required state
-    reorg, err := bc.forker.ReorgNeeded(current, lastBlock.Header())
-    if err != nil {
-        return it.index, err
-    }
-    if !reorg {
-        localTd := bc.GetTd(current.Hash(), current.Number.Uint64())
-        log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().Number, "sidetd", externTd, "localtd", localTd)
-        return it.index, err
     }
     // Gather all the sidechain hashes (full blocks may be memory heavy)
     var (
@@ -2541,7 +2488,7 @@ func (bc *BlockChain) InsertHeaderChain(chain []*types.Header) (int, error) {
         return 0, errChainStopped
     }
     defer bc.chainmu.Unlock()
-    _, err := bc.hc.InsertHeaderChain(chain, start, bc.forker)
+    _, err := bc.hc.InsertHeaderChain(chain, start)
     return 0, err
 }

@@ -170,11 +170,6 @@ func (it *insertIterator) current() *types.Header {
     return it.chain[it.index].Header()
 }
 
-// first returns the first block in it.
-func (it *insertIterator) first() *types.Block {
-    return it.chain[0]
-}
-
 // remaining returns the number of remaining blocks.
 func (it *insertIterator) remaining() int {
     return len(it.chain) - it.index

@@ -1794,7 +1794,7 @@ func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme s
         config.SnapshotLimit = 256
         config.SnapshotWait = true
     }
-    chain, err := NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil, nil)
+    chain, err := NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil)
     if err != nil {
         t.Fatalf("Failed to create chain: %v", err)
     }
@@ -1859,7 +1859,7 @@ func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme s
     }
     defer db.Close()
 
-    newChain, err := NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil, nil)
+    newChain, err := NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil)
     if err != nil {
         t.Fatalf("Failed to recreate chain: %v", err)
     }
@@ -1931,7 +1931,7 @@ func testIssue23496(t *testing.T, scheme string) {
         }
         engine = ethash.NewFullFaker()
     )
-    chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
+    chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil)
     if err != nil {
         t.Fatalf("Failed to create chain: %v", err)
     }
@@ -1981,7 +1981,7 @@ func testIssue23496(t *testing.T, scheme string) {
     }
     defer db.Close()
 
-    chain, err = NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
+    chain, err = NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil)
     if err != nil {
         t.Fatalf("Failed to recreate chain: %v", err)
     }

@@ -1997,7 +1997,7 @@ func testSetHeadWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme
         config.SnapshotLimit = 256
         config.SnapshotWait = true
     }
-    chain, err := NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil, nil)
+    chain, err := NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil)
     if err != nil {
         t.Fatalf("Failed to create chain: %v", err)
     }

@@ -81,7 +81,7 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo
         }
         engine = ethash.NewFullFaker()
     )
-    chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(basic.scheme), gspec, nil, engine, vm.Config{}, nil, nil)
+    chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(basic.scheme), gspec, nil, engine, vm.Config{}, nil)
     if err != nil {
         t.Fatalf("Failed to create chain: %v", err)
     }
@@ -228,7 +228,7 @@ func (snaptest *snapshotTest) test(t *testing.T) {
     // Restart the chain normally
     chain.Stop()
-    newchain, err := NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
+    newchain, err := NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil)
     if err != nil {
         t.Fatalf("Failed to recreate chain: %v", err)
     }
@@ -270,13 +270,13 @@ func (snaptest *crashSnapshotTest) test(t *testing.T) {
     // the crash, we do restart twice here: one after the crash and one
     // after the normal stop. It's used to ensure the broken snapshot
     // can be detected all the time.
-    newchain, err := NewBlockChain(newdb, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
+    newchain, err := NewBlockChain(newdb, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil)
     if err != nil {
         t.Fatalf("Failed to recreate chain: %v", err)
     }
     newchain.Stop()
 
-    newchain, err = NewBlockChain(newdb, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
+    newchain, err = NewBlockChain(newdb, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil)
     if err != nil {
         t.Fatalf("Failed to recreate chain: %v", err)
     }
@@ -313,7 +313,7 @@ func (snaptest *gappedSnapshotTest) test(t *testing.T) {
         SnapshotLimit: 0,
         StateScheme:   snaptest.scheme,
     }
-    newchain, err := NewBlockChain(snaptest.db, cacheConfig, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
+    newchain, err := NewBlockChain(snaptest.db, cacheConfig, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil)
     if err != nil {
         t.Fatalf("Failed to recreate chain: %v", err)
     }
@@ -321,7 +321,7 @@ func (snaptest *gappedSnapshotTest) test(t *testing.T) {
     newchain.Stop()
 
     // Restart the chain with enabling the snapshot
-    newchain, err = NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
+    newchain, err = NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil)
     if err != nil {
         t.Fatalf("Failed to recreate chain: %v", err)
     }
@@ -349,7 +349,7 @@ func (snaptest *setHeadSnapshotTest) test(t *testing.T) {
     chain.SetHead(snaptest.setHead)
     chain.Stop()
 
-    newchain, err := NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
+    newchain, err := NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil)
     if err != nil {
         t.Fatalf("Failed to recreate chain: %v", err)
     }
@@ -385,7 +385,7 @@ func (snaptest *wipeCrashSnapshotTest) test(t *testing.T) {
         SnapshotLimit: 0,
         StateScheme:   snaptest.scheme,
     }
-    newchain, err := NewBlockChain(snaptest.db, config, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
+    newchain, err := NewBlockChain(snaptest.db, config, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil)
     if err != nil {
         t.Fatalf("Failed to recreate chain: %v", err)
     }
@@ -402,7 +402,7 @@ func (snaptest *wipeCrashSnapshotTest) test(t *testing.T) {
         SnapshotWait:  false, // Don't wait rebuild
         StateScheme:   snaptest.scheme,
     }
-    tmp, err := NewBlockChain(snaptest.db, config, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
+    tmp, err := NewBlockChain(snaptest.db, config, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil)
     if err != nil {
         t.Fatalf("Failed to recreate chain: %v", err)
     }
@@ -411,7 +411,7 @@ func (snaptest *wipeCrashSnapshotTest) test(t *testing.T) {
     tmp.triedb.Close()
     tmp.stopWithoutSaving()
 
-    newchain, err = NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
+    newchain, err = NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil)
     if err != nil {
         t.Fatalf("Failed to recreate chain: %v", err)
     }

@@ -22,6 +22,7 @@ import (
     "math/big"
     "math/rand"
     "os"
+    "path"
     "sync"
     "testing"
     "time"
@@ -61,7 +62,7 @@ func newCanonical(engine consensus.Engine, n int, full bool, scheme string) (eth
         }
     )
     // Initialize a fresh chain with only a genesis block
-    blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil)
+    blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil)
 
     // Create and inject the requested chain
     if n == 0 {
@@ -763,7 +764,7 @@ func testFastVsFullChains(t *testing.T, scheme string) {
     })
     // Import the chain as an archive node for the comparison baseline
     archiveDb := rawdb.NewMemoryDatabase()
-    archive, _ := NewBlockChain(archiveDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+    archive, _ := NewBlockChain(archiveDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
     defer archive.Stop()
 
     if n, err := archive.InsertChain(blocks); err != nil {
@@ -771,7 +772,7 @@ func testFastVsFullChains(t *testing.T, scheme string) {
     }
     // Fast import the chain as a non-archive node to test
     fastDb := rawdb.NewMemoryDatabase()
-    fast, _ := NewBlockChain(fastDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+    fast, _ := NewBlockChain(fastDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
     defer fast.Stop()
 
     headers := make([]*types.Header, len(blocks))
@@ -791,7 +792,7 @@ func testFastVsFullChains(t *testing.T, scheme string) {
     }
     defer ancientDb.Close()
 
-    ancient, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+    ancient, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
     defer ancient.Stop()
 
     if n, err := ancient.InsertHeaderChain(headers); err != nil {
@@ -911,7 +912,7 @@ func testLightVsFastVsFullChainHeads(t *testing.T, scheme string) {
     archiveCaching.TrieDirtyDisabled = true
     archiveCaching.StateScheme = scheme
 
-    archive, _ := NewBlockChain(archiveDb, &archiveCaching, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+    archive, _ := NewBlockChain(archiveDb, &archiveCaching, gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
     if n, err := archive.InsertChain(blocks); err != nil {
         t.Fatalf("failed to process block %d: %v", n, err)
     }
@@ -924,7 +925,7 @@ func testLightVsFastVsFullChainHeads(t *testing.T, scheme string) {
     // Import the chain as a non-archive node and ensure all pointers are updated
     fastDb := makeDb()
     defer fastDb.Close()
-    fast, _ := NewBlockChain(fastDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+    fast, _ := NewBlockChain(fastDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
     defer fast.Stop()
 
     headers := make([]*types.Header, len(blocks))
@@ -944,7 +945,7 @@ func testLightVsFastVsFullChainHeads(t *testing.T, scheme string) {
     // Import the chain as a ancient-first node and ensure all pointers are updated
     ancientDb := makeDb()
     defer ancientDb.Close()
-    ancient, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+    ancient, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
     defer ancient.Stop()
 
     if n, err := ancient.InsertHeaderChain(headers); err != nil {
@@ -963,7 +964,7 @@ func testLightVsFastVsFullChainHeads(t *testing.T, scheme string) {
     // Import the chain as a light node and ensure all pointers are updated
     lightDb := makeDb()
     defer lightDb.Close()
-    light, _ := NewBlockChain(lightDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+    light, _ := NewBlockChain(lightDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
     if n, err := light.InsertHeaderChain(headers); err != nil {
         t.Fatalf("failed to insert header %d: %v", n, err)
     }
@@ -1036,7 +1037,7 @@ func testChainTxReorgs(t *testing.T, scheme string) {
     })
     // Import the chain. This runs all block validation rules.
     db := rawdb.NewMemoryDatabase()
-    blockchain, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+    blockchain, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
     if i, err := blockchain.InsertChain(chain); err != nil {
         t.Fatalf("failed to insert original chain[%d]: %v", i, err)
     }
@@ -1110,7 +1111,7 @@ func testLogReorgs(t *testing.T, scheme string) {
         signer = types.LatestSigner(gspec.Config)
     )
 
-    blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+    blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
     defer blockchain.Stop()
 
     rmLogsCh := make(chan RemovedLogsEvent)
@@ -1166,7 +1167,7 @@ func testLogRebirth(t *testing.T, scheme string) {
         gspec         = &Genesis{Config: params.TestChainConfig, Alloc: types.GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}}
         signer        = types.LatestSigner(gspec.Config)
         engine        = ethash.NewFaker()
-        blockchain, _ = NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
+        blockchain, _ = NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil)
     )
     defer blockchain.Stop()
@@ -1247,7 +1248,7 @@ func testSideLogRebirth(t *testing.T, scheme string) {
         addr1         = crypto.PubkeyToAddress(key1.PublicKey)
         gspec         = &Genesis{Config: params.TestChainConfig, Alloc: types.GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}}
         signer        = types.LatestSigner(gspec.Config)
-        blockchain, _ = NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+        blockchain, _ = NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
     )
     defer blockchain.Stop()
@@ -1266,7 +1267,7 @@ func testSideLogRebirth(t *testing.T, scheme string) {
     }
     checkLogEvents(t, newLogCh, rmLogsCh, 0, 0)
 
-    // Generate side chain with lower difficulty
+    // Generate side chain with lower difficulty, after the merge, the chain will be accepted even if it is lower difficulty
     genDb, sideChain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 2, func(i int, gen *BlockGen) {
         if i == 1 {
             tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, gen.header.BaseFee, logCode), signer, key1)
@@ -1279,14 +1280,14 @@ func testSideLogRebirth(t *testing.T, scheme string) {
     if _, err := blockchain.InsertChain(sideChain); err != nil {
         t.Fatalf("failed to insert forked chain: %v", err)
     }
-    checkLogEvents(t, newLogCh, rmLogsCh, 0, 0)
+    checkLogEvents(t, newLogCh, rmLogsCh, 1, 0)
 
-    // Generate a new block based on side chain.
+    // Generate a new block based on side chain. Should not emit any events anymore.
     newBlocks, _ := GenerateChain(gspec.Config, sideChain[len(sideChain)-1], ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {})
     if _, err := blockchain.InsertChain(newBlocks); err != nil {
         t.Fatalf("failed to insert forked chain: %v", err)
     }
-    checkLogEvents(t, newLogCh, rmLogsCh, 1, 0)
+    checkLogEvents(t, newLogCh, rmLogsCh, 0, 0)
 }
 
 func checkLogEvents(t *testing.T, logsCh <-chan []*types.Log, rmLogsCh <-chan RemovedLogsEvent, wantNew, wantRemoved int) {
@@ -1346,7 +1347,7 @@ func testReorgSideEvent(t *testing.T, scheme string) {
         }
         signer = types.LatestSigner(gspec.Config)
     )
-    blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+    blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
     defer blockchain.Stop()
 
     _, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 3, func(i int, gen *BlockGen) {})
@@ -1370,15 +1371,10 @@ func testReorgSideEvent(t *testing.T, scheme string) {
         t.Fatalf("failed to insert chain: %v", err)
     }
 
-    // first two block of the secondary chain are for a brief moment considered
-    // side chains because up to that point the first one is considered the
-    // heavier chain.
     expectedSideHashes := map[common.Hash]bool{
-        replacementBlocks[0].Hash(): true,
-        replacementBlocks[1].Hash(): true,
-        chain[0].Hash():             true,
-        chain[1].Hash():             true,
-        chain[2].Hash():             true,
+        chain[0].Hash(): true,
+        chain[1].Hash(): true,
+        chain[2].Hash(): true,
     }
 
     i := 0
@@ -1403,7 +1399,7 @@ done:
             timeout.Reset(timeoutDura)
 
         case <-timeout.C:
-            t.Fatal("Timeout. Possibly not all blocks were triggered for sideevent")
+            t.Fatalf("Timeout. Possibly not all blocks were triggered for sideevent: %v", i)
         }
     }
@@ -1530,7 +1526,7 @@ func testEIP155Transition(t *testing.T, scheme string) {
         }
     })
 
-    blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+    blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
     defer blockchain.Stop()
 
     if _, err := blockchain.InsertChain(blocks); err != nil {
@@ -1623,7 +1619,7 @@ func testEIP161AccountRemoval(t *testing.T, scheme string) {
         block.AddTx(tx)
     })
     // account must exist pre eip 161
-    blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+    blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
     defer blockchain.Stop()
 
     if _, err := blockchain.InsertChain(types.Blocks{blocks[0]}); err != nil {
@@ -1681,7 +1677,7 @@ func testBlockchainHeaderchainReorgConsistency(t *testing.T, scheme string) {
     }
     // Import the canonical and fork chain side by side, verifying the current block
     // and current header consistency
-    chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil)
+    chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil)
     if err != nil {
         t.Fatalf("failed to create tester chain: %v", err)
     }
@@ -1725,7 +1721,7 @@ func TestTrieForkGC(t *testing.T) {
         forks[i] = fork[0]
     }
     // Import the canonical and fork chain side by side, forcing the trie cache to cache both
-    chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil)
+    chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil)
     if err != nil {
         t.Fatalf("failed to create tester chain: %v", err)
     }
@@ -1771,7 +1767,7 @@ func testLargeReorgTrieGC(t *testing.T, scheme string) {
     db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false)
     defer db.Close()
 
-    chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil)
+    chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil)
     if err != nil {
         t.Fatalf("failed to create tester chain: %v", err)
     }
@@ -1787,18 +1783,15 @@ func testLargeReorgTrieGC(t *testing.T, scheme string) {
     if chain.HasState(shared[len(shared)-1].Root()) {
         t.Fatalf("common-but-old ancestor still cache")
     }
-    // Import the competitor chain without exceeding the canonical's TD and ensure
-    // we have not processed any of the blocks (protection against malicious blocks)
+    // Import the competitor chain without exceeding the canonical's TD.
+    // Post-merge the side chain should be executed
     if _, err := chain.InsertChain(competitor[:len(competitor)-2]); err != nil {
         t.Fatalf("failed to insert competitor chain: %v", err)
     }
-    for i, block := range competitor[:len(competitor)-2] {
-        if chain.HasState(block.Root()) {
-            t.Fatalf("competitor %d: low TD chain became processed", i)
-        }
+    if !chain.HasState(competitor[len(competitor)-3].Root()) {
+        t.Fatalf("failed to insert low-TD chain")
     }
-    // Import the head of the competitor chain, triggering the reorg and ensure we
-    // successfully reprocess all the stashed away blocks.
+    // Import the head of the competitor chain.
     if _, err := chain.InsertChain(competitor[len(competitor)-2:]); err != nil {
         t.Fatalf("failed to finalize competitor chain: %v", err)
     }
@@ -1842,7 +1835,7 @@ func testBlockchainRecovery(t *testing.T, scheme string) {
         t.Fatalf("failed to create temp freezer db: %v", err)
     }
     defer ancientDb.Close()
-    ancient, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+    ancient, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
 
     headers := make([]*types.Header, len(blocks))
     for i, block := range blocks {
@@ -1862,7 +1855,7 @@ func testBlockchainRecovery(t *testing.T, scheme string) {
     rawdb.WriteHeadFastBlockHash(ancientDb, midBlock.Hash())
 
     // Reopen broken blockchain again
-    ancient, _ = NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+    ancient, _ = NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
     defer ancient.Stop()
     if num := ancient.CurrentBlock().Number.Uint64(); num != 0 {
         t.Errorf("head block mismatch: have #%v, want #%v", num, 0)
@@ -1914,7 +1907,7 @@ func testInsertReceiptChainRollback(t *testing.T, scheme string) {
     }
    defer ancientDb.Close()
 
-    ancientChain, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+    ancientChain, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
     defer ancientChain.Stop()
 
     // Import the canonical header chain.
@@ -1981,7 +1974,7 @@ func testLowDiffLongChain(t *testing.T, scheme string) {
     diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false)
     defer diskdb.Close()
 
-    chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil)
+    chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil)
     if err != nil {
         t.Fatalf("failed to create tester chain: %v", err)
     }
@@ -2042,7 +2035,7 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon
         mergeBlock = math.MaxInt32
     )
     // Generate and import the canonical chain
-    chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil)
+    chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil)
     if err != nil {
         t.Fatalf("failed to create tester chain: %v", err)
     }
@@ -2196,7 +2189,7 @@ func testInsertKnownChainData(t *testing.T, typ string, scheme string) {
     }
     defer chaindb.Close()
 
-    chain, err := NewBlockChain(chaindb, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil)
+    chain, err := NewBlockChain(chaindb, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil)
     if err != nil {
         t.Fatalf("failed to create tester chain: %v", err)
     }
@@ -2280,10 +2273,10 @@ func testInsertKnownChainData(t *testing.T, typ string, scheme string) {
     if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
         t.Fatalf("failed to insert chain data: %v", err)
     }
-    // The head shouldn't change.
-    asserter(t, blocks3[len(blocks3)-1])
+    // Post-merge the chain should change even if td is lower.
+    asserter(t, blocks2[len(blocks2)-1])
 
-    // Rollback the heavier chain and re-insert the longer chain again
+    // Rollback the heavier chain and re-insert the longer chain again.
     chain.SetHead(rollback - 1)
     if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
         t.Fatalf("failed to insert chain data: %v", err)
@@ -2367,7 +2360,7 @@ func testInsertKnownChainDataWithMerging(t *testing.T, typ string, mergeHeight i
     }
     defer chaindb.Close()
 
-    chain, err := NewBlockChain(chaindb, nil, genesis, nil, engine, vm.Config{}, nil, nil)
+    chain, err := NewBlockChain(chaindb, nil, genesis, nil, engine, vm.Config{}, nil)
     if err != nil {
         t.Fatalf("failed to create tester chain: %v", err)
     }
@@ -2481,7 +2474,7 @@ func getLongAndShortChains(scheme string) (*BlockChain, []*types.Block, []*types
     genDb, longChain, _ := GenerateChainWithGenesis(genesis, engine, 80, func(i int, b *BlockGen) {
         b.SetCoinbase(common.Address{1})
     })
-    chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil)
+    chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil)
     if err != nil {
         return nil, nil, nil, nil, fmt.Errorf("failed to create tester chain: %v", err)
     }
@@ -2657,7 +2650,7 @@ func benchmarkLargeNumberOfValueToNonexisting(b *testing.B, numTxs, numBlocks in
     b.ResetTimer()
     for i := 0; i < b.N; i++ {
         // Import the shared chain and the original canonical one
-        chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil)
+        chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil)
         if err != nil {
             b.Fatalf("failed to create tester chain: %v", err)
         }
@@ -2744,7 +2737,21 @@ func testSideImportPrunedBlocks(t *testing.T, scheme string) {
     // Generate and import the canonical chain
     _, blocks, _ := GenerateChainWithGenesis(genesis, engine, 2*state.TriesInMemory, nil)
 
-    chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil)
+    // Construct a database with freezer enabled
+    datadir := t.TempDir()
+    ancient := path.Join(datadir, "ancient")
+
+    db, err := rawdb.Open(rawdb.OpenOptions{
+        Directory:         datadir,
+        AncientsDirectory: ancient,
+        Ephemeral:         true,
+    })
+    if err != nil {
+        t.Fatalf("Failed to create persistent database: %v", err)
+    }
+    defer db.Close()
+
+    chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil)
     if err != nil {
         t.Fatalf("failed to create tester chain: %v", err)
     }
@@ -2771,7 +2778,6 @@ func testSideImportPrunedBlocks(t *testing.T, scheme string) {
     if !chain.HasBlockAndState(firstNonPrunedBlock.Hash(), firstNonPrunedBlock.NumberU64()) {
         t.Errorf("Block %d pruned", firstNonPrunedBlock.NumberU64())
     }
-    // Now re-import some old blocks
     blockToReimport := blocks[5:8]
     _, err = chain.InsertChain(blockToReimport)
     if err != nil {
@@ -2844,7 +2850,7 @@ func testDeleteCreateRevert(t *testing.T, scheme string) {
         b.AddTx(tx)
     })
     // Import the canonical chain
-    chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
+    chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil)
     if err != nil {
         t.Fatalf("failed to create tester chain: %v", err)
     }
@@ -2959,7 +2965,7 @@ func testDeleteRecreateSlots(t *testing.T, scheme string) {
     // Import the canonical chain
     chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{
         Tracer: logger.NewJSONLogger(nil, os.Stdout),
-    }, nil, nil)
+    }, nil)
     if err != nil {
         t.Fatalf("failed to create tester chain: %v", err)
     }
@@ -3041,7 +3047,7 @@ func testDeleteRecreateAccount(t *testing.T, scheme string) {
     // Import the canonical chain
     chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{
         Tracer: logger.NewJSONLogger(nil, os.Stdout),
-    }, nil, nil)
+    }, nil)
     if err != nil {
         t.Fatalf("failed to create tester chain: %v", err)
     }
@@ -3217,7 +3223,7 @@ func testDeleteRecreateSlotsAcrossManyBlocks(t *testing.T, scheme string) {
     chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{
         //Debug:  true,
         //Tracer: vm.NewJSONLogger(nil, os.Stdout),
-    }, nil, nil)
+    }, nil)
     if err != nil {
         t.Fatalf("failed to create tester chain: %v", err)
     }
@@ -3355,7 +3361,7 @@ func testInitThenFailCreateContract(t *testing.T, scheme string) {
     chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{
         //Debug:  true,
         //Tracer: vm.NewJSONLogger(nil, os.Stdout),
-    }, nil, nil)
+    }, nil)
     if err != nil {
         t.Fatalf("failed to create tester chain: %v", err)
     }
@@ -3442,7 +3448,7 @@ func testEIP2718Transition(t *testing.T, scheme string) {
     })
     // Import the canonical chain
-    chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
+    chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil)
     if err != nil {
         t.Fatalf("failed to create tester chain: %v", err)
     }
@@ -3536,7 +3542,7 @@ func testEIP1559Transition(t *testing.T, scheme string) {
         b.AddTx(tx)
     })
 
-    chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
+    chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil)
     if err != nil {
         t.Fatalf("failed to create tester chain: %v", err)
     }
@@ -3649,7 +3655,7 @@ func testSetCanonical(t *testing.T, scheme string) {
     diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false)
     defer diskdb.Close()
 
-    chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
+    chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil)
     if err != nil {
         t.Fatalf("failed to create tester chain: %v", err)
     }
@@ -3758,7 +3764,7 @@ func testCanonicalHashMarker(t *testing.T, scheme string) {
         _, forkB, _ := GenerateChainWithGenesis(gspec, engine, c.forkB, func(i int, gen *BlockGen) {})
 
         // Initialize test chain
-        chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
+        chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil)
         if err != nil {
             t.Fatalf("failed to create tester chain: %v", err)
         }
@ -3895,7 +3901,7 @@ func testCreateThenDelete(t *testing.T, config *params.ChainConfig) {
chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{ chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{
//Debug: true, //Debug: true,
//Tracer: logger.NewJSONLogger(nil, os.Stdout), //Tracer: logger.NewJSONLogger(nil, os.Stdout),
}, nil, nil) }, nil)
if err != nil { if err != nil {
t.Fatalf("failed to create tester chain: %v", err) t.Fatalf("failed to create tester chain: %v", err)
} }
@ -4007,7 +4013,7 @@ func TestDeleteThenCreate(t *testing.T) {
} }
}) })
// Import the canonical chain // Import the canonical chain
chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil) chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil)
if err != nil { if err != nil {
t.Fatalf("failed to create tester chain: %v", err) t.Fatalf("failed to create tester chain: %v", err)
} }
@ -4092,7 +4098,7 @@ func TestTransientStorageReset(t *testing.T) {
}) })
// Initialize the blockchain with 1153 enabled. // Initialize the blockchain with 1153 enabled.
chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vmConfig, nil, nil) chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vmConfig, nil)
if err != nil { if err != nil {
t.Fatalf("failed to create tester chain: %v", err) t.Fatalf("failed to create tester chain: %v", err)
} }
@ -4187,7 +4193,7 @@ func TestEIP3651(t *testing.T) {
b.AddTx(tx) b.AddTx(tx)
}) })
chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{Tracer: logger.NewMarkdownLogger(&logger.Config{}, os.Stderr).Hooks()}, nil, nil) chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{Tracer: logger.NewMarkdownLogger(&logger.Config{}, os.Stderr).Hooks()}, nil)
if err != nil { if err != nil {
t.Fatalf("failed to create tester chain: %v", err) t.Fatalf("failed to create tester chain: %v", err)
} }

@ -123,7 +123,7 @@ func TestGeneratePOSChain(t *testing.T) {
}) })
// Import the chain. This runs all block validation rules. // Import the chain. This runs all block validation rules.
blockchain, _ := NewBlockChain(db, nil, gspec, nil, beacon.NewFaker(), vm.Config{}, nil, nil) blockchain, _ := NewBlockChain(db, nil, gspec, nil, beacon.NewFaker(), vm.Config{}, nil)
defer blockchain.Stop() defer blockchain.Stop()
if i, err := blockchain.InsertChain(genchain); err != nil { if i, err := blockchain.InsertChain(genchain); err != nil {
@ -238,7 +238,7 @@ func ExampleGenerateChain() {
}) })
// Import the chain. This runs all block validation rules. // Import the chain. This runs all block validation rules.
blockchain, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(rawdb.HashScheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) blockchain, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(rawdb.HashScheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
defer blockchain.Stop() defer blockchain.Stop()
if i, err := blockchain.InsertChain(chain); err != nil { if i, err := blockchain.InsertChain(chain); err != nil {

@ -50,7 +50,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
BaseFee: big.NewInt(params.InitialBaseFee), BaseFee: big.NewInt(params.InitialBaseFee),
Config: &proConf, Config: &proConf,
} }
proBc, _ := NewBlockChain(proDb, nil, progspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) proBc, _ := NewBlockChain(proDb, nil, progspec, nil, ethash.NewFaker(), vm.Config{}, nil)
defer proBc.Stop() defer proBc.Stop()
conDb := rawdb.NewMemoryDatabase() conDb := rawdb.NewMemoryDatabase()
@ -62,7 +62,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
BaseFee: big.NewInt(params.InitialBaseFee), BaseFee: big.NewInt(params.InitialBaseFee),
Config: &conConf, Config: &conConf,
} }
conBc, _ := NewBlockChain(conDb, nil, congspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) conBc, _ := NewBlockChain(conDb, nil, congspec, nil, ethash.NewFaker(), vm.Config{}, nil)
defer conBc.Stop() defer conBc.Stop()
if _, err := proBc.InsertChain(prefix); err != nil { if _, err := proBc.InsertChain(prefix); err != nil {
@ -74,7 +74,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
// Try to expand both pro-fork and non-fork chains iteratively with other camp's blocks // Try to expand both pro-fork and non-fork chains iteratively with other camp's blocks
for i := int64(0); i < params.DAOForkExtraRange.Int64(); i++ { for i := int64(0); i < params.DAOForkExtraRange.Int64(); i++ {
// Create a pro-fork block, and try to feed into the no-fork chain // Create a pro-fork block, and try to feed into the no-fork chain
bc, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, congspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) bc, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, congspec, nil, ethash.NewFaker(), vm.Config{}, nil)
blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().Number.Uint64())) blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().Number.Uint64()))
for j := 0; j < len(blocks)/2; j++ { for j := 0; j < len(blocks)/2; j++ {
@ -97,7 +97,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
t.Fatalf("contra-fork chain didn't accepted no-fork block: %v", err) t.Fatalf("contra-fork chain didn't accepted no-fork block: %v", err)
} }
// Create a no-fork block, and try to feed into the pro-fork chain // Create a no-fork block, and try to feed into the pro-fork chain
bc, _ = NewBlockChain(rawdb.NewMemoryDatabase(), nil, progspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) bc, _ = NewBlockChain(rawdb.NewMemoryDatabase(), nil, progspec, nil, ethash.NewFaker(), vm.Config{}, nil)
blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().Number.Uint64())) blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().Number.Uint64()))
for j := 0; j < len(blocks)/2; j++ { for j := 0; j < len(blocks)/2; j++ {
@ -121,7 +121,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
} }
} }
// Verify that contra-forkers accept pro-fork extra-datas after forking finishes // Verify that contra-forkers accept pro-fork extra-datas after forking finishes
bc, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, congspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) bc, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, congspec, nil, ethash.NewFaker(), vm.Config{}, nil)
defer bc.Stop() defer bc.Stop()
blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().Number.Uint64())) blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().Number.Uint64()))
@ -139,7 +139,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
t.Fatalf("contra-fork chain didn't accept pro-fork block post-fork: %v", err) t.Fatalf("contra-fork chain didn't accept pro-fork block post-fork: %v", err)
} }
// Verify that pro-forkers accept contra-fork extra-datas after forking finishes // Verify that pro-forkers accept contra-fork extra-datas after forking finishes
bc, _ = NewBlockChain(rawdb.NewMemoryDatabase(), nil, progspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) bc, _ = NewBlockChain(rawdb.NewMemoryDatabase(), nil, progspec, nil, ethash.NewFaker(), vm.Config{}, nil)
defer bc.Stop() defer bc.Stop()
blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().Number.Uint64())) blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().Number.Uint64()))

@ -1,113 +0,0 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
import (
crand "crypto/rand"
"errors"
"math/big"
mrand "math/rand"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
)
// ChainReader defines a small collection of methods needed to access the local
// blockchain during header verification. It's implemented by both blockchain
// and lightchain.
type ChainReader interface {
// Config retrieves the header chain's chain configuration.
Config() *params.ChainConfig
// GetTd returns the total difficulty of a local block.
GetTd(common.Hash, uint64) *big.Int
}
// ForkChoice is the fork chooser based on the highest total difficulty of the
// chain (the fork choice used in eth1) and the external fork choice (the fork
// choice used in eth2). The main goal of this ForkChoice is not only to offer
// fork choice during the eth1/eth2 merge phase, but also to keep compatibility
// with all other proof-of-work networks.
type ForkChoice struct {
chain ChainReader
rand *mrand.Rand
// preserve is a helper function used in td fork choice.
// Miners will prefer to choose the locally mined block if the
// local td is equal to the extern one. It can be nil for light
// clients.
preserve func(header *types.Header) bool
}
func NewForkChoice(chainReader ChainReader, preserve func(header *types.Header) bool) *ForkChoice {
// Seed a fast but crypto originating random generator
seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
if err != nil {
log.Crit("Failed to initialize random seed", "err", err)
}
return &ForkChoice{
chain: chainReader,
rand: mrand.New(mrand.NewSource(seed.Int64())),
preserve: preserve,
}
}
// ReorgNeeded returns whether the reorg should be applied
// based on the given external header and local canonical chain.
// In the td mode, the new head is chosen if the corresponding
// total difficulty is higher. In the extern mode, the trusted
// header is always selected as the head.
func (f *ForkChoice) ReorgNeeded(current *types.Header, extern *types.Header) (bool, error) {
var (
localTD = f.chain.GetTd(current.Hash(), current.Number.Uint64())
externTd = f.chain.GetTd(extern.Hash(), extern.Number.Uint64())
)
if localTD == nil || externTd == nil {
return false, errors.New("missing td")
}
// Accept the new header as the chain head if the transition
// is already triggered. We assume all the headers after the
// transition come from the trusted consensus layer.
if ttd := f.chain.Config().TerminalTotalDifficulty; ttd != nil && ttd.Cmp(externTd) <= 0 {
return true, nil
}
// If the total difficulty is higher than our known one, add it to the canonical chain
if diff := externTd.Cmp(localTD); diff > 0 {
return true, nil
} else if diff < 0 {
return false, nil
}
// Local and external difficulty is identical.
// The second clause in the if statement reduces the vulnerability to selfish mining.
// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
reorg := false
externNum, localNum := extern.Number.Uint64(), current.Number.Uint64()
if externNum < localNum {
reorg = true
} else if externNum == localNum {
var currentPreserve, externPreserve bool
if f.preserve != nil {
currentPreserve, externPreserve = f.preserve(current), f.preserve(extern)
}
reorg = !currentPreserve && (externPreserve || f.rand.Float64() < 0.5)
}
return reorg, nil
}
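For readers skimming the deletion above: the removed file boils down to one total-difficulty comparison plus a tie-break. Below is a minimal, self-contained sketch of that rule, illustrative only; the names are not from the source, the preserve callback and the random coin flip are folded into a tieBreak parameter, and the TTD short-circuit is only noted in a comment.

package main

import (
	"fmt"
	"math/big"
)

// tdReorgNeeded condenses the removed ReorgNeeded decision: a heavier external
// chain always wins, a lighter one never does, and an equal-weight chain wins
// only if it is shorter or wins the tie-break (the selfish-mining guard). The
// production code additionally accepted the external header unconditionally
// once its td reached TerminalTotalDifficulty.
func tdReorgNeeded(localTD, externTD *big.Int, localNum, externNum uint64, tieBreak func() bool) bool {
	switch externTD.Cmp(localTD) {
	case 1:
		return true
	case -1:
		return false
	}
	// Equal difficulty: prefer the shorter chain, otherwise defer to the
	// tie-break (preserve callback / coin flip in the original).
	if externNum < localNum {
		return true
	}
	if externNum == localNum {
		return tieBreak()
	}
	return false
}

func main() {
	local, extern := big.NewInt(1000), big.NewInt(1001)
	// Heavier external chain: reorg regardless of the tie-break.
	fmt.Println(tdReorgNeeded(local, extern, 10, 10, func() bool { return false }))
}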

@ -124,7 +124,7 @@ func testSetupGenesis(t *testing.T, scheme string) {
tdb := triedb.NewDatabase(db, newDbConfig(scheme)) tdb := triedb.NewDatabase(db, newDbConfig(scheme))
oldcustomg.Commit(db, tdb) oldcustomg.Commit(db, tdb)
bc, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), &oldcustomg, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil) bc, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), &oldcustomg, nil, ethash.NewFullFaker(), vm.Config{}, nil)
defer bc.Stop() defer bc.Stop()
_, blocks, _ := GenerateChainWithGenesis(&oldcustomg, ethash.NewFaker(), 4, nil) _, blocks, _ := GenerateChainWithGenesis(&oldcustomg, ethash.NewFaker(), 4, nil)

@ -254,7 +254,7 @@ func (hc *HeaderChain) WriteHeaders(headers []*types.Header) (int, error) {
// without the real blocks. Hence, writing headers directly should only be done // without the real blocks. Hence, writing headers directly should only be done
// in two scenarios: pure-header mode of operation (light clients), or properly // in two scenarios: pure-header mode of operation (light clients), or properly
// separated header/block phases (non-archive clients). // separated header/block phases (non-archive clients).
func (hc *HeaderChain) writeHeadersAndSetHead(headers []*types.Header, forker *ForkChoice) (*headerWriteResult, error) { func (hc *HeaderChain) writeHeadersAndSetHead(headers []*types.Header) (*headerWriteResult, error) {
inserted, err := hc.WriteHeaders(headers) inserted, err := hc.WriteHeaders(headers)
if err != nil { if err != nil {
return nil, err return nil, err
@ -270,15 +270,6 @@ func (hc *HeaderChain) writeHeadersAndSetHead(headers []*types.Header, forker *F
lastHeader: lastHeader, lastHeader: lastHeader,
} }
) )
// Ask the fork choicer if the reorg is necessary
if reorg, err := forker.ReorgNeeded(hc.CurrentHeader(), lastHeader); err != nil {
return nil, err
} else if !reorg {
if inserted != 0 {
result.status = SideStatTy
}
return result, nil
}
// Special case, all the inserted headers are already on the canonical // Special case, all the inserted headers are already on the canonical
// header chain, skip the reorg operation. // header chain, skip the reorg operation.
if hc.GetCanonicalHash(lastHeader.Number.Uint64()) == lastHash && lastHeader.Number.Uint64() <= hc.CurrentHeader().Number.Uint64() { if hc.GetCanonicalHash(lastHeader.Number.Uint64()) == lastHash && lastHeader.Number.Uint64() <= hc.CurrentHeader().Number.Uint64() {
@ -336,11 +327,11 @@ func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header) (int, error) {
// //
// The returned 'write status' says if the inserted headers are part of the canonical chain // The returned 'write status' says if the inserted headers are part of the canonical chain
// or a side chain. // or a side chain.
func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, start time.Time, forker *ForkChoice) (WriteStatus, error) { func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, start time.Time) (WriteStatus, error) {
if hc.procInterrupt() { if hc.procInterrupt() {
return 0, errors.New("aborted") return 0, errors.New("aborted")
} }
res, err := hc.writeHeadersAndSetHead(chain, forker) res, err := hc.writeHeadersAndSetHead(chain)
if err != nil { if err != nil {
return 0, err return 0, err
} }
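Caller-side, the migration is mechanical: previously a fork choicer had to be constructed (e.g. forker := NewForkChoice(hc, nil)) and threaded through as the third argument; now the argument is simply gone and the inserted headers are judged against the canonical chain directly. A hedged sketch of the new usage, illustrative only: it assumes the package core context of the tests below, with "fmt", "time" and core/types imported, and the helper name is made up.

// insertCanonical shows the forker-free call shape after this PR.
func insertCanonical(hc *HeaderChain, headers []*types.Header) error {
	status, err := hc.InsertHeaderChain(headers, time.Now()) // no *ForkChoice anymore
	if err != nil {
		return err
	}
	if status != CanonStatTy {
		// Still reachable, e.g. a batch of already-known headers reports
		// NonStatTy (see the updated TestHeaderInsertion expectations below).
		return fmt.Errorf("headers not canonical: %v", status)
	}
	return nil
}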

@ -51,10 +51,10 @@ func verifyUnbrokenCanonchain(hc *HeaderChain) error {
return nil return nil
} }
func testInsert(t *testing.T, hc *HeaderChain, chain []*types.Header, wantStatus WriteStatus, wantErr error, forker *ForkChoice) { func testInsert(t *testing.T, hc *HeaderChain, chain []*types.Header, wantStatus WriteStatus, wantErr error) {
t.Helper() t.Helper()
status, err := hc.InsertHeaderChain(chain, time.Now(), forker) status, err := hc.InsertHeaderChain(chain, time.Now())
if status != wantStatus { if status != wantStatus {
t.Errorf("wrong write status from InsertHeaderChain: got %v, want %v", status, wantStatus) t.Errorf("wrong write status from InsertHeaderChain: got %v, want %v", status, wantStatus)
} }
@ -83,34 +83,33 @@ func TestHeaderInsertion(t *testing.T) {
// chain B: G->A1->B1...B128 // chain B: G->A1->B1...B128
chainB := makeHeaderChain(gspec.Config, chainA[0], 128, ethash.NewFaker(), genDb, 10) chainB := makeHeaderChain(gspec.Config, chainA[0], 128, ethash.NewFaker(), genDb, 10)
forker := NewForkChoice(hc, nil)
// Inserting 64 headers on an empty chain, expecting // Inserting 64 headers on an empty chain, expecting
// 1 callback, 1 canon-status, 0 sidestatus, // 1 callback, 1 canon-status, 0 sidestatus,
testInsert(t, hc, chainA[:64], CanonStatTy, nil, forker) testInsert(t, hc, chainA[:64], CanonStatTy, nil)
// Inserting 64 identical headers, expecting // Inserting 64 identical headers, expecting
// 0 callbacks, 0 canon-status, 0 sidestatus, // 0 callbacks, 0 canon-status, 0 sidestatus,
testInsert(t, hc, chainA[:64], NonStatTy, nil, forker) testInsert(t, hc, chainA[:64], NonStatTy, nil)
// Inserting a mix of already-known and new headers // Inserting a mix of already-known and new headers
// 1 callback, 1 canon, 0 side // 1 callback, 1 canon, 0 side
testInsert(t, hc, chainA[32:96], CanonStatTy, nil, forker) testInsert(t, hc, chainA[32:96], CanonStatTy, nil)
// Inserting side blocks, but not overtaking the canon chain // Inserting headers from chain B, overtaking the canon chain blindly
testInsert(t, hc, chainB[0:32], SideStatTy, nil, forker) testInsert(t, hc, chainB[0:32], CanonStatTy, nil)
// Inserting more side blocks, but we don't have the parent // Inserting more headers on chain B, but we don't have the parent
testInsert(t, hc, chainB[34:36], NonStatTy, consensus.ErrUnknownAncestor, forker) testInsert(t, hc, chainB[34:36], NonStatTy, consensus.ErrUnknownAncestor)
// Inserting more sideblocks, overtaking the canon chain // Inserting more headers on chain B, extending the canon chain
testInsert(t, hc, chainB[32:97], CanonStatTy, nil, forker) testInsert(t, hc, chainB[32:97], CanonStatTy, nil)
// Inserting more A-headers, taking back the canonicality // Inserting more headers on chain A, taking back the canonicality
testInsert(t, hc, chainA[90:100], CanonStatTy, nil, forker) testInsert(t, hc, chainA[90:100], CanonStatTy, nil)
// And B becomes canon again // And B becomes canon again
testInsert(t, hc, chainB[97:107], CanonStatTy, nil, forker) testInsert(t, hc, chainB[97:107], CanonStatTy, nil)
// And B becomes even longer // And B becomes even longer
testInsert(t, hc, chainB[107:128], CanonStatTy, nil, forker) testInsert(t, hc, chainB[107:128], CanonStatTy, nil)
} }

@ -133,7 +133,7 @@ func TestStateProcessorErrors(t *testing.T) {
}, },
}, },
} }
blockchain, _ = NewBlockChain(db, nil, gspec, nil, beacon.New(ethash.NewFaker()), vm.Config{}, nil, nil) blockchain, _ = NewBlockChain(db, nil, gspec, nil, beacon.New(ethash.NewFaker()), vm.Config{}, nil)
tooBigInitCode = [params.MaxInitCodeSize + 1]byte{} tooBigInitCode = [params.MaxInitCodeSize + 1]byte{}
) )
@ -293,7 +293,7 @@ func TestStateProcessorErrors(t *testing.T) {
}, },
}, },
} }
blockchain, _ = NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) blockchain, _ = NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
) )
defer blockchain.Stop() defer blockchain.Stop()
for i, tt := range []struct { for i, tt := range []struct {
@ -332,7 +332,7 @@ func TestStateProcessorErrors(t *testing.T) {
}, },
}, },
} }
blockchain, _ = NewBlockChain(db, nil, gspec, nil, beacon.New(ethash.NewFaker()), vm.Config{}, nil, nil) blockchain, _ = NewBlockChain(db, nil, gspec, nil, beacon.New(ethash.NewFaker()), vm.Config{}, nil)
) )
defer blockchain.Stop() defer blockchain.Stop()
for i, tt := range []struct { for i, tt := range []struct {
@ -481,7 +481,7 @@ func TestProcessVerkle(t *testing.T) {
// genesis := gspec.MustCommit(bcdb, triedb) // genesis := gspec.MustCommit(bcdb, triedb)
cacheConfig := DefaultCacheConfigWithScheme("path") cacheConfig := DefaultCacheConfigWithScheme("path")
cacheConfig.SnapshotLimit = 0 cacheConfig.SnapshotLimit = 0
blockchain, _ := NewBlockChain(bcdb, cacheConfig, gspec, nil, beacon.New(ethash.NewFaker()), vm.Config{}, nil, nil) blockchain, _ := NewBlockChain(bcdb, cacheConfig, gspec, nil, beacon.New(ethash.NewFaker()), vm.Config{}, nil)
defer blockchain.Stop() defer blockchain.Stop()
txCost1 := params.TxGas txCost1 := params.TxGas

@ -216,11 +216,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
if config.OverrideVerkle != nil { if config.OverrideVerkle != nil {
overrides.OverrideVerkle = config.OverrideVerkle overrides.OverrideVerkle = config.OverrideVerkle
} }
// TODO (MariusVanDerWijden) get rid of shouldPreserve in a follow-up PR eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, config.Genesis, &overrides, eth.engine, vmConfig, &config.TransactionHistory)
shouldPreserve := func(header *types.Header) bool {
return false
}
eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, config.Genesis, &overrides, eth.engine, vmConfig, shouldPreserve, &config.TransactionHistory)
if err != nil { if err != nil {
return nil, err return nil, err
} }
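The same trimming runs through every NewBlockChain call in this diff: the shouldPreserve hook, used only by the td fork choice to bias ties toward locally mined blocks, is dropped, leaving the transaction-indexing limit as the final argument (&config.TransactionHistory, &txlookupLimit, or nil at the call sites above and below). A hedged sketch of the call-site migration; the placeholder argument names are descriptive, not the identifiers used in the source, and the fragment assumes the usual core/rawdb/ethash/vm test imports.

// Constructor shape before and after this PR:
//
//	before: core.NewBlockChain(db, cacheCfg, genesis, overrides, engine, vmCfg, shouldPreserve, txLookupLimit)
//	after:  core.NewBlockChain(db, cacheCfg, genesis, overrides, engine, vmCfg, txLookupLimit)
//
// so a typical test-style call site now reads:
chain, err := core.NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
if err != nil {
	t.Fatalf("failed to create tester chain: %v", err)
}
defer chain.Stop()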

@ -68,7 +68,7 @@ func newTesterWithNotification(t *testing.T, success func()) *downloadTester {
Alloc: types.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}}, Alloc: types.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}},
BaseFee: big.NewInt(params.InitialBaseFee), BaseFee: big.NewInt(params.InitialBaseFee),
} }
chain, err := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) chain, err := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
if err != nil { if err != nil {
panic(err) panic(err)
} }

@ -217,7 +217,7 @@ func newTestBlockchain(blocks []*types.Block) *core.BlockChain {
if pregenerated { if pregenerated {
panic("Requested chain generation outside of init") panic("Requested chain generation outside of init")
} }
chain, err := core.NewBlockChain(rawdb.NewMemoryDatabase(), nil, testGspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) chain, err := core.NewBlockChain(rawdb.NewMemoryDatabase(), nil, testGspec, nil, ethash.NewFaker(), vm.Config{}, nil)
if err != nil { if err != nil {
panic(err) panic(err)
} }

@ -250,7 +250,7 @@ func TestFilters(t *testing.T) {
} }
}) })
var l uint64 var l uint64
bc, err := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, &l) bc, err := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, &l)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

@ -210,7 +210,7 @@ func newTestBackend(t *testing.T, londonBlock *big.Int, cancunBlock *big.Int, pe
}) })
// Construct testing chain // Construct testing chain
gspec.Config.TerminalTotalDifficulty = new(big.Int).SetUint64(td) gspec.Config.TerminalTotalDifficulty = new(big.Int).SetUint64(td)
chain, err := core.NewBlockChain(db, &core.CacheConfig{TrieCleanNoPrefetch: true}, gspec, nil, engine, vm.Config{}, nil, nil) chain, err := core.NewBlockChain(db, &core.CacheConfig{TrieCleanNoPrefetch: true}, gspec, nil, engine, vm.Config{}, nil)
if err != nil { if err != nil {
t.Fatalf("Failed to create local chain, %v", err) t.Fatalf("Failed to create local chain, %v", err)
} }

@ -98,8 +98,8 @@ func testForkIDSplit(t *testing.T, protocol uint) {
gspecNoFork = &core.Genesis{Config: configNoFork} gspecNoFork = &core.Genesis{Config: configNoFork}
gspecProFork = &core.Genesis{Config: configProFork} gspecProFork = &core.Genesis{Config: configProFork}
chainNoFork, _ = core.NewBlockChain(dbNoFork, nil, gspecNoFork, nil, engine, vm.Config{}, nil, nil) chainNoFork, _ = core.NewBlockChain(dbNoFork, nil, gspecNoFork, nil, engine, vm.Config{}, nil)
chainProFork, _ = core.NewBlockChain(dbProFork, nil, gspecProFork, nil, engine, vm.Config{}, nil, nil) chainProFork, _ = core.NewBlockChain(dbProFork, nil, gspecProFork, nil, engine, vm.Config{}, nil)
_, blocksNoFork, _ = core.GenerateChainWithGenesis(gspecNoFork, engine, 2, nil) _, blocksNoFork, _ = core.GenerateChainWithGenesis(gspecNoFork, engine, 2, nil)
_, blocksProFork, _ = core.GenerateChainWithGenesis(gspecProFork, engine, 2, nil) _, blocksProFork, _ = core.GenerateChainWithGenesis(gspecProFork, engine, 2, nil)

@ -151,7 +151,7 @@ func newTestHandlerWithBlocks(blocks int) *testHandler {
Config: params.TestChainConfig, Config: params.TestChainConfig,
Alloc: types.GenesisAlloc{testAddr: {Balance: big.NewInt(1000000)}}, Alloc: types.GenesisAlloc{testAddr: {Balance: big.NewInt(1000000)}},
} }
chain, _ := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) chain, _ := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
_, bs, _ := core.GenerateChainWithGenesis(gspec, ethash.NewFaker(), blocks, nil) _, bs, _ := core.GenerateChainWithGenesis(gspec, ethash.NewFaker(), blocks, nil)
if _, err := chain.InsertChain(bs); err != nil { if _, err := chain.InsertChain(bs); err != nil {

@ -104,7 +104,7 @@ func newTestBackendWithGenerator(blocks int, shanghai bool, generator func(int,
Config: config, Config: config,
Alloc: types.GenesisAlloc{testAddr: {Balance: big.NewInt(100_000_000_000_000_000)}}, Alloc: types.GenesisAlloc{testAddr: {Balance: big.NewInt(100_000_000_000_000_000)}},
} }
chain, _ := core.NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil) chain, _ := core.NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil)
_, bs, _ := core.GenerateChainWithGenesis(gspec, engine, blocks, generator) _, bs, _ := core.GenerateChainWithGenesis(gspec, engine, blocks, generator)
if _, err := chain.InsertChain(bs); err != nil { if _, err := chain.InsertChain(bs); err != nil {

@ -126,7 +126,7 @@ func getChain() *core.BlockChain {
SnapshotWait: true, SnapshotWait: true,
} }
trieRoot = blocks[len(blocks)-1].Root() trieRoot = blocks[len(blocks)-1].Root()
bc, _ := core.NewBlockChain(rawdb.NewMemoryDatabase(), cacheConf, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) bc, _ := core.NewBlockChain(rawdb.NewMemoryDatabase(), cacheConf, gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
if _, err := bc.InsertChain(blocks); err != nil { if _, err := bc.InsertChain(blocks); err != nil {
panic(err) panic(err)
} }

@ -81,7 +81,7 @@ func newTestBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i i
SnapshotLimit: 0, SnapshotLimit: 0,
TrieDirtyDisabled: true, // Archive mode TrieDirtyDisabled: true, // Archive mode
} }
chain, err := core.NewBlockChain(backend.chaindb, cacheConfig, gspec, nil, backend.engine, vm.Config{}, nil, nil) chain, err := core.NewBlockChain(backend.chaindb, cacheConfig, gspec, nil, backend.engine, vm.Config{}, nil)
if err != nil { if err != nil {
t.Fatalf("failed to create tester chain: %v", err) t.Fatalf("failed to create tester chain: %v", err)
} }
@ -1014,7 +1014,7 @@ func newTestMergedBackend(t *testing.T, n int, gspec *core.Genesis, generator fu
SnapshotLimit: 0, SnapshotLimit: 0,
TrieDirtyDisabled: true, // Archive mode TrieDirtyDisabled: true, // Archive mode
} }
chain, err := core.NewBlockChain(backend.chaindb, cacheConfig, gspec, nil, backend.engine, vm.Config{}, nil, nil) chain, err := core.NewBlockChain(backend.chaindb, cacheConfig, gspec, nil, backend.engine, vm.Config{}, nil)
if err != nil { if err != nil {
t.Fatalf("failed to create tester chain: %v", err) t.Fatalf("failed to create tester chain: %v", err)
} }

@ -557,7 +557,7 @@ func testSupplyTracer(t *testing.T, genesis *core.Genesis, gen func(*core.BlockG
return nil, nil, fmt.Errorf("failed to create call tracer: %v", err) return nil, nil, fmt.Errorf("failed to create call tracer: %v", err)
} }
chain, err := core.NewBlockChain(rawdb.NewMemoryDatabase(), core.DefaultCacheConfigWithScheme(rawdb.PathScheme), genesis, nil, engine, vm.Config{Tracer: tracer}, nil, nil) chain, err := core.NewBlockChain(rawdb.NewMemoryDatabase(), core.DefaultCacheConfigWithScheme(rawdb.PathScheme), genesis, nil, engine, vm.Config{Tracer: tracer}, nil)
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("failed to create tester chain: %v", err) return nil, nil, fmt.Errorf("failed to create tester chain: %v", err)
} }

@ -449,7 +449,7 @@ func newTestBackend(t *testing.T, n int, gspec *core.Genesis, engine consensus.E
// Generate blocks for testing // Generate blocks for testing
db, blocks, _ := core.GenerateChainWithGenesis(gspec, engine, n, generator) db, blocks, _ := core.GenerateChainWithGenesis(gspec, engine, n, generator)
txlookupLimit := uint64(0) txlookupLimit := uint64(0)
chain, err := core.NewBlockChain(db, cacheConfig, gspec, nil, engine, vm.Config{}, nil, &txlookupLimit) chain, err := core.NewBlockChain(db, cacheConfig, gspec, nil, engine, vm.Config{}, &txlookupLimit)
if err != nil { if err != nil {
t.Fatalf("failed to create tester chain: %v", err) t.Fatalf("failed to create tester chain: %v", err)
} }

@ -152,7 +152,7 @@ func createMiner(t *testing.T) *Miner {
// Create consensus engine // Create consensus engine
engine := clique.New(chainConfig.Clique, chainDB) engine := clique.New(chainConfig.Clique, chainDB)
// Create Ethereum backend // Create Ethereum backend
bc, err := core.NewBlockChain(chainDB, nil, genesis, nil, engine, vm.Config{}, nil, nil) bc, err := core.NewBlockChain(chainDB, nil, genesis, nil, engine, vm.Config{}, nil)
if err != nil { if err != nil {
t.Fatalf("can't create new chain %v", err) t.Fatalf("can't create new chain %v", err)
} }

@ -121,7 +121,7 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine
default: default:
t.Fatalf("unexpected consensus engine type: %T", engine) t.Fatalf("unexpected consensus engine type: %T", engine)
} }
chain, err := core.NewBlockChain(db, &core.CacheConfig{TrieDirtyDisabled: true}, gspec, nil, engine, vm.Config{}, nil, nil) chain, err := core.NewBlockChain(db, &core.CacheConfig{TrieDirtyDisabled: true}, gspec, nil, engine, vm.Config{}, nil)
if err != nil { if err != nil {
t.Fatalf("core.NewBlockChain failed: %v", err) t.Fatalf("core.NewBlockChain failed: %v", err)
} }

@ -49,6 +49,17 @@ func TestBlockchain(t *testing.T) {
// using 4.6 TGas // using 4.6 TGas
bt.skipLoad(`.*randomStatetest94.json.*`) bt.skipLoad(`.*randomStatetest94.json.*`)
// After the merge, side chains are accepted as canonical even if they have a lower td
bt.skipLoad(`.*bcMultiChainTest/ChainAtoChainB_difficultyB.json`)
bt.skipLoad(`.*bcMultiChainTest/CallContractFromNotBestBlock.json`)
bt.skipLoad(`.*bcTotalDifficultyTest/uncleBlockAtBlock3afterBlock4.json`)
bt.skipLoad(`.*bcTotalDifficultyTest/lotsOfBranchesOverrideAtTheMiddle.json`)
bt.skipLoad(`.*bcTotalDifficultyTest/sideChainWithMoreTransactions.json`)
bt.skipLoad(`.*bcForkStressTest/ForkStressTest.json`)
bt.skipLoad(`.*bcMultiChainTest/lotsOfLeafs.json`)
bt.skipLoad(`.*bcFrontierToHomestead/blockChainFrontierWithLargerTDvsHomesteadBlockchain.json`)
bt.skipLoad(`.*bcFrontierToHomestead/blockChainFrontierWithLargerTDvsHomesteadBlockchain2.json`)
bt.walk(t, blockTestDir, func(t *testing.T, name string, test *BlockTest) { bt.walk(t, blockTestDir, func(t *testing.T, name string, test *BlockTest) {
execBlockTest(t, bt, test) execBlockTest(t, bt, test)
}) })

@ -153,7 +153,7 @@ func (t *BlockTest) Run(snapshotter bool, scheme string, witness bool, tracer *t
chain, err := core.NewBlockChain(db, cache, gspec, nil, engine, vm.Config{ chain, err := core.NewBlockChain(db, cache, gspec, nil, engine, vm.Config{
Tracer: tracer, Tracer: tracer,
EnableWitnessCollection: witness, EnableWitnessCollection: witness,
}, nil, nil) }, nil)
if err != nil { if err != nil {
return err return err
} }