2015-07-07 03:54:22 +03:00
|
|
|
// Copyright 2014 The go-ethereum Authors
|
2015-07-22 19:48:40 +03:00
|
|
|
// This file is part of the go-ethereum library.
|
2015-07-07 03:54:22 +03:00
|
|
|
//
|
2015-07-23 19:35:11 +03:00
|
|
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
2015-07-07 03:54:22 +03:00
|
|
|
// it under the terms of the GNU Lesser General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
//
|
2015-07-22 19:48:40 +03:00
|
|
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
2015-07-07 03:54:22 +03:00
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
2015-07-22 19:48:40 +03:00
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
2015-07-07 03:54:22 +03:00
|
|
|
// GNU Lesser General Public License for more details.
|
|
|
|
//
|
|
|
|
// You should have received a copy of the GNU Lesser General Public License
|
2015-07-22 19:48:40 +03:00
|
|
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
2015-07-07 03:54:22 +03:00
|
|
|
|
2014-12-04 11:28:02 +02:00
|
|
|
package core
|
2014-12-17 13:57:35 +02:00
|
|
|
|
|
|
|
import (
|
2020-12-04 14:22:19 +03:00
|
|
|
"errors"
|
2019-05-07 15:26:00 +03:00
|
|
|
"fmt"
|
2015-02-18 05:02:15 +02:00
|
|
|
"math/big"
|
2015-06-08 03:19:39 +03:00
|
|
|
"math/rand"
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
2019-04-25 17:59:48 +03:00
|
|
|
"os"
|
2017-08-07 15:47:25 +03:00
|
|
|
"sync"
|
2014-12-17 13:57:35 +02:00
|
|
|
"testing"
|
2016-03-07 19:11:52 +02:00
|
|
|
"time"
|
2014-12-17 13:57:35 +02:00
|
|
|
|
2015-04-30 01:08:43 +03:00
|
|
|
"github.com/ethereum/go-ethereum/common"
|
all: core rework for the merge transition (#23761)
* all: work for eth1/2 transtition
* consensus/beacon, eth: change beacon difficulty to 0
* eth: updates
* all: add terminalBlockDifficulty config, fix rebasing issues
* eth: implemented merge interop spec
* internal/ethapi: update to v1.0.0.alpha.2
This commit updates the code to the new spec, moving payloadId into
it's own object. It also fixes an issue with finalizing an empty blockhash.
It also properly sets the basefee
* all: sync polishes, other fixes + refactors
* core, eth: correct semantics for LeavePoW, EnterPoS
* core: fixed rebasing artifacts
* core: light: performance improvements
* core: use keyed field (f)
* core: eth: fix compilation issues + tests
* eth/catalyst: dbetter error codes
* all: move Merger to consensus/, remove reliance on it in bc
* all: renamed EnterPoS and LeavePoW to ReachTDD and FinalizePoS
* core: make mergelogs a function
* core: use InsertChain instead of InsertBlock
* les: drop merger from lightchain object
* consensus: add merger
* core: recoverAncestors in catalyst mode
* core: fix nitpick
* all: removed merger from beacon, use TTD, nitpicks
* consensus: eth: add docstring, removed unnecessary code duplication
* consensus/beacon: better comment
* all: easy to fix nitpicks by karalabe
* consensus/beacon: verify known headers to be sure
* core: comments
* core: eth: don't drop peers who advertise blocks, nitpicks
* core: never add beacon blocks to the future queue
* core: fixed nitpicks
* consensus/beacon: simplify IsTTDReached check
* consensus/beacon: correct IsTTDReached check
Co-authored-by: rjl493456442 <garyrong0905@gmail.com>
Co-authored-by: Péter Szilágyi <peterke@gmail.com>
2021-11-26 14:23:02 +03:00
|
|
|
"github.com/ethereum/go-ethereum/common/math"
|
2018-06-19 14:41:13 +03:00
|
|
|
"github.com/ethereum/go-ethereum/consensus"
|
all: core rework for the merge transition (#23761)
* all: work for eth1/2 transtition
* consensus/beacon, eth: change beacon difficulty to 0
* eth: updates
* all: add terminalBlockDifficulty config, fix rebasing issues
* eth: implemented merge interop spec
* internal/ethapi: update to v1.0.0.alpha.2
This commit updates the code to the new spec, moving payloadId into
it's own object. It also fixes an issue with finalizing an empty blockhash.
It also properly sets the basefee
* all: sync polishes, other fixes + refactors
* core, eth: correct semantics for LeavePoW, EnterPoS
* core: fixed rebasing artifacts
* core: light: performance improvements
* core: use keyed field (f)
* core: eth: fix compilation issues + tests
* eth/catalyst: dbetter error codes
* all: move Merger to consensus/, remove reliance on it in bc
* all: renamed EnterPoS and LeavePoW to ReachTDD and FinalizePoS
* core: make mergelogs a function
* core: use InsertChain instead of InsertBlock
* les: drop merger from lightchain object
* consensus: add merger
* core: recoverAncestors in catalyst mode
* core: fix nitpick
* all: removed merger from beacon, use TTD, nitpicks
* consensus: eth: add docstring, removed unnecessary code duplication
* consensus/beacon: better comment
* all: easy to fix nitpicks by karalabe
* consensus/beacon: verify known headers to be sure
* core: comments
* core: eth: don't drop peers who advertise blocks, nitpicks
* core: never add beacon blocks to the future queue
* core: fixed nitpicks
* consensus/beacon: simplify IsTTDReached check
* consensus/beacon: correct IsTTDReached check
Co-authored-by: rjl493456442 <garyrong0905@gmail.com>
Co-authored-by: Péter Szilágyi <peterke@gmail.com>
2021-11-26 14:23:02 +03:00
|
|
|
"github.com/ethereum/go-ethereum/consensus/beacon"
|
2017-04-05 01:16:29 +03:00
|
|
|
"github.com/ethereum/go-ethereum/consensus/ethash"
|
2018-05-07 14:35:06 +03:00
|
|
|
"github.com/ethereum/go-ethereum/core/rawdb"
|
2015-10-19 17:08:17 +03:00
|
|
|
"github.com/ethereum/go-ethereum/core/state"
|
2014-12-18 14:12:54 +02:00
|
|
|
"github.com/ethereum/go-ethereum/core/types"
|
2015-08-30 11:19:10 +03:00
|
|
|
"github.com/ethereum/go-ethereum/core/vm"
|
2015-08-17 15:01:41 +03:00
|
|
|
"github.com/ethereum/go-ethereum/crypto"
|
2021-11-25 15:17:09 +03:00
|
|
|
"github.com/ethereum/go-ethereum/eth/tracers/logger"
|
2014-12-18 14:12:54 +02:00
|
|
|
"github.com/ethereum/go-ethereum/ethdb"
|
2015-08-17 15:01:41 +03:00
|
|
|
"github.com/ethereum/go-ethereum/params"
|
2020-08-21 15:10:40 +03:00
|
|
|
"github.com/ethereum/go-ethereum/trie"
|
2014-12-17 13:57:35 +02:00
|
|
|
)
|
|
|
|
|
2018-06-19 14:41:13 +03:00
|
|
|
// Seeds used so we can deterministically generate different blockchains:
// chains produced with distinct seeds diverge, giving the tests a canonical
// chain and a competing fork on demand. They are never reassigned, so they
// are declared as constants.
const (
	canonicalSeed = 1
	forkSeed      = 2
)
|
|
|
|
|
|
|
|
// newCanonical creates a chain database, and injects a deterministic canonical
|
|
|
|
// chain. Depending on the full flag, if creates either a full block chain or a
|
2022-09-07 21:21:59 +03:00
|
|
|
// header only chain. The database and genesis specification for block generation
|
|
|
|
// are also returned in case more test blocks are needed later.
|
|
|
|
func newCanonical(engine consensus.Engine, n int, full bool) (ethdb.Database, *Genesis, *BlockChain, error) {
|
2018-06-19 14:41:13 +03:00
|
|
|
var (
|
2022-09-07 21:21:59 +03:00
|
|
|
genesis = &Genesis{
|
2022-08-30 19:22:28 +03:00
|
|
|
BaseFee: big.NewInt(params.InitialBaseFee),
|
|
|
|
Config: params.AllEthashProtocolChanges,
|
|
|
|
}
|
2018-06-19 14:41:13 +03:00
|
|
|
)
|
|
|
|
// Initialize a fresh chain with only a genesis block
|
2022-09-07 21:21:59 +03:00
|
|
|
blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil)
|
|
|
|
|
2018-06-19 14:41:13 +03:00
|
|
|
// Create and inject the requested chain
|
|
|
|
if n == 0 {
|
2022-09-07 21:21:59 +03:00
|
|
|
return rawdb.NewMemoryDatabase(), genesis, blockchain, nil
|
2018-06-19 14:41:13 +03:00
|
|
|
}
|
|
|
|
if full {
|
|
|
|
// Full block-chain requested
|
2022-09-07 21:21:59 +03:00
|
|
|
genDb, blocks := makeBlockChainWithGenesis(genesis, n, engine, canonicalSeed)
|
2018-06-19 14:41:13 +03:00
|
|
|
_, err := blockchain.InsertChain(blocks)
|
2022-09-07 21:21:59 +03:00
|
|
|
return genDb, genesis, blockchain, err
|
2018-06-19 14:41:13 +03:00
|
|
|
}
|
|
|
|
// Header-only chain requested
|
2022-09-07 21:21:59 +03:00
|
|
|
genDb, headers := makeHeaderChainWithGenesis(genesis, n, engine, canonicalSeed)
|
2018-06-19 14:41:13 +03:00
|
|
|
_, err := blockchain.InsertHeaderChain(headers, 1)
|
2022-09-07 21:21:59 +03:00
|
|
|
return genDb, genesis, blockchain, err
|
2018-06-19 14:41:13 +03:00
|
|
|
}
|
|
|
|
|
2021-05-17 16:13:22 +03:00
|
|
|
func newGwei(n int64) *big.Int {
|
|
|
|
return new(big.Int).Mul(big.NewInt(n), big.NewInt(params.GWei))
|
|
|
|
}
|
|
|
|
|
2015-02-18 05:02:15 +02:00
|
|
|
// testFork creates a fork of length n starting from block #i of the supplied
// chain, imports the fork into a fresh copy of that chain, and then
// cross-imports the fork back into the original. The comparator receives the
// original chain's head total difficulty before and after the import, so
// callers can assert whether the fork was better, worse or equal.
func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, comparator func(td1, td2 *big.Int)) {
	// Copy old chain up to #i into a new db
	genDb, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, full)
	if err != nil {
		t.Fatal("could not make new canonical in testFork", err)
	}
	defer blockchain2.Stop()

	// Assert the chains have the same header/block at #i
	var hash1, hash2 common.Hash
	if full {
		hash1 = blockchain.GetBlockByNumber(uint64(i)).Hash()
		hash2 = blockchain2.GetBlockByNumber(uint64(i)).Hash()
	} else {
		hash1 = blockchain.GetHeaderByNumber(uint64(i)).Hash()
		hash2 = blockchain2.GetHeaderByNumber(uint64(i)).Hash()
	}
	if hash1 != hash2 {
		t.Errorf("chain content mismatch at %d: have hash %v, want hash %v", i, hash2, hash1)
	}
	// Extend the newly created chain with a fork generated from forkSeed, so
	// it diverges from the canonical (canonicalSeed) continuation
	var (
		blockChainB  []*types.Block
		headerChainB []*types.Header
	)
	if full {
		blockChainB = makeBlockChain(blockchain2.chainConfig, blockchain2.CurrentBlock(), n, ethash.NewFaker(), genDb, forkSeed)
		if _, err := blockchain2.InsertChain(blockChainB); err != nil {
			t.Fatalf("failed to insert forking chain: %v", err)
		}
	} else {
		headerChainB = makeHeaderChain(blockchain2.chainConfig, blockchain2.CurrentHeader(), n, ethash.NewFaker(), genDb, forkSeed)
		if _, err := blockchain2.InsertHeaderChain(headerChainB, 1); err != nil {
			t.Fatalf("failed to insert forking chain: %v", err)
		}
	}
	// Sanity check that the forked chain can be imported into the original
	var tdPre, tdPost *big.Int

	if full {
		// Snapshot the head TD before the import, then look up the fork tip's
		// TD afterwards for the comparator
		cur := blockchain.CurrentBlock()
		tdPre = blockchain.GetTd(cur.Hash(), cur.NumberU64())
		if err := testBlockChainImport(blockChainB, blockchain); err != nil {
			t.Fatalf("failed to import forked block chain: %v", err)
		}
		last := blockChainB[len(blockChainB)-1]
		tdPost = blockchain.GetTd(last.Hash(), last.NumberU64())
	} else {
		cur := blockchain.CurrentHeader()
		tdPre = blockchain.GetTd(cur.Hash(), cur.Number.Uint64())
		if err := testHeaderChainImport(headerChainB, blockchain); err != nil {
			t.Fatalf("failed to import forked header chain: %v", err)
		}
		last := headerChainB[len(headerChainB)-1]
		tdPost = blockchain.GetTd(last.Hash(), last.Number.Uint64())
	}
	// Compare the total difficulties of the chains
	comparator(tdPre, tdPost)
}
|
|
|
|
|
2015-09-21 15:36:29 +03:00
|
|
|
// testBlockChainImport tries to process a chain of blocks, writing them into
// the database if successful. Already-known blocks are skipped; the first
// validation or processing failure aborts the import and is returned.
func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
	for _, block := range chain {
		// Try and process the block
		err := blockchain.engine.VerifyHeader(blockchain, block.Header(), true)
		if err == nil {
			err = blockchain.validator.ValidateBody(block)
		}
		if err != nil {
			// Re-imports of blocks the chain already has are fine; anything
			// else is a genuine failure
			if err == ErrKnownBlock {
				continue
			}
			return err
		}
		// Process the block on top of the parent's state
		statedb, err := state.New(blockchain.GetBlockByHash(block.ParentHash()).Root(), blockchain.stateCache, nil)
		if err != nil {
			return err
		}
		receipts, _, usedGas, err := blockchain.processor.Process(block, statedb, vm.Config{})
		if err != nil {
			blockchain.reportBlock(block, receipts, err)
			return err
		}
		err = blockchain.validator.ValidateState(block, statedb, receipts, usedGas)
		if err != nil {
			blockchain.reportBlock(block, receipts, err)
			return err
		}

		// Manually persist the block, its total difficulty and the resulting
		// state while holding the chain mutex
		blockchain.chainmu.MustLock()
		rawdb.WriteTd(blockchain.db, block.Hash(), block.NumberU64(), new(big.Int).Add(block.Difficulty(), blockchain.GetTd(block.ParentHash(), block.NumberU64()-1)))
		rawdb.WriteBlock(blockchain.db, block)
		statedb.Commit(false)
		blockchain.chainmu.Unlock()
	}
	return nil
}
|
|
|
|
|
|
|
|
// testHeaderChainImport tries to process a chain of header, writing them into
|
|
|
|
// the database if successful.
|
2015-10-19 17:08:17 +03:00
|
|
|
func testHeaderChainImport(chain []*types.Header, blockchain *BlockChain) error {
|
2015-09-21 15:36:29 +03:00
|
|
|
for _, header := range chain {
|
|
|
|
// Try and validate the header
|
2017-04-05 01:16:29 +03:00
|
|
|
if err := blockchain.engine.VerifyHeader(blockchain, header, false); err != nil {
|
2015-09-21 15:36:29 +03:00
|
|
|
return err
|
|
|
|
}
|
2016-03-15 20:55:39 +02:00
|
|
|
// Manually insert the header into the database, but don't reorganise (allows subsequent testing)
|
2021-10-07 16:47:50 +03:00
|
|
|
blockchain.chainmu.MustLock()
|
2021-10-12 00:16:46 +03:00
|
|
|
rawdb.WriteTd(blockchain.db, header.Hash(), header.Number.Uint64(), new(big.Int).Add(header.Difficulty, blockchain.GetTd(header.ParentHash, header.Number.Uint64()-1)))
|
2018-05-07 14:35:06 +03:00
|
|
|
rawdb.WriteHeader(blockchain.db, header)
|
2019-01-11 16:27:47 +03:00
|
|
|
blockchain.chainmu.Unlock()
|
2015-09-21 15:36:29 +03:00
|
|
|
}
|
|
|
|
return nil
|
2015-02-18 05:02:15 +02:00
|
|
|
}
|
|
|
|
|
2015-10-05 17:51:06 +03:00
|
|
|
func TestLastBlock(t *testing.T) {
|
2022-09-07 21:21:59 +03:00
|
|
|
genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, true)
|
2018-02-23 15:02:33 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to create pristine chain: %v", err)
|
|
|
|
}
|
|
|
|
defer blockchain.Stop()
|
2017-08-07 15:47:25 +03:00
|
|
|
|
2022-09-07 21:21:59 +03:00
|
|
|
blocks := makeBlockChain(blockchain.chainConfig, blockchain.CurrentBlock(), 1, ethash.NewFullFaker(), genDb, 0)
|
2018-02-23 15:02:33 +03:00
|
|
|
if _, err := blockchain.InsertChain(blocks); err != nil {
|
|
|
|
t.Fatalf("Failed to insert block: %v", err)
|
|
|
|
}
|
2018-05-07 14:35:06 +03:00
|
|
|
if blocks[len(blocks)-1].Hash() != rawdb.ReadHeadBlockHash(blockchain.db) {
|
2018-02-23 15:02:33 +03:00
|
|
|
t.Fatalf("Write/Get HeadBlockHash failed")
|
2015-10-05 17:51:06 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
all: core rework for the merge transition (#23761)
* all: work for eth1/2 transtition
* consensus/beacon, eth: change beacon difficulty to 0
* eth: updates
* all: add terminalBlockDifficulty config, fix rebasing issues
* eth: implemented merge interop spec
* internal/ethapi: update to v1.0.0.alpha.2
This commit updates the code to the new spec, moving payloadId into
it's own object. It also fixes an issue with finalizing an empty blockhash.
It also properly sets the basefee
* all: sync polishes, other fixes + refactors
* core, eth: correct semantics for LeavePoW, EnterPoS
* core: fixed rebasing artifacts
* core: light: performance improvements
* core: use keyed field (f)
* core: eth: fix compilation issues + tests
* eth/catalyst: dbetter error codes
* all: move Merger to consensus/, remove reliance on it in bc
* all: renamed EnterPoS and LeavePoW to ReachTDD and FinalizePoS
* core: make mergelogs a function
* core: use InsertChain instead of InsertBlock
* les: drop merger from lightchain object
* consensus: add merger
* core: recoverAncestors in catalyst mode
* core: fix nitpick
* all: removed merger from beacon, use TTD, nitpicks
* consensus: eth: add docstring, removed unnecessary code duplication
* consensus/beacon: better comment
* all: easy to fix nitpicks by karalabe
* consensus/beacon: verify known headers to be sure
* core: comments
* core: eth: don't drop peers who advertise blocks, nitpicks
* core: never add beacon blocks to the future queue
* core: fixed nitpicks
* consensus/beacon: simplify IsTTDReached check
* consensus/beacon: correct IsTTDReached check
Co-authored-by: rjl493456442 <garyrong0905@gmail.com>
Co-authored-by: Péter Szilágyi <peterke@gmail.com>
2021-11-26 14:23:02 +03:00
|
|
|
// Test inserts the blocks/headers after the fork choice rule is changed.
|
|
|
|
// The chain is reorged to whatever specified.
|
|
|
|
func testInsertAfterMerge(t *testing.T, blockchain *BlockChain, i, n int, full bool) {
|
|
|
|
// Copy old chain up to #i into a new db
|
2022-09-07 21:21:59 +03:00
|
|
|
genDb, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, full)
|
all: core rework for the merge transition (#23761)
* all: work for eth1/2 transtition
* consensus/beacon, eth: change beacon difficulty to 0
* eth: updates
* all: add terminalBlockDifficulty config, fix rebasing issues
* eth: implemented merge interop spec
* internal/ethapi: update to v1.0.0.alpha.2
This commit updates the code to the new spec, moving payloadId into
it's own object. It also fixes an issue with finalizing an empty blockhash.
It also properly sets the basefee
* all: sync polishes, other fixes + refactors
* core, eth: correct semantics for LeavePoW, EnterPoS
* core: fixed rebasing artifacts
* core: light: performance improvements
* core: use keyed field (f)
* core: eth: fix compilation issues + tests
* eth/catalyst: dbetter error codes
* all: move Merger to consensus/, remove reliance on it in bc
* all: renamed EnterPoS and LeavePoW to ReachTDD and FinalizePoS
* core: make mergelogs a function
* core: use InsertChain instead of InsertBlock
* les: drop merger from lightchain object
* consensus: add merger
* core: recoverAncestors in catalyst mode
* core: fix nitpick
* all: removed merger from beacon, use TTD, nitpicks
* consensus: eth: add docstring, removed unnecessary code duplication
* consensus/beacon: better comment
* all: easy to fix nitpicks by karalabe
* consensus/beacon: verify known headers to be sure
* core: comments
* core: eth: don't drop peers who advertise blocks, nitpicks
* core: never add beacon blocks to the future queue
* core: fixed nitpicks
* consensus/beacon: simplify IsTTDReached check
* consensus/beacon: correct IsTTDReached check
Co-authored-by: rjl493456442 <garyrong0905@gmail.com>
Co-authored-by: Péter Szilágyi <peterke@gmail.com>
2021-11-26 14:23:02 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal("could not make new canonical in testFork", err)
|
|
|
|
}
|
|
|
|
defer blockchain2.Stop()
|
|
|
|
|
|
|
|
// Assert the chains have the same header/block at #i
|
|
|
|
var hash1, hash2 common.Hash
|
|
|
|
if full {
|
|
|
|
hash1 = blockchain.GetBlockByNumber(uint64(i)).Hash()
|
|
|
|
hash2 = blockchain2.GetBlockByNumber(uint64(i)).Hash()
|
|
|
|
} else {
|
|
|
|
hash1 = blockchain.GetHeaderByNumber(uint64(i)).Hash()
|
|
|
|
hash2 = blockchain2.GetHeaderByNumber(uint64(i)).Hash()
|
|
|
|
}
|
|
|
|
if hash1 != hash2 {
|
|
|
|
t.Errorf("chain content mismatch at %d: have hash %v, want hash %v", i, hash2, hash1)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Extend the newly created chain
|
|
|
|
if full {
|
2022-09-07 21:21:59 +03:00
|
|
|
blockChainB := makeBlockChain(blockchain2.chainConfig, blockchain2.CurrentBlock(), n, ethash.NewFaker(), genDb, forkSeed)
|
all: core rework for the merge transition (#23761)
* all: work for eth1/2 transtition
* consensus/beacon, eth: change beacon difficulty to 0
* eth: updates
* all: add terminalBlockDifficulty config, fix rebasing issues
* eth: implemented merge interop spec
* internal/ethapi: update to v1.0.0.alpha.2
This commit updates the code to the new spec, moving payloadId into
it's own object. It also fixes an issue with finalizing an empty blockhash.
It also properly sets the basefee
* all: sync polishes, other fixes + refactors
* core, eth: correct semantics for LeavePoW, EnterPoS
* core: fixed rebasing artifacts
* core: light: performance improvements
* core: use keyed field (f)
* core: eth: fix compilation issues + tests
* eth/catalyst: dbetter error codes
* all: move Merger to consensus/, remove reliance on it in bc
* all: renamed EnterPoS and LeavePoW to ReachTDD and FinalizePoS
* core: make mergelogs a function
* core: use InsertChain instead of InsertBlock
* les: drop merger from lightchain object
* consensus: add merger
* core: recoverAncestors in catalyst mode
* core: fix nitpick
* all: removed merger from beacon, use TTD, nitpicks
* consensus: eth: add docstring, removed unnecessary code duplication
* consensus/beacon: better comment
* all: easy to fix nitpicks by karalabe
* consensus/beacon: verify known headers to be sure
* core: comments
* core: eth: don't drop peers who advertise blocks, nitpicks
* core: never add beacon blocks to the future queue
* core: fixed nitpicks
* consensus/beacon: simplify IsTTDReached check
* consensus/beacon: correct IsTTDReached check
Co-authored-by: rjl493456442 <garyrong0905@gmail.com>
Co-authored-by: Péter Szilágyi <peterke@gmail.com>
2021-11-26 14:23:02 +03:00
|
|
|
if _, err := blockchain2.InsertChain(blockChainB); err != nil {
|
|
|
|
t.Fatalf("failed to insert forking chain: %v", err)
|
|
|
|
}
|
|
|
|
if blockchain2.CurrentBlock().NumberU64() != blockChainB[len(blockChainB)-1].NumberU64() {
|
|
|
|
t.Fatalf("failed to reorg to the given chain")
|
|
|
|
}
|
|
|
|
if blockchain2.CurrentBlock().Hash() != blockChainB[len(blockChainB)-1].Hash() {
|
|
|
|
t.Fatalf("failed to reorg to the given chain")
|
|
|
|
}
|
|
|
|
} else {
|
2022-09-07 21:21:59 +03:00
|
|
|
headerChainB := makeHeaderChain(blockchain2.chainConfig, blockchain2.CurrentHeader(), n, ethash.NewFaker(), genDb, forkSeed)
|
all: core rework for the merge transition (#23761)
* all: work for eth1/2 transtition
* consensus/beacon, eth: change beacon difficulty to 0
* eth: updates
* all: add terminalBlockDifficulty config, fix rebasing issues
* eth: implemented merge interop spec
* internal/ethapi: update to v1.0.0.alpha.2
This commit updates the code to the new spec, moving payloadId into
it's own object. It also fixes an issue with finalizing an empty blockhash.
It also properly sets the basefee
* all: sync polishes, other fixes + refactors
* core, eth: correct semantics for LeavePoW, EnterPoS
* core: fixed rebasing artifacts
* core: light: performance improvements
* core: use keyed field (f)
* core: eth: fix compilation issues + tests
* eth/catalyst: dbetter error codes
* all: move Merger to consensus/, remove reliance on it in bc
* all: renamed EnterPoS and LeavePoW to ReachTDD and FinalizePoS
* core: make mergelogs a function
* core: use InsertChain instead of InsertBlock
* les: drop merger from lightchain object
* consensus: add merger
* core: recoverAncestors in catalyst mode
* core: fix nitpick
* all: removed merger from beacon, use TTD, nitpicks
* consensus: eth: add docstring, removed unnecessary code duplication
* consensus/beacon: better comment
* all: easy to fix nitpicks by karalabe
* consensus/beacon: verify known headers to be sure
* core: comments
* core: eth: don't drop peers who advertise blocks, nitpicks
* core: never add beacon blocks to the future queue
* core: fixed nitpicks
* consensus/beacon: simplify IsTTDReached check
* consensus/beacon: correct IsTTDReached check
Co-authored-by: rjl493456442 <garyrong0905@gmail.com>
Co-authored-by: Péter Szilágyi <peterke@gmail.com>
2021-11-26 14:23:02 +03:00
|
|
|
if _, err := blockchain2.InsertHeaderChain(headerChainB, 1); err != nil {
|
|
|
|
t.Fatalf("failed to insert forking chain: %v", err)
|
|
|
|
}
|
|
|
|
if blockchain2.CurrentHeader().Number.Uint64() != headerChainB[len(headerChainB)-1].Number.Uint64() {
|
|
|
|
t.Fatalf("failed to reorg to the given chain")
|
|
|
|
}
|
|
|
|
if blockchain2.CurrentHeader().Hash() != headerChainB[len(headerChainB)-1].Hash() {
|
|
|
|
t.Fatalf("failed to reorg to the given chain")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-09-21 15:36:29 +03:00
|
|
|
// Tests that given a starting canonical chain of a given size, it can be extended
// with various length chains.
//
// The two variants exercise the header-only (full=false) and the full block
// (full=true) code paths respectively.
func TestExtendCanonicalHeaders(t *testing.T) { testExtendCanonical(t, false) }
func TestExtendCanonicalBlocks(t *testing.T)  { testExtendCanonical(t, true) }
|
|
|
|
|
|
|
|
func testExtendCanonical(t *testing.T, full bool) {
|
|
|
|
length := 5
|
|
|
|
|
|
|
|
// Make first chain starting from genesis
|
2022-09-07 21:21:59 +03:00
|
|
|
_, _, processor, err := newCanonical(ethash.NewFaker(), length, full)
|
2015-02-18 05:02:15 +02:00
|
|
|
if err != nil {
|
2015-09-21 15:36:29 +03:00
|
|
|
t.Fatalf("failed to make new canonical chain: %v", err)
|
2015-02-18 05:02:15 +02:00
|
|
|
}
|
2017-08-07 15:47:25 +03:00
|
|
|
defer processor.Stop()
|
|
|
|
|
2015-09-21 15:36:29 +03:00
|
|
|
// Define the difficulty comparator
|
|
|
|
better := func(td1, td2 *big.Int) {
|
2015-02-18 05:02:15 +02:00
|
|
|
if td2.Cmp(td1) <= 0 {
|
2015-09-21 15:36:29 +03:00
|
|
|
t.Errorf("total difficulty mismatch: have %v, expected more than %v", td2, td1)
|
2015-02-18 05:02:15 +02:00
|
|
|
}
|
|
|
|
}
|
2015-09-21 15:36:29 +03:00
|
|
|
// Start fork from current height
|
|
|
|
testFork(t, processor, length, 1, full, better)
|
|
|
|
testFork(t, processor, length, 2, full, better)
|
|
|
|
testFork(t, processor, length, 5, full, better)
|
|
|
|
testFork(t, processor, length, 10, full, better)
|
2015-02-18 05:02:15 +02:00
|
|
|
}
|
|
|
|
|
all: core rework for the merge transition (#23761)
* all: work for eth1/2 transtition
* consensus/beacon, eth: change beacon difficulty to 0
* eth: updates
* all: add terminalBlockDifficulty config, fix rebasing issues
* eth: implemented merge interop spec
* internal/ethapi: update to v1.0.0.alpha.2
This commit updates the code to the new spec, moving payloadId into
it's own object. It also fixes an issue with finalizing an empty blockhash.
It also properly sets the basefee
* all: sync polishes, other fixes + refactors
* core, eth: correct semantics for LeavePoW, EnterPoS
* core: fixed rebasing artifacts
* core: light: performance improvements
* core: use keyed field (f)
* core: eth: fix compilation issues + tests
* eth/catalyst: dbetter error codes
* all: move Merger to consensus/, remove reliance on it in bc
* all: renamed EnterPoS and LeavePoW to ReachTDD and FinalizePoS
* core: make mergelogs a function
* core: use InsertChain instead of InsertBlock
* les: drop merger from lightchain object
* consensus: add merger
* core: recoverAncestors in catalyst mode
* core: fix nitpick
* all: removed merger from beacon, use TTD, nitpicks
* consensus: eth: add docstring, removed unnecessary code duplication
* consensus/beacon: better comment
* all: easy to fix nitpicks by karalabe
* consensus/beacon: verify known headers to be sure
* core: comments
* core: eth: don't drop peers who advertise blocks, nitpicks
* core: never add beacon blocks to the future queue
* core: fixed nitpicks
* consensus/beacon: simplify IsTTDReached check
* consensus/beacon: correct IsTTDReached check
Co-authored-by: rjl493456442 <garyrong0905@gmail.com>
Co-authored-by: Péter Szilágyi <peterke@gmail.com>
2021-11-26 14:23:02 +03:00
|
|
|
// Tests that given a starting canonical chain of a given size, it can be extended
// with various length chains.
//
// Same as TestExtendCanonicalHeaders/Blocks, but driving the post-merge
// insertion helper instead of the pre-merge fork comparator.
func TestExtendCanonicalHeadersAfterMerge(t *testing.T) { testExtendCanonicalAfterMerge(t, false) }
func TestExtendCanonicalBlocksAfterMerge(t *testing.T)  { testExtendCanonicalAfterMerge(t, true) }
|
|
|
|
|
|
|
|
func testExtendCanonicalAfterMerge(t *testing.T, full bool) {
|
|
|
|
length := 5
|
|
|
|
|
|
|
|
// Make first chain starting from genesis
|
2022-09-07 21:21:59 +03:00
|
|
|
_, _, processor, err := newCanonical(ethash.NewFaker(), length, full)
|
all: core rework for the merge transition (#23761)
* all: work for eth1/2 transtition
* consensus/beacon, eth: change beacon difficulty to 0
* eth: updates
* all: add terminalBlockDifficulty config, fix rebasing issues
* eth: implemented merge interop spec
* internal/ethapi: update to v1.0.0.alpha.2
This commit updates the code to the new spec, moving payloadId into
it's own object. It also fixes an issue with finalizing an empty blockhash.
It also properly sets the basefee
* all: sync polishes, other fixes + refactors
* core, eth: correct semantics for LeavePoW, EnterPoS
* core: fixed rebasing artifacts
* core: light: performance improvements
* core: use keyed field (f)
* core: eth: fix compilation issues + tests
* eth/catalyst: dbetter error codes
* all: move Merger to consensus/, remove reliance on it in bc
* all: renamed EnterPoS and LeavePoW to ReachTDD and FinalizePoS
* core: make mergelogs a function
* core: use InsertChain instead of InsertBlock
* les: drop merger from lightchain object
* consensus: add merger
* core: recoverAncestors in catalyst mode
* core: fix nitpick
* all: removed merger from beacon, use TTD, nitpicks
* consensus: eth: add docstring, removed unnecessary code duplication
* consensus/beacon: better comment
* all: easy to fix nitpicks by karalabe
* consensus/beacon: verify known headers to be sure
* core: comments
* core: eth: don't drop peers who advertise blocks, nitpicks
* core: never add beacon blocks to the future queue
* core: fixed nitpicks
* consensus/beacon: simplify IsTTDReached check
* consensus/beacon: correct IsTTDReached check
Co-authored-by: rjl493456442 <garyrong0905@gmail.com>
Co-authored-by: Péter Szilágyi <peterke@gmail.com>
2021-11-26 14:23:02 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to make new canonical chain: %v", err)
|
|
|
|
}
|
|
|
|
defer processor.Stop()
|
|
|
|
|
|
|
|
testInsertAfterMerge(t, processor, length, 1, full)
|
|
|
|
testInsertAfterMerge(t, processor, length, 10, full)
|
|
|
|
}
|
|
|
|
|
2015-09-21 15:36:29 +03:00
|
|
|
// Tests that given a starting canonical chain of a given size, creating shorter
|
|
|
|
// forks do not take canonical ownership.
|
|
|
|
func TestShorterForkHeaders(t *testing.T) { testShorterFork(t, false) }
|
|
|
|
func TestShorterForkBlocks(t *testing.T) { testShorterFork(t, true) }
|
|
|
|
|
|
|
|
func testShorterFork(t *testing.T, full bool) {
|
|
|
|
length := 10
|
|
|
|
|
|
|
|
// Make first chain starting from genesis
|
2022-09-07 21:21:59 +03:00
|
|
|
_, _, processor, err := newCanonical(ethash.NewFaker(), length, full)
|
2015-02-18 05:02:15 +02:00
|
|
|
if err != nil {
|
2015-09-21 15:36:29 +03:00
|
|
|
t.Fatalf("failed to make new canonical chain: %v", err)
|
2015-02-18 05:02:15 +02:00
|
|
|
}
|
2017-08-07 15:47:25 +03:00
|
|
|
defer processor.Stop()
|
|
|
|
|
2015-09-21 15:36:29 +03:00
|
|
|
// Define the difficulty comparator
|
|
|
|
worse := func(td1, td2 *big.Int) {
|
2015-02-18 05:02:15 +02:00
|
|
|
if td2.Cmp(td1) >= 0 {
|
2015-09-21 15:36:29 +03:00
|
|
|
t.Errorf("total difficulty mismatch: have %v, expected less than %v", td2, td1)
|
2015-02-18 05:02:15 +02:00
|
|
|
}
|
|
|
|
}
|
2015-09-21 15:36:29 +03:00
|
|
|
// Sum of numbers must be less than `length` for this to be a shorter fork
|
|
|
|
testFork(t, processor, 0, 3, full, worse)
|
|
|
|
testFork(t, processor, 0, 7, full, worse)
|
|
|
|
testFork(t, processor, 1, 1, full, worse)
|
|
|
|
testFork(t, processor, 1, 7, full, worse)
|
|
|
|
testFork(t, processor, 5, 3, full, worse)
|
|
|
|
testFork(t, processor, 5, 4, full, worse)
|
2015-02-18 05:02:15 +02:00
|
|
|
}
|
|
|
|
|
all: core rework for the merge transition (#23761)
* all: work for eth1/2 transtition
* consensus/beacon, eth: change beacon difficulty to 0
* eth: updates
* all: add terminalBlockDifficulty config, fix rebasing issues
* eth: implemented merge interop spec
* internal/ethapi: update to v1.0.0.alpha.2
This commit updates the code to the new spec, moving payloadId into
it's own object. It also fixes an issue with finalizing an empty blockhash.
It also properly sets the basefee
* all: sync polishes, other fixes + refactors
* core, eth: correct semantics for LeavePoW, EnterPoS
* core: fixed rebasing artifacts
* core: light: performance improvements
* core: use keyed field (f)
* core: eth: fix compilation issues + tests
* eth/catalyst: dbetter error codes
* all: move Merger to consensus/, remove reliance on it in bc
* all: renamed EnterPoS and LeavePoW to ReachTDD and FinalizePoS
* core: make mergelogs a function
* core: use InsertChain instead of InsertBlock
* les: drop merger from lightchain object
* consensus: add merger
* core: recoverAncestors in catalyst mode
* core: fix nitpick
* all: removed merger from beacon, use TTD, nitpicks
* consensus: eth: add docstring, removed unnecessary code duplication
* consensus/beacon: better comment
* all: easy to fix nitpicks by karalabe
* consensus/beacon: verify known headers to be sure
* core: comments
* core: eth: don't drop peers who advertise blocks, nitpicks
* core: never add beacon blocks to the future queue
* core: fixed nitpicks
* consensus/beacon: simplify IsTTDReached check
* consensus/beacon: correct IsTTDReached check
Co-authored-by: rjl493456442 <garyrong0905@gmail.com>
Co-authored-by: Péter Szilágyi <peterke@gmail.com>
2021-11-26 14:23:02 +03:00
|
|
|
// Tests that given a starting canonical chain of a given size, creating shorter
|
|
|
|
// forks do not take canonical ownership.
|
|
|
|
func TestShorterForkHeadersAfterMerge(t *testing.T) { testShorterForkAfterMerge(t, false) }
|
|
|
|
func TestShorterForkBlocksAfterMerge(t *testing.T) { testShorterForkAfterMerge(t, true) }
|
|
|
|
|
|
|
|
func testShorterForkAfterMerge(t *testing.T, full bool) {
|
|
|
|
length := 10
|
|
|
|
|
|
|
|
// Make first chain starting from genesis
|
2022-09-07 21:21:59 +03:00
|
|
|
_, _, processor, err := newCanonical(ethash.NewFaker(), length, full)
|
all: core rework for the merge transition (#23761)
* all: work for eth1/2 transtition
* consensus/beacon, eth: change beacon difficulty to 0
* eth: updates
* all: add terminalBlockDifficulty config, fix rebasing issues
* eth: implemented merge interop spec
* internal/ethapi: update to v1.0.0.alpha.2
This commit updates the code to the new spec, moving payloadId into
it's own object. It also fixes an issue with finalizing an empty blockhash.
It also properly sets the basefee
* all: sync polishes, other fixes + refactors
* core, eth: correct semantics for LeavePoW, EnterPoS
* core: fixed rebasing artifacts
* core: light: performance improvements
* core: use keyed field (f)
* core: eth: fix compilation issues + tests
* eth/catalyst: dbetter error codes
* all: move Merger to consensus/, remove reliance on it in bc
* all: renamed EnterPoS and LeavePoW to ReachTDD and FinalizePoS
* core: make mergelogs a function
* core: use InsertChain instead of InsertBlock
* les: drop merger from lightchain object
* consensus: add merger
* core: recoverAncestors in catalyst mode
* core: fix nitpick
* all: removed merger from beacon, use TTD, nitpicks
* consensus: eth: add docstring, removed unnecessary code duplication
* consensus/beacon: better comment
* all: easy to fix nitpicks by karalabe
* consensus/beacon: verify known headers to be sure
* core: comments
* core: eth: don't drop peers who advertise blocks, nitpicks
* core: never add beacon blocks to the future queue
* core: fixed nitpicks
* consensus/beacon: simplify IsTTDReached check
* consensus/beacon: correct IsTTDReached check
Co-authored-by: rjl493456442 <garyrong0905@gmail.com>
Co-authored-by: Péter Szilágyi <peterke@gmail.com>
2021-11-26 14:23:02 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to make new canonical chain: %v", err)
|
|
|
|
}
|
|
|
|
defer processor.Stop()
|
|
|
|
|
|
|
|
testInsertAfterMerge(t, processor, 0, 3, full)
|
|
|
|
testInsertAfterMerge(t, processor, 0, 7, full)
|
|
|
|
testInsertAfterMerge(t, processor, 1, 1, full)
|
|
|
|
testInsertAfterMerge(t, processor, 1, 7, full)
|
|
|
|
testInsertAfterMerge(t, processor, 5, 3, full)
|
|
|
|
testInsertAfterMerge(t, processor, 5, 4, full)
|
|
|
|
}
|
|
|
|
|
2015-09-21 15:36:29 +03:00
|
|
|
// Tests that given a starting canonical chain of a given size, creating longer
|
|
|
|
// forks do take canonical ownership.
|
|
|
|
func TestLongerForkHeaders(t *testing.T) { testLongerFork(t, false) }
|
|
|
|
func TestLongerForkBlocks(t *testing.T) { testLongerFork(t, true) }
|
|
|
|
|
|
|
|
func testLongerFork(t *testing.T, full bool) {
|
|
|
|
length := 10
|
|
|
|
|
|
|
|
// Make first chain starting from genesis
|
2022-09-07 21:21:59 +03:00
|
|
|
_, _, processor, err := newCanonical(ethash.NewFaker(), length, full)
|
2015-02-18 05:02:15 +02:00
|
|
|
if err != nil {
|
2015-09-21 15:36:29 +03:00
|
|
|
t.Fatalf("failed to make new canonical chain: %v", err)
|
2015-02-18 05:02:15 +02:00
|
|
|
}
|
2017-08-07 15:47:25 +03:00
|
|
|
defer processor.Stop()
|
|
|
|
|
all: core rework for the merge transition (#23761)
* all: work for eth1/2 transtition
* consensus/beacon, eth: change beacon difficulty to 0
* eth: updates
* all: add terminalBlockDifficulty config, fix rebasing issues
* eth: implemented merge interop spec
* internal/ethapi: update to v1.0.0.alpha.2
This commit updates the code to the new spec, moving payloadId into
it's own object. It also fixes an issue with finalizing an empty blockhash.
It also properly sets the basefee
* all: sync polishes, other fixes + refactors
* core, eth: correct semantics for LeavePoW, EnterPoS
* core: fixed rebasing artifacts
* core: light: performance improvements
* core: use keyed field (f)
* core: eth: fix compilation issues + tests
* eth/catalyst: dbetter error codes
* all: move Merger to consensus/, remove reliance on it in bc
* all: renamed EnterPoS and LeavePoW to ReachTDD and FinalizePoS
* core: make mergelogs a function
* core: use InsertChain instead of InsertBlock
* les: drop merger from lightchain object
* consensus: add merger
* core: recoverAncestors in catalyst mode
* core: fix nitpick
* all: removed merger from beacon, use TTD, nitpicks
* consensus: eth: add docstring, removed unnecessary code duplication
* consensus/beacon: better comment
* all: easy to fix nitpicks by karalabe
* consensus/beacon: verify known headers to be sure
* core: comments
* core: eth: don't drop peers who advertise blocks, nitpicks
* core: never add beacon blocks to the future queue
* core: fixed nitpicks
* consensus/beacon: simplify IsTTDReached check
* consensus/beacon: correct IsTTDReached check
Co-authored-by: rjl493456442 <garyrong0905@gmail.com>
Co-authored-by: Péter Szilágyi <peterke@gmail.com>
2021-11-26 14:23:02 +03:00
|
|
|
testInsertAfterMerge(t, processor, 0, 11, full)
|
|
|
|
testInsertAfterMerge(t, processor, 0, 15, full)
|
|
|
|
testInsertAfterMerge(t, processor, 1, 10, full)
|
|
|
|
testInsertAfterMerge(t, processor, 1, 12, full)
|
|
|
|
testInsertAfterMerge(t, processor, 5, 6, full)
|
|
|
|
testInsertAfterMerge(t, processor, 5, 8, full)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Tests that given a starting canonical chain of a given size, creating longer
|
|
|
|
// forks do take canonical ownership.
|
|
|
|
func TestLongerForkHeadersAfterMerge(t *testing.T) { testLongerForkAfterMerge(t, false) }
|
|
|
|
func TestLongerForkBlocksAfterMerge(t *testing.T) { testLongerForkAfterMerge(t, true) }
|
|
|
|
|
|
|
|
func testLongerForkAfterMerge(t *testing.T, full bool) {
|
|
|
|
length := 10
|
|
|
|
|
|
|
|
// Make first chain starting from genesis
|
2022-09-07 21:21:59 +03:00
|
|
|
_, _, processor, err := newCanonical(ethash.NewFaker(), length, full)
|
all: core rework for the merge transition (#23761)
* all: work for eth1/2 transtition
* consensus/beacon, eth: change beacon difficulty to 0
* eth: updates
* all: add terminalBlockDifficulty config, fix rebasing issues
* eth: implemented merge interop spec
* internal/ethapi: update to v1.0.0.alpha.2
This commit updates the code to the new spec, moving payloadId into
it's own object. It also fixes an issue with finalizing an empty blockhash.
It also properly sets the basefee
* all: sync polishes, other fixes + refactors
* core, eth: correct semantics for LeavePoW, EnterPoS
* core: fixed rebasing artifacts
* core: light: performance improvements
* core: use keyed field (f)
* core: eth: fix compilation issues + tests
* eth/catalyst: dbetter error codes
* all: move Merger to consensus/, remove reliance on it in bc
* all: renamed EnterPoS and LeavePoW to ReachTDD and FinalizePoS
* core: make mergelogs a function
* core: use InsertChain instead of InsertBlock
* les: drop merger from lightchain object
* consensus: add merger
* core: recoverAncestors in catalyst mode
* core: fix nitpick
* all: removed merger from beacon, use TTD, nitpicks
* consensus: eth: add docstring, removed unnecessary code duplication
* consensus/beacon: better comment
* all: easy to fix nitpicks by karalabe
* consensus/beacon: verify known headers to be sure
* core: comments
* core: eth: don't drop peers who advertise blocks, nitpicks
* core: never add beacon blocks to the future queue
* core: fixed nitpicks
* consensus/beacon: simplify IsTTDReached check
* consensus/beacon: correct IsTTDReached check
Co-authored-by: rjl493456442 <garyrong0905@gmail.com>
Co-authored-by: Péter Szilágyi <peterke@gmail.com>
2021-11-26 14:23:02 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to make new canonical chain: %v", err)
|
2015-02-18 05:02:15 +02:00
|
|
|
}
|
all: core rework for the merge transition (#23761)
* all: work for eth1/2 transtition
* consensus/beacon, eth: change beacon difficulty to 0
* eth: updates
* all: add terminalBlockDifficulty config, fix rebasing issues
* eth: implemented merge interop spec
* internal/ethapi: update to v1.0.0.alpha.2
This commit updates the code to the new spec, moving payloadId into
it's own object. It also fixes an issue with finalizing an empty blockhash.
It also properly sets the basefee
* all: sync polishes, other fixes + refactors
* core, eth: correct semantics for LeavePoW, EnterPoS
* core: fixed rebasing artifacts
* core: light: performance improvements
* core: use keyed field (f)
* core: eth: fix compilation issues + tests
* eth/catalyst: dbetter error codes
* all: move Merger to consensus/, remove reliance on it in bc
* all: renamed EnterPoS and LeavePoW to ReachTDD and FinalizePoS
* core: make mergelogs a function
* core: use InsertChain instead of InsertBlock
* les: drop merger from lightchain object
* consensus: add merger
* core: recoverAncestors in catalyst mode
* core: fix nitpick
* all: removed merger from beacon, use TTD, nitpicks
* consensus: eth: add docstring, removed unnecessary code duplication
* consensus/beacon: better comment
* all: easy to fix nitpicks by karalabe
* consensus/beacon: verify known headers to be sure
* core: comments
* core: eth: don't drop peers who advertise blocks, nitpicks
* core: never add beacon blocks to the future queue
* core: fixed nitpicks
* consensus/beacon: simplify IsTTDReached check
* consensus/beacon: correct IsTTDReached check
Co-authored-by: rjl493456442 <garyrong0905@gmail.com>
Co-authored-by: Péter Szilágyi <peterke@gmail.com>
2021-11-26 14:23:02 +03:00
|
|
|
defer processor.Stop()
|
|
|
|
|
|
|
|
testInsertAfterMerge(t, processor, 0, 11, full)
|
|
|
|
testInsertAfterMerge(t, processor, 0, 15, full)
|
|
|
|
testInsertAfterMerge(t, processor, 1, 10, full)
|
|
|
|
testInsertAfterMerge(t, processor, 1, 12, full)
|
|
|
|
testInsertAfterMerge(t, processor, 5, 6, full)
|
|
|
|
testInsertAfterMerge(t, processor, 5, 8, full)
|
2015-02-18 05:02:15 +02:00
|
|
|
}
|
|
|
|
|
2015-09-21 15:36:29 +03:00
|
|
|
// Tests that given a starting canonical chain of a given size, creating equal
|
|
|
|
// forks do take canonical ownership.
|
|
|
|
func TestEqualForkHeaders(t *testing.T) { testEqualFork(t, false) }
|
|
|
|
func TestEqualForkBlocks(t *testing.T) { testEqualFork(t, true) }
|
|
|
|
|
|
|
|
func testEqualFork(t *testing.T, full bool) {
|
|
|
|
length := 10
|
|
|
|
|
|
|
|
// Make first chain starting from genesis
|
2022-09-07 21:21:59 +03:00
|
|
|
_, _, processor, err := newCanonical(ethash.NewFaker(), length, full)
|
2015-02-18 05:02:15 +02:00
|
|
|
if err != nil {
|
2015-09-21 15:36:29 +03:00
|
|
|
t.Fatalf("failed to make new canonical chain: %v", err)
|
2015-02-18 05:02:15 +02:00
|
|
|
}
|
2017-08-07 15:47:25 +03:00
|
|
|
defer processor.Stop()
|
|
|
|
|
2015-09-21 15:36:29 +03:00
|
|
|
// Define the difficulty comparator
|
|
|
|
equal := func(td1, td2 *big.Int) {
|
2015-02-18 05:02:15 +02:00
|
|
|
if td2.Cmp(td1) != 0 {
|
2015-09-21 15:36:29 +03:00
|
|
|
t.Errorf("total difficulty mismatch: have %v, want %v", td2, td1)
|
2015-02-18 05:02:15 +02:00
|
|
|
}
|
|
|
|
}
|
2015-09-21 15:36:29 +03:00
|
|
|
// Sum of numbers must be equal to `length` for this to be an equal fork
|
|
|
|
testFork(t, processor, 0, 10, full, equal)
|
|
|
|
testFork(t, processor, 1, 9, full, equal)
|
|
|
|
testFork(t, processor, 2, 8, full, equal)
|
|
|
|
testFork(t, processor, 5, 5, full, equal)
|
|
|
|
testFork(t, processor, 6, 4, full, equal)
|
|
|
|
testFork(t, processor, 9, 1, full, equal)
|
2015-02-18 05:02:15 +02:00
|
|
|
}
|
|
|
|
|
all: core rework for the merge transition (#23761)
* all: work for eth1/2 transtition
* consensus/beacon, eth: change beacon difficulty to 0
* eth: updates
* all: add terminalBlockDifficulty config, fix rebasing issues
* eth: implemented merge interop spec
* internal/ethapi: update to v1.0.0.alpha.2
This commit updates the code to the new spec, moving payloadId into
it's own object. It also fixes an issue with finalizing an empty blockhash.
It also properly sets the basefee
* all: sync polishes, other fixes + refactors
* core, eth: correct semantics for LeavePoW, EnterPoS
* core: fixed rebasing artifacts
* core: light: performance improvements
* core: use keyed field (f)
* core: eth: fix compilation issues + tests
* eth/catalyst: dbetter error codes
* all: move Merger to consensus/, remove reliance on it in bc
* all: renamed EnterPoS and LeavePoW to ReachTDD and FinalizePoS
* core: make mergelogs a function
* core: use InsertChain instead of InsertBlock
* les: drop merger from lightchain object
* consensus: add merger
* core: recoverAncestors in catalyst mode
* core: fix nitpick
* all: removed merger from beacon, use TTD, nitpicks
* consensus: eth: add docstring, removed unnecessary code duplication
* consensus/beacon: better comment
* all: easy to fix nitpicks by karalabe
* consensus/beacon: verify known headers to be sure
* core: comments
* core: eth: don't drop peers who advertise blocks, nitpicks
* core: never add beacon blocks to the future queue
* core: fixed nitpicks
* consensus/beacon: simplify IsTTDReached check
* consensus/beacon: correct IsTTDReached check
Co-authored-by: rjl493456442 <garyrong0905@gmail.com>
Co-authored-by: Péter Szilágyi <peterke@gmail.com>
2021-11-26 14:23:02 +03:00
|
|
|
// Tests that given a starting canonical chain of a given size, creating equal
|
|
|
|
// forks do take canonical ownership.
|
|
|
|
func TestEqualForkHeadersAfterMerge(t *testing.T) { testEqualForkAfterMerge(t, false) }
|
|
|
|
func TestEqualForkBlocksAfterMerge(t *testing.T) { testEqualForkAfterMerge(t, true) }
|
|
|
|
|
|
|
|
func testEqualForkAfterMerge(t *testing.T, full bool) {
|
|
|
|
length := 10
|
|
|
|
|
|
|
|
// Make first chain starting from genesis
|
2022-09-07 21:21:59 +03:00
|
|
|
_, _, processor, err := newCanonical(ethash.NewFaker(), length, full)
|
all: core rework for the merge transition (#23761)
* all: work for eth1/2 transtition
* consensus/beacon, eth: change beacon difficulty to 0
* eth: updates
* all: add terminalBlockDifficulty config, fix rebasing issues
* eth: implemented merge interop spec
* internal/ethapi: update to v1.0.0.alpha.2
This commit updates the code to the new spec, moving payloadId into
it's own object. It also fixes an issue with finalizing an empty blockhash.
It also properly sets the basefee
* all: sync polishes, other fixes + refactors
* core, eth: correct semantics for LeavePoW, EnterPoS
* core: fixed rebasing artifacts
* core: light: performance improvements
* core: use keyed field (f)
* core: eth: fix compilation issues + tests
* eth/catalyst: dbetter error codes
* all: move Merger to consensus/, remove reliance on it in bc
* all: renamed EnterPoS and LeavePoW to ReachTDD and FinalizePoS
* core: make mergelogs a function
* core: use InsertChain instead of InsertBlock
* les: drop merger from lightchain object
* consensus: add merger
* core: recoverAncestors in catalyst mode
* core: fix nitpick
* all: removed merger from beacon, use TTD, nitpicks
* consensus: eth: add docstring, removed unnecessary code duplication
* consensus/beacon: better comment
* all: easy to fix nitpicks by karalabe
* consensus/beacon: verify known headers to be sure
* core: comments
* core: eth: don't drop peers who advertise blocks, nitpicks
* core: never add beacon blocks to the future queue
* core: fixed nitpicks
* consensus/beacon: simplify IsTTDReached check
* consensus/beacon: correct IsTTDReached check
Co-authored-by: rjl493456442 <garyrong0905@gmail.com>
Co-authored-by: Péter Szilágyi <peterke@gmail.com>
2021-11-26 14:23:02 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to make new canonical chain: %v", err)
|
|
|
|
}
|
|
|
|
defer processor.Stop()
|
|
|
|
|
|
|
|
testInsertAfterMerge(t, processor, 0, 10, full)
|
|
|
|
testInsertAfterMerge(t, processor, 1, 9, full)
|
|
|
|
testInsertAfterMerge(t, processor, 2, 8, full)
|
|
|
|
testInsertAfterMerge(t, processor, 5, 5, full)
|
|
|
|
testInsertAfterMerge(t, processor, 6, 4, full)
|
|
|
|
testInsertAfterMerge(t, processor, 9, 1, full)
|
|
|
|
}
|
|
|
|
|
2015-09-21 15:36:29 +03:00
|
|
|
// Tests that chains missing links do not get accepted by the processor.
|
|
|
|
func TestBrokenHeaderChain(t *testing.T) { testBrokenChain(t, false) }
|
|
|
|
func TestBrokenBlockChain(t *testing.T) { testBrokenChain(t, true) }
|
|
|
|
|
|
|
|
func testBrokenChain(t *testing.T, full bool) {
|
|
|
|
// Make chain starting from genesis
|
2022-09-07 21:21:59 +03:00
|
|
|
genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 10, full)
|
2015-02-18 05:02:15 +02:00
|
|
|
if err != nil {
|
2015-09-21 15:36:29 +03:00
|
|
|
t.Fatalf("failed to make new canonical chain: %v", err)
|
2015-02-18 05:02:15 +02:00
|
|
|
}
|
2017-08-07 15:47:25 +03:00
|
|
|
defer blockchain.Stop()
|
|
|
|
|
2015-09-21 15:36:29 +03:00
|
|
|
// Create a forked chain, and try to insert with a missing link
|
|
|
|
if full {
|
2022-09-07 21:21:59 +03:00
|
|
|
chain := makeBlockChain(blockchain.chainConfig, blockchain.CurrentBlock(), 5, ethash.NewFaker(), genDb, forkSeed)[1:]
|
2015-10-19 17:08:17 +03:00
|
|
|
if err := testBlockChainImport(chain, blockchain); err == nil {
|
2015-09-21 15:36:29 +03:00
|
|
|
t.Errorf("broken block chain not reported")
|
|
|
|
}
|
|
|
|
} else {
|
2022-09-07 21:21:59 +03:00
|
|
|
chain := makeHeaderChain(blockchain.chainConfig, blockchain.CurrentHeader(), 5, ethash.NewFaker(), genDb, forkSeed)[1:]
|
2015-10-19 17:08:17 +03:00
|
|
|
if err := testHeaderChainImport(chain, blockchain); err == nil {
|
2015-09-21 15:36:29 +03:00
|
|
|
t.Errorf("broken header chain not reported")
|
|
|
|
}
|
2015-02-18 05:02:15 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-03-15 20:55:39 +02:00
|
|
|
// Tests that reorganising a long difficult chain after a short easy one
|
2015-09-21 15:36:29 +03:00
|
|
|
// overwrites the canonical numbers and links in the database.
|
|
|
|
func TestReorgLongHeaders(t *testing.T) { testReorgLong(t, false) }
|
|
|
|
func TestReorgLongBlocks(t *testing.T) { testReorgLong(t, true) }
|
2015-07-10 15:29:40 +03:00
|
|
|
|
2015-09-21 15:36:29 +03:00
|
|
|
func testReorgLong(t *testing.T, full bool) {
|
2021-10-26 09:44:43 +03:00
|
|
|
testReorg(t, []int64{0, 0, -9}, []int64{0, 0, 0, -9}, 393280+params.GenesisDifficulty.Int64(), full)
|
2015-09-21 15:36:29 +03:00
|
|
|
}
|
2015-04-30 01:08:43 +03:00
|
|
|
|
2016-03-15 20:55:39 +02:00
|
|
|
// Tests that reorganising a short difficult chain after a long easy one
|
2015-09-21 15:36:29 +03:00
|
|
|
// overwrites the canonical numbers and links in the database.
|
|
|
|
func TestReorgShortHeaders(t *testing.T) { testReorgShort(t, false) }
|
|
|
|
func TestReorgShortBlocks(t *testing.T) { testReorgShort(t, true) }
|
2015-04-29 13:43:24 +03:00
|
|
|
|
2015-09-21 15:36:29 +03:00
|
|
|
func testReorgShort(t *testing.T, full bool) {
|
2018-02-23 15:02:33 +03:00
|
|
|
// Create a long easy chain vs. a short heavy one. Due to difficulty adjustment
|
|
|
|
// we need a fairly long chain of blocks with different difficulties for a short
|
2022-06-10 18:47:06 +03:00
|
|
|
// one to become heavier than a long one. The 96 is an empirical value.
|
2018-02-23 15:02:33 +03:00
|
|
|
easy := make([]int64, 96)
|
|
|
|
for i := 0; i < len(easy); i++ {
|
|
|
|
easy[i] = 60
|
|
|
|
}
|
|
|
|
diff := make([]int64, len(easy)-1)
|
|
|
|
for i := 0; i < len(diff); i++ {
|
|
|
|
diff[i] = -9
|
|
|
|
}
|
2021-10-26 09:44:43 +03:00
|
|
|
testReorg(t, easy, diff, 12615120+params.GenesisDifficulty.Int64(), full)
|
2015-09-21 15:36:29 +03:00
|
|
|
}
|
2015-04-29 13:43:24 +03:00
|
|
|
|
2018-02-23 15:02:33 +03:00
|
|
|
// testReorg builds two competing chains on top of a pristine genesis-only
// chain — one shaped by the `first` time offsets, one by `second` — inserts
// both, and verifies that after the reorg (a) the canonical number->hash
// mapping links up parent-to-child all the way down, and (b) the head's
// total difficulty equals the genesis difficulty plus `td`.
// `full` selects block import vs. header-only import.
func testReorg(t *testing.T, first, second []int64, td int64, full bool) {
	// Create a pristine chain and database
	genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full)
	if err != nil {
		t.Fatalf("failed to create pristine chain: %v", err)
	}
	defer blockchain.Stop()

	// Insert an easy and a difficult chain afterwards. Both are generated
	// from the same parent (the current genesis head); OffsetTime skews the
	// block timestamps so the difficulty adjustment yields the desired TDs.
	easyBlocks, _ := GenerateChain(params.TestChainConfig, blockchain.CurrentBlock(), ethash.NewFaker(), genDb, len(first), func(i int, b *BlockGen) {
		b.OffsetTime(first[i])
	})
	diffBlocks, _ := GenerateChain(params.TestChainConfig, blockchain.CurrentBlock(), ethash.NewFaker(), genDb, len(second), func(i int, b *BlockGen) {
		b.OffsetTime(second[i])
	})
	// The easy chain goes in first, so inserting the difficult one triggers
	// the reorg under test.
	if full {
		if _, err := blockchain.InsertChain(easyBlocks); err != nil {
			t.Fatalf("failed to insert easy chain: %v", err)
		}
		if _, err := blockchain.InsertChain(diffBlocks); err != nil {
			t.Fatalf("failed to insert difficult chain: %v", err)
		}
	} else {
		// Header-only mode: strip the generated blocks down to headers.
		easyHeaders := make([]*types.Header, len(easyBlocks))
		for i, block := range easyBlocks {
			easyHeaders[i] = block.Header()
		}
		diffHeaders := make([]*types.Header, len(diffBlocks))
		for i, block := range diffBlocks {
			diffHeaders[i] = block.Header()
		}
		if _, err := blockchain.InsertHeaderChain(easyHeaders, 1); err != nil {
			t.Fatalf("failed to insert easy chain: %v", err)
		}
		if _, err := blockchain.InsertHeaderChain(diffHeaders, 1); err != nil {
			t.Fatalf("failed to insert difficult chain: %v", err)
		}
	}
	// Check that the chain is valid number and link wise: walk the canonical
	// chain from head towards genesis and confirm each entry's parent hash
	// matches the hash stored for the previous number.
	if full {
		prev := blockchain.CurrentBlock()
		for block := blockchain.GetBlockByNumber(blockchain.CurrentBlock().NumberU64() - 1); block.NumberU64() != 0; prev, block = block, blockchain.GetBlockByNumber(block.NumberU64()-1) {
			if prev.ParentHash() != block.Hash() {
				t.Errorf("parent block hash mismatch: have %x, want %x", prev.ParentHash(), block.Hash())
			}
		}
	} else {
		prev := blockchain.CurrentHeader()
		for header := blockchain.GetHeaderByNumber(blockchain.CurrentHeader().Number.Uint64() - 1); header.Number.Uint64() != 0; prev, header = header, blockchain.GetHeaderByNumber(header.Number.Uint64()-1) {
			if prev.ParentHash != header.Hash() {
				t.Errorf("parent header hash mismatch: have %x, want %x", prev.ParentHash, header.Hash())
			}
		}
	}
	// Make sure the chain total difficulty is the correct one: the expected
	// value is the genesis difficulty plus the caller-supplied delta.
	want := new(big.Int).Add(blockchain.genesisBlock.Difficulty(), big.NewInt(td))
	if full {
		cur := blockchain.CurrentBlock()
		if have := blockchain.GetTd(cur.Hash(), cur.NumberU64()); have.Cmp(want) != 0 {
			t.Errorf("total difficulty mismatch: have %v, want %v", have, want)
		}
	} else {
		cur := blockchain.CurrentHeader()
		if have := blockchain.GetTd(cur.Hash(), cur.Number.Uint64()); have.Cmp(want) != 0 {
			t.Errorf("total difficulty mismatch: have %v, want %v", have, want)
		}
	}
}
|
2015-04-30 01:08:43 +03:00
|
|
|
|
2015-09-21 15:36:29 +03:00
|
|
|
// Tests that the insertion functions detect banned hashes.
|
|
|
|
func TestBadHeaderHashes(t *testing.T) { testBadHashes(t, false) }
|
|
|
|
func TestBadBlockHashes(t *testing.T) { testBadHashes(t, true) }
|
|
|
|
|
|
|
|
func testBadHashes(t *testing.T, full bool) {
|
2018-02-23 15:02:33 +03:00
|
|
|
// Create a pristine chain and database
|
2022-09-07 21:21:59 +03:00
|
|
|
genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full)
|
2018-02-23 15:02:33 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to create pristine chain: %v", err)
|
|
|
|
}
|
|
|
|
defer blockchain.Stop()
|
2015-09-14 17:56:33 +03:00
|
|
|
|
2015-09-21 15:36:29 +03:00
|
|
|
// Create a chain, ban a hash and try to import
|
|
|
|
if full {
|
2022-09-07 21:21:59 +03:00
|
|
|
blocks := makeBlockChain(blockchain.chainConfig, blockchain.CurrentBlock(), 3, ethash.NewFaker(), genDb, 10)
|
2018-02-23 15:02:33 +03:00
|
|
|
|
2015-09-21 15:36:29 +03:00
|
|
|
BadHashes[blocks[2].Header().Hash()] = true
|
2018-02-23 15:02:33 +03:00
|
|
|
defer func() { delete(BadHashes, blocks[2].Header().Hash()) }()
|
|
|
|
|
|
|
|
_, err = blockchain.InsertChain(blocks)
|
2015-09-21 15:36:29 +03:00
|
|
|
} else {
|
2022-09-07 21:21:59 +03:00
|
|
|
headers := makeHeaderChain(blockchain.chainConfig, blockchain.CurrentHeader(), 3, ethash.NewFaker(), genDb, 10)
|
2018-02-23 15:02:33 +03:00
|
|
|
|
2015-09-21 15:36:29 +03:00
|
|
|
BadHashes[headers[2].Hash()] = true
|
2018-02-23 15:02:33 +03:00
|
|
|
defer func() { delete(BadHashes, headers[2].Hash()) }()
|
|
|
|
|
|
|
|
_, err = blockchain.InsertHeaderChain(headers, 1)
|
2015-09-21 15:36:29 +03:00
|
|
|
}
|
2021-07-29 11:17:40 +03:00
|
|
|
if !errors.Is(err, ErrBannedHash) {
|
|
|
|
t.Errorf("error mismatch: have: %v, want: %v", err, ErrBannedHash)
|
2015-09-14 17:56:33 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-03-15 20:08:18 +02:00
|
|
|
// Tests that bad hashes are detected on boot, and the chain rolled back to a
|
2015-09-21 15:36:29 +03:00
|
|
|
// good state prior to the bad hash.
|
|
|
|
func TestReorgBadHeaderHashes(t *testing.T) { testReorgBadHashes(t, false) }
|
|
|
|
func TestReorgBadBlockHashes(t *testing.T) { testReorgBadHashes(t, true) }
|
|
|
|
|
|
|
|
func testReorgBadHashes(t *testing.T, full bool) {
|
2018-02-23 15:02:33 +03:00
|
|
|
// Create a pristine chain and database
|
2022-09-07 21:21:59 +03:00
|
|
|
genDb, gspec, blockchain, err := newCanonical(ethash.NewFaker(), 0, full)
|
2018-02-23 15:02:33 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to create pristine chain: %v", err)
|
|
|
|
}
|
2016-03-15 20:08:18 +02:00
|
|
|
// Create a chain, import and ban afterwards
|
2022-09-07 21:21:59 +03:00
|
|
|
headers := makeHeaderChain(blockchain.chainConfig, blockchain.CurrentHeader(), 4, ethash.NewFaker(), genDb, 10)
|
|
|
|
blocks := makeBlockChain(blockchain.chainConfig, blockchain.CurrentBlock(), 4, ethash.NewFaker(), genDb, 10)
|
2015-09-14 17:56:33 +03:00
|
|
|
|
2015-09-21 15:36:29 +03:00
|
|
|
if full {
|
2018-02-23 15:02:33 +03:00
|
|
|
if _, err = blockchain.InsertChain(blocks); err != nil {
|
|
|
|
t.Errorf("failed to import blocks: %v", err)
|
2015-09-21 15:36:29 +03:00
|
|
|
}
|
2018-02-23 15:02:33 +03:00
|
|
|
if blockchain.CurrentBlock().Hash() != blocks[3].Hash() {
|
|
|
|
t.Errorf("last block hash mismatch: have: %x, want %x", blockchain.CurrentBlock().Hash(), blocks[3].Header().Hash())
|
2015-09-21 15:36:29 +03:00
|
|
|
}
|
|
|
|
BadHashes[blocks[3].Header().Hash()] = true
|
|
|
|
defer func() { delete(BadHashes, blocks[3].Header().Hash()) }()
|
|
|
|
} else {
|
2018-02-23 15:02:33 +03:00
|
|
|
if _, err = blockchain.InsertHeaderChain(headers, 1); err != nil {
|
|
|
|
t.Errorf("failed to import headers: %v", err)
|
2015-09-21 15:36:29 +03:00
|
|
|
}
|
2018-02-23 15:02:33 +03:00
|
|
|
if blockchain.CurrentHeader().Hash() != headers[3].Hash() {
|
|
|
|
t.Errorf("last header hash mismatch: have: %x, want %x", blockchain.CurrentHeader().Hash(), headers[3].Hash())
|
2015-09-21 15:36:29 +03:00
|
|
|
}
|
|
|
|
BadHashes[headers[3].Hash()] = true
|
|
|
|
defer func() { delete(BadHashes, headers[3].Hash()) }()
|
2015-09-14 17:56:33 +03:00
|
|
|
}
|
2018-02-23 15:02:33 +03:00
|
|
|
blockchain.Stop()
|
2017-03-02 16:03:33 +03:00
|
|
|
|
|
|
|
// Create a new BlockChain and check that it rolled back the state.
|
2022-08-30 19:22:28 +03:00
|
|
|
ncm, err := NewBlockChain(blockchain.db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
|
2015-07-10 15:29:40 +03:00
|
|
|
if err != nil {
|
2015-09-21 15:36:29 +03:00
|
|
|
t.Fatalf("failed to create new chain manager: %v", err)
|
2015-07-10 15:29:40 +03:00
|
|
|
}
|
2015-09-21 15:36:29 +03:00
|
|
|
if full {
|
|
|
|
if ncm.CurrentBlock().Hash() != blocks[2].Header().Hash() {
|
|
|
|
t.Errorf("last block hash mismatch: have: %x, want %x", ncm.CurrentBlock().Hash(), blocks[2].Header().Hash())
|
|
|
|
}
|
2017-11-13 14:47:27 +03:00
|
|
|
if blocks[2].Header().GasLimit != ncm.GasLimit() {
|
|
|
|
t.Errorf("last block gasLimit mismatch: have: %d, want %d", ncm.GasLimit(), blocks[2].Header().GasLimit)
|
2015-09-21 15:36:29 +03:00
|
|
|
}
|
|
|
|
} else {
|
2015-10-09 16:21:47 +03:00
|
|
|
if ncm.CurrentHeader().Hash() != headers[2].Hash() {
|
|
|
|
t.Errorf("last header hash mismatch: have: %x, want %x", ncm.CurrentHeader().Hash(), headers[2].Hash())
|
2015-04-30 01:08:43 +03:00
|
|
|
}
|
|
|
|
}
|
2018-02-23 15:02:33 +03:00
|
|
|
ncm.Stop()
|
2015-04-30 01:08:43 +03:00
|
|
|
}
|
2015-06-08 03:19:39 +03:00
|
|
|
|
2015-09-21 15:36:29 +03:00
|
|
|
// Tests chain insertions in the face of one entity containing an invalid nonce.
|
|
|
|
func TestHeadersInsertNonceError(t *testing.T) { testInsertNonceError(t, false) }
|
|
|
|
func TestBlocksInsertNonceError(t *testing.T) { testInsertNonceError(t, true) }
|
|
|
|
|
|
|
|
func testInsertNonceError(t *testing.T, full bool) {
|
2015-06-08 03:19:39 +03:00
|
|
|
for i := 1; i < 25 && !t.Failed(); i++ {
|
2015-09-21 15:36:29 +03:00
|
|
|
// Create a pristine chain and database
|
2022-09-07 21:21:59 +03:00
|
|
|
genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full)
|
2015-07-10 15:29:40 +03:00
|
|
|
if err != nil {
|
2015-09-21 15:36:29 +03:00
|
|
|
t.Fatalf("failed to create pristine chain: %v", err)
|
2015-07-10 15:29:40 +03:00
|
|
|
}
|
2017-08-07 15:47:25 +03:00
|
|
|
defer blockchain.Stop()
|
|
|
|
|
2015-09-21 15:36:29 +03:00
|
|
|
// Create and insert a chain with a failing nonce
|
|
|
|
var (
|
2017-04-05 01:16:29 +03:00
|
|
|
failAt int
|
|
|
|
failRes int
|
|
|
|
failNum uint64
|
2015-09-21 15:36:29 +03:00
|
|
|
)
|
|
|
|
if full {
|
2022-09-07 21:21:59 +03:00
|
|
|
blocks := makeBlockChain(blockchain.chainConfig, blockchain.CurrentBlock(), i, ethash.NewFaker(), genDb, 0)
|
2015-09-21 15:36:29 +03:00
|
|
|
|
|
|
|
failAt = rand.Int() % len(blocks)
|
|
|
|
failNum = blocks[failAt].NumberU64()
|
2015-10-07 12:14:30 +03:00
|
|
|
|
2017-04-05 01:16:29 +03:00
|
|
|
blockchain.engine = ethash.NewFakeFailer(failNum)
|
2015-10-19 17:08:17 +03:00
|
|
|
failRes, err = blockchain.InsertChain(blocks)
|
2015-09-21 15:36:29 +03:00
|
|
|
} else {
|
2022-09-07 21:21:59 +03:00
|
|
|
headers := makeHeaderChain(blockchain.chainConfig, blockchain.CurrentHeader(), i, ethash.NewFaker(), genDb, 0)
|
2015-06-08 03:19:39 +03:00
|
|
|
|
2015-09-21 15:36:29 +03:00
|
|
|
failAt = rand.Int() % len(headers)
|
|
|
|
failNum = headers[failAt].Number.Uint64()
|
2015-10-07 12:14:30 +03:00
|
|
|
|
2017-04-05 01:16:29 +03:00
|
|
|
blockchain.engine = ethash.NewFakeFailer(failNum)
|
|
|
|
blockchain.hc.engine = blockchain.engine
|
2015-10-19 17:08:17 +03:00
|
|
|
failRes, err = blockchain.InsertHeaderChain(headers, 1)
|
2015-09-21 15:36:29 +03:00
|
|
|
}
|
2018-11-20 15:15:26 +03:00
|
|
|
// Check that the returned error indicates the failure
|
2015-09-21 15:36:29 +03:00
|
|
|
if failRes != failAt {
|
2018-11-20 15:15:26 +03:00
|
|
|
t.Errorf("test %d: failure (%v) index mismatch: have %d, want %d", i, err, failRes, failAt)
|
2015-06-08 03:19:39 +03:00
|
|
|
}
|
2018-11-20 15:15:26 +03:00
|
|
|
// Check that all blocks after the failing block have been inserted
|
2015-09-21 15:36:29 +03:00
|
|
|
for j := 0; j < i-failAt; j++ {
|
|
|
|
if full {
|
2015-10-19 17:08:17 +03:00
|
|
|
if block := blockchain.GetBlockByNumber(failNum + uint64(j)); block != nil {
|
2015-09-21 15:36:29 +03:00
|
|
|
t.Errorf("test %d: invalid block in chain: %v", i, block)
|
|
|
|
}
|
|
|
|
} else {
|
2015-10-19 17:08:17 +03:00
|
|
|
if header := blockchain.GetHeaderByNumber(failNum + uint64(j)); header != nil {
|
2015-09-21 15:36:29 +03:00
|
|
|
t.Errorf("test %d: invalid header in chain: %v", i, header)
|
|
|
|
}
|
2015-06-08 03:19:39 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-09-30 19:23:31 +03:00
|
|
|
// Tests that fast importing a block chain produces the same chain data as the
|
|
|
|
// classical full block processing.
|
|
|
|
func TestFastVsFullChains(t *testing.T) {
|
|
|
|
// Configure and generate a sample block chain
|
|
|
|
var (
|
2018-05-09 15:24:25 +03:00
|
|
|
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
|
|
|
address = crypto.PubkeyToAddress(key.PublicKey)
|
2021-06-15 13:56:14 +03:00
|
|
|
funds = big.NewInt(1000000000000000)
|
2018-05-09 15:24:25 +03:00
|
|
|
gspec = &Genesis{
|
2021-06-15 13:56:14 +03:00
|
|
|
Config: params.TestChainConfig,
|
|
|
|
Alloc: GenesisAlloc{address: {Balance: funds}},
|
|
|
|
BaseFee: big.NewInt(params.InitialBaseFee),
|
2017-04-05 01:16:29 +03:00
|
|
|
}
|
2022-09-07 21:21:59 +03:00
|
|
|
signer = types.LatestSigner(gspec.Config)
|
2015-09-30 19:23:31 +03:00
|
|
|
)
|
2022-09-07 21:21:59 +03:00
|
|
|
_, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 1024, func(i int, block *BlockGen) {
|
2015-09-30 19:23:31 +03:00
|
|
|
block.SetCoinbase(common.Address{0x00})
|
|
|
|
|
|
|
|
// If the block number is multiple of 3, send a few bonus transactions to the miner
|
|
|
|
if i%3 == 2 {
|
|
|
|
for j := 0; j < i%4+1; j++ {
|
2021-06-15 13:56:14 +03:00
|
|
|
tx, err := types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, block.header.BaseFee, nil), signer, key)
|
2015-09-30 19:23:31 +03:00
|
|
|
if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
block.AddTx(tx)
|
|
|
|
}
|
|
|
|
}
|
2022-08-09 01:20:46 +03:00
|
|
|
// If the block number is a multiple of 5, add an uncle to the block
|
|
|
|
if i%5 == 4 {
|
|
|
|
block.AddUncle(&types.Header{ParentHash: block.PrevBlock(i - 2).Hash(), Number: big.NewInt(int64(i))})
|
2015-09-30 19:23:31 +03:00
|
|
|
}
|
|
|
|
})
|
|
|
|
// Import the chain as an archive node for the comparison baseline
|
2018-09-24 15:57:49 +03:00
|
|
|
archiveDb := rawdb.NewMemoryDatabase()
|
2022-08-30 19:22:28 +03:00
|
|
|
archive, _ := NewBlockChain(archiveDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
|
2017-08-07 15:47:25 +03:00
|
|
|
defer archive.Stop()
|
2015-09-30 19:23:31 +03:00
|
|
|
|
|
|
|
if n, err := archive.InsertChain(blocks); err != nil {
|
|
|
|
t.Fatalf("failed to process block %d: %v", n, err)
|
|
|
|
}
|
|
|
|
// Fast import the chain as a non-archive node to test
|
2018-09-24 15:57:49 +03:00
|
|
|
fastDb := rawdb.NewMemoryDatabase()
|
2022-08-30 19:22:28 +03:00
|
|
|
fast, _ := NewBlockChain(fastDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
|
2017-08-07 15:47:25 +03:00
|
|
|
defer fast.Stop()
|
2015-09-30 19:23:31 +03:00
|
|
|
|
|
|
|
headers := make([]*types.Header, len(blocks))
|
|
|
|
for i, block := range blocks {
|
|
|
|
headers[i] = block.Header()
|
|
|
|
}
|
2015-10-07 12:14:30 +03:00
|
|
|
if n, err := fast.InsertHeaderChain(headers, 1); err != nil {
|
2015-09-30 19:23:31 +03:00
|
|
|
t.Fatalf("failed to insert header %d: %v", n, err)
|
|
|
|
}
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
2019-04-25 17:59:48 +03:00
|
|
|
if n, err := fast.InsertReceiptChain(blocks, receipts, 0); err != nil {
|
|
|
|
t.Fatalf("failed to insert receipt %d: %v", n, err)
|
|
|
|
}
|
|
|
|
// Freezer style fast import the chain.
|
2022-09-07 21:21:59 +03:00
|
|
|
ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
2019-04-25 17:59:48 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to create temp freezer db: %v", err)
|
|
|
|
}
|
2022-04-08 16:44:55 +03:00
|
|
|
defer ancientDb.Close()
|
2022-08-30 19:22:28 +03:00
|
|
|
ancient, _ := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
2019-04-25 17:59:48 +03:00
|
|
|
defer ancient.Stop()
|
|
|
|
|
|
|
|
if n, err := ancient.InsertHeaderChain(headers, 1); err != nil {
|
|
|
|
t.Fatalf("failed to insert header %d: %v", n, err)
|
|
|
|
}
|
|
|
|
if n, err := ancient.InsertReceiptChain(blocks, receipts, uint64(len(blocks)/2)); err != nil {
|
2015-09-30 19:23:31 +03:00
|
|
|
t.Fatalf("failed to insert receipt %d: %v", n, err)
|
|
|
|
}
|
2021-09-07 13:31:17 +03:00
|
|
|
|
2015-09-30 19:23:31 +03:00
|
|
|
// Iterate over all chain data components, and cross reference
|
|
|
|
for i := 0; i < len(blocks); i++ {
|
|
|
|
num, hash := blocks[i].NumberU64(), blocks[i].Hash()
|
|
|
|
|
2021-10-12 00:16:46 +03:00
|
|
|
if ftd, atd := fast.GetTd(hash, num), archive.GetTd(hash, num); ftd.Cmp(atd) != 0 {
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
2019-04-25 17:59:48 +03:00
|
|
|
t.Errorf("block #%d [%x]: td mismatch: fastdb %v, archivedb %v", num, hash, ftd, atd)
|
|
|
|
}
|
2021-10-12 00:16:46 +03:00
|
|
|
if antd, artd := ancient.GetTd(hash, num), archive.GetTd(hash, num); antd.Cmp(artd) != 0 {
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
2019-04-25 17:59:48 +03:00
|
|
|
t.Errorf("block #%d [%x]: td mismatch: ancientdb %v, archivedb %v", num, hash, antd, artd)
|
2015-09-30 19:23:31 +03:00
|
|
|
}
|
2016-04-05 16:22:04 +03:00
|
|
|
if fheader, aheader := fast.GetHeaderByHash(hash), archive.GetHeaderByHash(hash); fheader.Hash() != aheader.Hash() {
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
2019-04-25 17:59:48 +03:00
|
|
|
t.Errorf("block #%d [%x]: header mismatch: fastdb %v, archivedb %v", num, hash, fheader, aheader)
|
|
|
|
}
|
|
|
|
if anheader, arheader := ancient.GetHeaderByHash(hash), archive.GetHeaderByHash(hash); anheader.Hash() != arheader.Hash() {
|
|
|
|
t.Errorf("block #%d [%x]: header mismatch: ancientdb %v, archivedb %v", num, hash, anheader, arheader)
|
2015-09-30 19:23:31 +03:00
|
|
|
}
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
2019-04-25 17:59:48 +03:00
|
|
|
if fblock, arblock, anblock := fast.GetBlockByHash(hash), archive.GetBlockByHash(hash), ancient.GetBlockByHash(hash); fblock.Hash() != arblock.Hash() || anblock.Hash() != arblock.Hash() {
|
|
|
|
t.Errorf("block #%d [%x]: block mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock, anblock, arblock)
|
2021-02-02 15:09:23 +03:00
|
|
|
} else if types.DeriveSha(fblock.Transactions(), trie.NewStackTrie(nil)) != types.DeriveSha(arblock.Transactions(), trie.NewStackTrie(nil)) || types.DeriveSha(anblock.Transactions(), trie.NewStackTrie(nil)) != types.DeriveSha(arblock.Transactions(), trie.NewStackTrie(nil)) {
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
2019-04-25 17:59:48 +03:00
|
|
|
t.Errorf("block #%d [%x]: transactions mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock.Transactions(), anblock.Transactions(), arblock.Transactions())
|
|
|
|
} else if types.CalcUncleHash(fblock.Uncles()) != types.CalcUncleHash(arblock.Uncles()) || types.CalcUncleHash(anblock.Uncles()) != types.CalcUncleHash(arblock.Uncles()) {
|
|
|
|
t.Errorf("block #%d [%x]: uncles mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock.Uncles(), anblock, arblock.Uncles())
|
2015-09-30 19:23:31 +03:00
|
|
|
}
|
2021-09-07 13:31:17 +03:00
|
|
|
|
|
|
|
// Check receipts.
|
|
|
|
freceipts := rawdb.ReadReceipts(fastDb, hash, num, fast.Config())
|
|
|
|
anreceipts := rawdb.ReadReceipts(ancientDb, hash, num, fast.Config())
|
|
|
|
areceipts := rawdb.ReadReceipts(archiveDb, hash, num, fast.Config())
|
|
|
|
if types.DeriveSha(freceipts, trie.NewStackTrie(nil)) != types.DeriveSha(areceipts, trie.NewStackTrie(nil)) {
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
2019-04-25 17:59:48 +03:00
|
|
|
t.Errorf("block #%d [%x]: receipts mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, freceipts, anreceipts, areceipts)
|
2015-09-30 19:23:31 +03:00
|
|
|
}
|
2021-09-07 13:31:17 +03:00
|
|
|
|
|
|
|
// Check that hash-to-number mappings are present in all databases.
|
|
|
|
if m := rawdb.ReadHeaderNumber(fastDb, hash); m == nil || *m != num {
|
|
|
|
t.Errorf("block #%d [%x]: wrong hash-to-number mapping in fastdb: %v", num, hash, m)
|
|
|
|
}
|
|
|
|
if m := rawdb.ReadHeaderNumber(ancientDb, hash); m == nil || *m != num {
|
|
|
|
t.Errorf("block #%d [%x]: wrong hash-to-number mapping in ancientdb: %v", num, hash, m)
|
|
|
|
}
|
|
|
|
if m := rawdb.ReadHeaderNumber(archiveDb, hash); m == nil || *m != num {
|
|
|
|
t.Errorf("block #%d [%x]: wrong hash-to-number mapping in archivedb: %v", num, hash, m)
|
|
|
|
}
|
2015-09-30 19:23:31 +03:00
|
|
|
}
|
2021-09-07 13:31:17 +03:00
|
|
|
|
2015-09-30 19:23:31 +03:00
|
|
|
// Check that the canonical chains are the same between the databases
|
|
|
|
for i := 0; i < len(blocks)+1; i++ {
|
2018-05-07 14:35:06 +03:00
|
|
|
if fhash, ahash := rawdb.ReadCanonicalHash(fastDb, uint64(i)), rawdb.ReadCanonicalHash(archiveDb, uint64(i)); fhash != ahash {
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
2019-04-25 17:59:48 +03:00
|
|
|
t.Errorf("block #%d: canonical hash mismatch: fastdb %v, archivedb %v", i, fhash, ahash)
|
|
|
|
}
|
|
|
|
if anhash, arhash := rawdb.ReadCanonicalHash(ancientDb, uint64(i)), rawdb.ReadCanonicalHash(archiveDb, uint64(i)); anhash != arhash {
|
|
|
|
t.Errorf("block #%d: canonical hash mismatch: ancientdb %v, archivedb %v", i, anhash, arhash)
|
2015-09-30 19:23:31 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Tests that various import methods move the chain head pointers to the correct
|
|
|
|
// positions.
|
|
|
|
func TestLightVsFastVsFullChainHeads(t *testing.T) {
|
|
|
|
// Configure and generate a sample block chain
|
|
|
|
var (
|
2018-05-09 15:24:25 +03:00
|
|
|
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
|
|
|
address = crypto.PubkeyToAddress(key.PublicKey)
|
2021-06-15 13:56:14 +03:00
|
|
|
funds = big.NewInt(1000000000000000)
|
|
|
|
gspec = &Genesis{
|
|
|
|
Config: params.TestChainConfig,
|
|
|
|
Alloc: GenesisAlloc{address: {Balance: funds}},
|
|
|
|
BaseFee: big.NewInt(params.InitialBaseFee),
|
|
|
|
}
|
2015-09-30 19:23:31 +03:00
|
|
|
)
|
|
|
|
height := uint64(1024)
|
2022-09-07 21:21:59 +03:00
|
|
|
_, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), int(height), nil)
|
2015-09-30 19:23:31 +03:00
|
|
|
|
core, cmd, vendor: fixes and database inspection tool (#15)
* core, eth: some fixes for freezer
* vendor, core/rawdb, cmd/geth: add db inspector
* core, cmd/utils: check ancient store path forceily
* cmd/geth, common, core/rawdb: a few fixes
* cmd/geth: support windows file rename and fix rename error
* core: support ancient plugin
* core, cmd: streaming file copy
* cmd, consensus, core, tests: keep genesis in leveldb
* core: write txlookup during ancient init
* core: bump database version
2019-05-14 17:07:44 +03:00
|
|
|
// makeDb creates a db instance for testing.
|
2022-04-08 16:44:55 +03:00
|
|
|
makeDb := func() ethdb.Database {
|
2022-09-07 21:21:59 +03:00
|
|
|
db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
|
core, cmd, vendor: fixes and database inspection tool (#15)
* core, eth: some fixes for freezer
* vendor, core/rawdb, cmd/geth: add db inspector
* core, cmd/utils: check ancient store path forceily
* cmd/geth, common, core/rawdb: a few fixes
* cmd/geth: support windows file rename and fix rename error
* core: support ancient plugin
* core, cmd: streaming file copy
* cmd, consensus, core, tests: keep genesis in leveldb
* core: write txlookup during ancient init
* core: bump database version
2019-05-14 17:07:44 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to create temp freezer db: %v", err)
|
|
|
|
}
|
2022-04-08 16:44:55 +03:00
|
|
|
return db
|
core, cmd, vendor: fixes and database inspection tool (#15)
* core, eth: some fixes for freezer
* vendor, core/rawdb, cmd/geth: add db inspector
* core, cmd/utils: check ancient store path forceily
* cmd/geth, common, core/rawdb: a few fixes
* cmd/geth: support windows file rename and fix rename error
* core: support ancient plugin
* core, cmd: streaming file copy
* cmd, consensus, core, tests: keep genesis in leveldb
* core: write txlookup during ancient init
* core: bump database version
2019-05-14 17:07:44 +03:00
|
|
|
}
|
2015-10-09 16:21:47 +03:00
|
|
|
// Configure a subchain to roll back
|
2020-08-20 13:01:24 +03:00
|
|
|
remove := blocks[height/2].NumberU64()
|
|
|
|
|
2015-09-30 19:23:31 +03:00
|
|
|
// Create a small assertion method to check the three heads
|
|
|
|
assert := func(t *testing.T, kind string, chain *BlockChain, header uint64, fast uint64, block uint64) {
|
2020-08-20 13:01:24 +03:00
|
|
|
t.Helper()
|
|
|
|
|
2015-09-30 19:23:31 +03:00
|
|
|
if num := chain.CurrentBlock().NumberU64(); num != block {
|
|
|
|
t.Errorf("%s head block mismatch: have #%v, want #%v", kind, num, block)
|
|
|
|
}
|
|
|
|
if num := chain.CurrentFastBlock().NumberU64(); num != fast {
|
|
|
|
t.Errorf("%s head fast-block mismatch: have #%v, want #%v", kind, num, fast)
|
|
|
|
}
|
|
|
|
if num := chain.CurrentHeader().Number.Uint64(); num != header {
|
|
|
|
t.Errorf("%s head header mismatch: have #%v, want #%v", kind, num, header)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Import the chain as an archive node and ensure all pointers are updated
|
2022-04-08 16:44:55 +03:00
|
|
|
archiveDb := makeDb()
|
|
|
|
defer archiveDb.Close()
|
2020-08-20 13:01:24 +03:00
|
|
|
|
|
|
|
archiveCaching := *defaultCacheConfig
|
|
|
|
archiveCaching.TrieDirtyDisabled = true
|
|
|
|
|
2022-08-30 19:22:28 +03:00
|
|
|
archive, _ := NewBlockChain(archiveDb, &archiveCaching, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
|
2015-09-30 19:23:31 +03:00
|
|
|
if n, err := archive.InsertChain(blocks); err != nil {
|
|
|
|
t.Fatalf("failed to process block %d: %v", n, err)
|
|
|
|
}
|
2017-08-07 15:47:25 +03:00
|
|
|
defer archive.Stop()
|
|
|
|
|
2015-09-30 19:23:31 +03:00
|
|
|
assert(t, "archive", archive, height, height, height)
|
2020-08-20 13:01:24 +03:00
|
|
|
archive.SetHead(remove - 1)
|
2015-10-09 16:21:47 +03:00
|
|
|
assert(t, "archive", archive, height/2, height/2, height/2)
|
2015-09-30 19:23:31 +03:00
|
|
|
|
|
|
|
// Import the chain as a non-archive node and ensure all pointers are updated
|
2022-04-08 16:44:55 +03:00
|
|
|
fastDb := makeDb()
|
|
|
|
defer fastDb.Close()
|
2022-08-30 19:22:28 +03:00
|
|
|
fast, _ := NewBlockChain(fastDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
|
2017-08-07 15:47:25 +03:00
|
|
|
defer fast.Stop()
|
2015-09-30 19:23:31 +03:00
|
|
|
|
|
|
|
headers := make([]*types.Header, len(blocks))
|
|
|
|
for i, block := range blocks {
|
|
|
|
headers[i] = block.Header()
|
|
|
|
}
|
2015-10-07 12:14:30 +03:00
|
|
|
if n, err := fast.InsertHeaderChain(headers, 1); err != nil {
|
2015-09-30 19:23:31 +03:00
|
|
|
t.Fatalf("failed to insert header %d: %v", n, err)
|
|
|
|
}
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
2019-04-25 17:59:48 +03:00
|
|
|
if n, err := fast.InsertReceiptChain(blocks, receipts, 0); err != nil {
|
2015-09-30 19:23:31 +03:00
|
|
|
t.Fatalf("failed to insert receipt %d: %v", n, err)
|
|
|
|
}
|
|
|
|
assert(t, "fast", fast, height, height, 0)
|
2020-08-20 13:01:24 +03:00
|
|
|
fast.SetHead(remove - 1)
|
2015-10-09 16:21:47 +03:00
|
|
|
assert(t, "fast", fast, height/2, height/2, 0)
|
2015-09-30 19:23:31 +03:00
|
|
|
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
2019-04-25 17:59:48 +03:00
|
|
|
// Import the chain as a ancient-first node and ensure all pointers are updated
|
2022-04-08 16:44:55 +03:00
|
|
|
ancientDb := makeDb()
|
|
|
|
defer ancientDb.Close()
|
2022-08-30 19:22:28 +03:00
|
|
|
ancient, _ := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
2019-04-25 17:59:48 +03:00
|
|
|
defer ancient.Stop()
|
|
|
|
|
|
|
|
if n, err := ancient.InsertHeaderChain(headers, 1); err != nil {
|
|
|
|
t.Fatalf("failed to insert header %d: %v", n, err)
|
|
|
|
}
|
|
|
|
if n, err := ancient.InsertReceiptChain(blocks, receipts, uint64(3*len(blocks)/4)); err != nil {
|
|
|
|
t.Fatalf("failed to insert receipt %d: %v", n, err)
|
|
|
|
}
|
|
|
|
assert(t, "ancient", ancient, height, height, 0)
|
2020-08-20 13:01:24 +03:00
|
|
|
ancient.SetHead(remove - 1)
|
|
|
|
assert(t, "ancient", ancient, 0, 0, 0)
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
2019-04-25 17:59:48 +03:00
|
|
|
|
2020-08-20 13:01:24 +03:00
|
|
|
if frozen, err := ancientDb.Ancients(); err != nil || frozen != 1 {
|
|
|
|
t.Fatalf("failed to truncate ancient store, want %v, have %v", 1, frozen)
|
|
|
|
}
|
2015-09-30 19:23:31 +03:00
|
|
|
// Import the chain as a light node and ensure all pointers are updated
|
2022-04-08 16:44:55 +03:00
|
|
|
lightDb := makeDb()
|
|
|
|
defer lightDb.Close()
|
2022-08-30 19:22:28 +03:00
|
|
|
light, _ := NewBlockChain(lightDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
|
2015-10-07 12:14:30 +03:00
|
|
|
if n, err := light.InsertHeaderChain(headers, 1); err != nil {
|
2015-09-30 19:23:31 +03:00
|
|
|
t.Fatalf("failed to insert header %d: %v", n, err)
|
|
|
|
}
|
2017-08-07 15:47:25 +03:00
|
|
|
defer light.Stop()
|
|
|
|
|
2015-09-30 19:23:31 +03:00
|
|
|
assert(t, "light", light, height, 0, 0)
|
2020-08-20 13:01:24 +03:00
|
|
|
light.SetHead(remove - 1)
|
2015-10-09 16:21:47 +03:00
|
|
|
assert(t, "light", light, height/2, 0, 0)
|
2015-09-30 19:23:31 +03:00
|
|
|
}
|
|
|
|
|
2016-03-15 20:55:39 +02:00
|
|
|
// Tests that chain reorganisations handle transaction removals and reinsertions.
|
2015-08-17 15:01:41 +03:00
|
|
|
func TestChainTxReorgs(t *testing.T) {
|
|
|
|
var (
|
|
|
|
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
|
|
|
key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
|
|
|
|
key3, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
|
|
|
|
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
|
|
|
|
addr2 = crypto.PubkeyToAddress(key2.PublicKey)
|
|
|
|
addr3 = crypto.PubkeyToAddress(key3.PublicKey)
|
2017-03-02 16:03:33 +03:00
|
|
|
gspec = &Genesis{
|
|
|
|
Config: params.TestChainConfig,
|
|
|
|
GasLimit: 3141592,
|
|
|
|
Alloc: GenesisAlloc{
|
2021-06-15 13:56:14 +03:00
|
|
|
addr1: {Balance: big.NewInt(1000000000000000)},
|
|
|
|
addr2: {Balance: big.NewInt(1000000000000000)},
|
|
|
|
addr3: {Balance: big.NewInt(1000000000000000)},
|
2017-03-02 16:03:33 +03:00
|
|
|
},
|
|
|
|
}
|
2022-09-07 21:21:59 +03:00
|
|
|
signer = types.LatestSigner(gspec.Config)
|
2015-08-17 15:01:41 +03:00
|
|
|
)
|
2017-03-02 16:03:33 +03:00
|
|
|
|
2015-08-17 15:01:41 +03:00
|
|
|
// Create two transactions shared between the chains:
|
|
|
|
// - postponed: transaction included at a later block in the forked chain
|
|
|
|
// - swapped: transaction included at the same block number in the forked chain
|
2021-06-15 13:56:14 +03:00
|
|
|
postponed, _ := types.SignTx(types.NewTransaction(0, addr1, big.NewInt(1000), params.TxGas, big.NewInt(params.InitialBaseFee), nil), signer, key1)
|
|
|
|
swapped, _ := types.SignTx(types.NewTransaction(1, addr1, big.NewInt(1000), params.TxGas, big.NewInt(params.InitialBaseFee), nil), signer, key1)
|
2015-08-17 15:01:41 +03:00
|
|
|
|
|
|
|
// Create two transactions that will be dropped by the forked chain:
|
|
|
|
// - pastDrop: transaction dropped retroactively from a past block
|
|
|
|
// - freshDrop: transaction dropped exactly at the block where the reorg is detected
|
|
|
|
var pastDrop, freshDrop *types.Transaction
|
|
|
|
|
|
|
|
// Create three transactions that will be added in the forked chain:
|
2016-03-15 20:08:18 +02:00
|
|
|
// - pastAdd: transaction added before the reorganization is detected
|
2015-08-17 15:01:41 +03:00
|
|
|
// - freshAdd: transaction added at the exact block the reorg is detected
|
|
|
|
// - futureAdd: transaction added after the reorg has already finished
|
|
|
|
var pastAdd, freshAdd, futureAdd *types.Transaction
|
|
|
|
|
2022-09-07 21:21:59 +03:00
|
|
|
_, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 3, func(i int, gen *BlockGen) {
|
2015-08-17 15:01:41 +03:00
|
|
|
switch i {
|
|
|
|
case 0:
|
2021-06-15 13:56:14 +03:00
|
|
|
pastDrop, _ = types.SignTx(types.NewTransaction(gen.TxNonce(addr2), addr2, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), signer, key2)
|
2015-08-17 15:01:41 +03:00
|
|
|
|
|
|
|
gen.AddTx(pastDrop) // This transaction will be dropped in the fork from below the split point
|
|
|
|
gen.AddTx(postponed) // This transaction will be postponed till block #3 in the fork
|
|
|
|
|
|
|
|
case 2:
|
2021-06-15 13:56:14 +03:00
|
|
|
freshDrop, _ = types.SignTx(types.NewTransaction(gen.TxNonce(addr2), addr2, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), signer, key2)
|
2015-08-17 15:01:41 +03:00
|
|
|
|
|
|
|
gen.AddTx(freshDrop) // This transaction will be dropped in the fork from exactly at the split point
|
|
|
|
gen.AddTx(swapped) // This transaction will be swapped out at the exact height
|
|
|
|
|
|
|
|
gen.OffsetTime(9) // Lower the block difficulty to simulate a weaker chain
|
|
|
|
}
|
|
|
|
})
|
|
|
|
// Import the chain. This runs all block validation rules.
|
2022-09-07 21:21:59 +03:00
|
|
|
db := rawdb.NewMemoryDatabase()
|
2022-08-30 19:22:28 +03:00
|
|
|
blockchain, _ := NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
|
2015-10-19 17:08:17 +03:00
|
|
|
if i, err := blockchain.InsertChain(chain); err != nil {
|
2015-08-17 15:01:41 +03:00
|
|
|
t.Fatalf("failed to insert original chain[%d]: %v", i, err)
|
|
|
|
}
|
2017-08-07 15:47:25 +03:00
|
|
|
defer blockchain.Stop()
|
2015-08-17 15:01:41 +03:00
|
|
|
|
|
|
|
// overwrite the old chain
|
2022-09-07 21:21:59 +03:00
|
|
|
_, chain, _ = GenerateChainWithGenesis(gspec, ethash.NewFaker(), 5, func(i int, gen *BlockGen) {
|
2015-08-17 15:01:41 +03:00
|
|
|
switch i {
|
|
|
|
case 0:
|
2021-06-15 13:56:14 +03:00
|
|
|
pastAdd, _ = types.SignTx(types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), signer, key3)
|
2015-08-17 15:01:41 +03:00
|
|
|
gen.AddTx(pastAdd) // This transaction needs to be injected during reorg
|
|
|
|
|
|
|
|
case 2:
|
|
|
|
gen.AddTx(postponed) // This transaction was postponed from block #1 in the original chain
|
|
|
|
gen.AddTx(swapped) // This transaction was swapped from the exact current spot in the original chain
|
|
|
|
|
2021-06-15 13:56:14 +03:00
|
|
|
freshAdd, _ = types.SignTx(types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), signer, key3)
|
2015-08-17 15:01:41 +03:00
|
|
|
gen.AddTx(freshAdd) // This transaction will be added exactly at reorg time
|
|
|
|
|
|
|
|
case 3:
|
2021-06-15 13:56:14 +03:00
|
|
|
futureAdd, _ = types.SignTx(types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), signer, key3)
|
2015-08-17 15:01:41 +03:00
|
|
|
gen.AddTx(futureAdd) // This transaction will be added after a full reorg
|
|
|
|
}
|
|
|
|
})
|
2015-10-19 17:08:17 +03:00
|
|
|
if _, err := blockchain.InsertChain(chain); err != nil {
|
2015-08-17 15:01:41 +03:00
|
|
|
t.Fatalf("failed to insert forked chain: %v", err)
|
2015-06-08 13:12:13 +03:00
|
|
|
}
|
2015-08-17 15:01:41 +03:00
|
|
|
|
|
|
|
// removed tx
|
|
|
|
for i, tx := range (types.Transactions{pastDrop, freshDrop}) {
|
2018-05-07 14:35:06 +03:00
|
|
|
if txn, _, _, _ := rawdb.ReadTransaction(db, tx.Hash()); txn != nil {
|
2015-10-22 15:43:21 +03:00
|
|
|
t.Errorf("drop %d: tx %v found while shouldn't have been", i, txn)
|
2015-08-17 15:01:41 +03:00
|
|
|
}
|
2019-04-15 12:36:27 +03:00
|
|
|
if rcpt, _, _, _ := rawdb.ReadReceipt(db, tx.Hash(), blockchain.Config()); rcpt != nil {
|
2017-07-14 19:39:53 +03:00
|
|
|
t.Errorf("drop %d: receipt %v found while shouldn't have been", i, rcpt)
|
2015-08-17 15:01:41 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
// added tx
|
|
|
|
for i, tx := range (types.Transactions{pastAdd, freshAdd, futureAdd}) {
|
2018-05-07 14:35:06 +03:00
|
|
|
if txn, _, _, _ := rawdb.ReadTransaction(db, tx.Hash()); txn == nil {
|
2015-08-17 15:01:41 +03:00
|
|
|
t.Errorf("add %d: expected tx to be found", i)
|
|
|
|
}
|
2019-04-15 12:36:27 +03:00
|
|
|
if rcpt, _, _, _ := rawdb.ReadReceipt(db, tx.Hash(), blockchain.Config()); rcpt == nil {
|
2015-08-17 15:01:41 +03:00
|
|
|
t.Errorf("add %d: expected receipt to be found", i)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// shared tx
|
|
|
|
for i, tx := range (types.Transactions{postponed, swapped}) {
|
2018-05-07 14:35:06 +03:00
|
|
|
if txn, _, _, _ := rawdb.ReadTransaction(db, tx.Hash()); txn == nil {
|
2015-08-17 15:01:41 +03:00
|
|
|
t.Errorf("share %d: expected tx to be found", i)
|
|
|
|
}
|
2019-04-15 12:36:27 +03:00
|
|
|
if rcpt, _, _, _ := rawdb.ReadReceipt(db, tx.Hash(), blockchain.Config()); rcpt == nil {
|
2015-08-17 15:01:41 +03:00
|
|
|
t.Errorf("share %d: expected receipt to be found", i)
|
|
|
|
}
|
2015-06-08 13:12:13 +03:00
|
|
|
}
|
|
|
|
}
|
2015-12-01 01:11:24 +02:00
|
|
|
|
|
|
|
func TestLogReorgs(t *testing.T) {
|
|
|
|
var (
|
|
|
|
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
|
|
|
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
|
2022-09-07 21:21:59 +03:00
|
|
|
|
2015-12-01 01:11:24 +02:00
|
|
|
// this code generates a log
|
2022-09-07 21:21:59 +03:00
|
|
|
code = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
|
|
|
|
gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}}
|
|
|
|
signer = types.LatestSigner(gspec.Config)
|
2015-12-01 01:11:24 +02:00
|
|
|
)
|
|
|
|
|
2022-09-07 21:21:59 +03:00
|
|
|
blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
|
2017-08-07 15:47:25 +03:00
|
|
|
defer blockchain.Stop()
|
2015-12-01 01:11:24 +02:00
|
|
|
|
2017-08-18 13:58:36 +03:00
|
|
|
rmLogsCh := make(chan RemovedLogsEvent)
|
|
|
|
blockchain.SubscribeRemovedLogsEvent(rmLogsCh)
|
2022-09-07 21:21:59 +03:00
|
|
|
_, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 2, func(i int, gen *BlockGen) {
|
2015-12-01 01:11:24 +02:00
|
|
|
if i == 1 {
|
2021-06-15 13:56:14 +03:00
|
|
|
tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, gen.header.BaseFee, code), signer, key1)
|
2015-12-01 01:11:24 +02:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to create tx: %v", err)
|
|
|
|
}
|
|
|
|
gen.AddTx(tx)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
if _, err := blockchain.InsertChain(chain); err != nil {
|
|
|
|
t.Fatalf("failed to insert chain: %v", err)
|
|
|
|
}
|
|
|
|
|
2022-09-07 21:21:59 +03:00
|
|
|
_, chain, _ = GenerateChainWithGenesis(gspec, ethash.NewFaker(), 3, func(i int, gen *BlockGen) {})
|
2019-11-29 16:22:08 +03:00
|
|
|
done := make(chan struct{})
|
|
|
|
go func() {
|
|
|
|
ev := <-rmLogsCh
|
|
|
|
if len(ev.Logs) == 0 {
|
|
|
|
t.Error("expected logs")
|
|
|
|
}
|
|
|
|
close(done)
|
|
|
|
}()
|
2015-12-01 01:11:24 +02:00
|
|
|
if _, err := blockchain.InsertChain(chain); err != nil {
|
|
|
|
t.Fatalf("failed to insert forked chain: %v", err)
|
|
|
|
}
|
2017-08-18 13:58:36 +03:00
|
|
|
timeout := time.NewTimer(1 * time.Second)
|
2020-04-02 17:04:45 +03:00
|
|
|
defer timeout.Stop()
|
2017-08-18 13:58:36 +03:00
|
|
|
select {
|
2019-11-29 16:22:08 +03:00
|
|
|
case <-done:
|
2017-08-18 13:58:36 +03:00
|
|
|
case <-timeout.C:
|
|
|
|
t.Fatal("Timeout. There is no RemovedLogsEvent has been sent.")
|
2015-12-01 01:11:24 +02:00
|
|
|
}
|
|
|
|
}
|
2016-03-07 19:11:52 +02:00
|
|
|
|
2020-04-28 11:06:49 +03:00
|
|
|
// This EVM code generates a log when the contract is created.
var logCode = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
|
|
|
|
|
|
|
|
// This test checks that log events and RemovedLogsEvent are sent
|
|
|
|
// when the chain reorganizes.
|
2018-12-17 10:23:54 +03:00
|
|
|
func TestLogRebirth(t *testing.T) {
|
|
|
|
var (
|
2020-04-28 11:06:49 +03:00
|
|
|
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
|
|
|
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
|
2021-06-15 13:56:14 +03:00
|
|
|
gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}}
|
2021-02-25 17:26:57 +03:00
|
|
|
signer = types.LatestSigner(gspec.Config)
|
2020-04-28 11:06:49 +03:00
|
|
|
engine = ethash.NewFaker()
|
2022-09-07 21:21:59 +03:00
|
|
|
blockchain, _ = NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil)
|
2018-12-17 10:23:54 +03:00
|
|
|
)
|
|
|
|
defer blockchain.Stop()
|
|
|
|
|
2020-04-28 11:06:49 +03:00
|
|
|
// The event channels.
|
|
|
|
newLogCh := make(chan []*types.Log, 10)
|
|
|
|
rmLogsCh := make(chan RemovedLogsEvent, 10)
|
|
|
|
blockchain.SubscribeLogsEvent(newLogCh)
|
2018-12-17 10:23:54 +03:00
|
|
|
blockchain.SubscribeRemovedLogsEvent(rmLogsCh)
|
|
|
|
|
2022-09-09 16:25:55 +03:00
|
|
|
// This chain contains 10 logs.
|
|
|
|
genDb, chain, _ := GenerateChainWithGenesis(gspec, engine, 3, func(i int, gen *BlockGen) {
|
|
|
|
if i < 2 {
|
|
|
|
for ii := 0; ii < 5; ii++ {
|
|
|
|
tx, err := types.SignNewTx(key1, signer, &types.LegacyTx{
|
|
|
|
Nonce: gen.TxNonce(addr1),
|
|
|
|
GasPrice: gen.header.BaseFee,
|
|
|
|
Gas: uint64(1000001),
|
|
|
|
Data: logCode,
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to create tx: %v", err)
|
|
|
|
}
|
|
|
|
gen.AddTx(tx)
|
2018-12-17 10:23:54 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
})
|
|
|
|
if _, err := blockchain.InsertChain(chain); err != nil {
|
|
|
|
t.Fatalf("failed to insert chain: %v", err)
|
|
|
|
}
|
2022-09-09 16:25:55 +03:00
|
|
|
checkLogEvents(t, newLogCh, rmLogsCh, 10, 0)
|
|
|
|
|
|
|
|
// Generate long reorg chain containing more logs. Inserting the
|
|
|
|
// chain removes one log and adds four.
|
|
|
|
_, forkChain, _ := GenerateChainWithGenesis(gspec, engine, 3, func(i int, gen *BlockGen) {
|
|
|
|
if i == 2 {
|
|
|
|
// The last (head) block is not part of the reorg-chain, we can ignore it
|
|
|
|
return
|
|
|
|
}
|
|
|
|
for ii := 0; ii < 5; ii++ {
|
|
|
|
tx, err := types.SignNewTx(key1, signer, &types.LegacyTx{
|
|
|
|
Nonce: gen.TxNonce(addr1),
|
|
|
|
GasPrice: gen.header.BaseFee,
|
|
|
|
Gas: uint64(1000000),
|
|
|
|
Data: logCode,
|
|
|
|
})
|
2018-12-17 10:23:54 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to create tx: %v", err)
|
|
|
|
}
|
|
|
|
gen.AddTx(tx)
|
|
|
|
}
|
2022-09-09 16:25:55 +03:00
|
|
|
gen.OffsetTime(-9) // higher block difficulty
|
2018-12-17 10:23:54 +03:00
|
|
|
})
|
|
|
|
if _, err := blockchain.InsertChain(forkChain); err != nil {
|
|
|
|
t.Fatalf("failed to insert forked chain: %v", err)
|
|
|
|
}
|
2022-09-09 16:25:55 +03:00
|
|
|
checkLogEvents(t, newLogCh, rmLogsCh, 10, 10)
|
2018-12-17 10:23:54 +03:00
|
|
|
|
2020-04-28 11:06:49 +03:00
|
|
|
// This chain segment is rooted in the original chain, but doesn't contain any logs.
|
|
|
|
// When inserting it, the canonical chain switches away from forkChain and re-emits
|
|
|
|
// the log event for the old chain, as well as a RemovedLogsEvent for forkChain.
|
2022-09-07 21:21:59 +03:00
|
|
|
newBlocks, _ := GenerateChain(gspec.Config, chain[len(chain)-1], engine, genDb, 1, func(i int, gen *BlockGen) {})
|
2018-12-17 10:23:54 +03:00
|
|
|
if _, err := blockchain.InsertChain(newBlocks); err != nil {
|
|
|
|
t.Fatalf("failed to insert forked chain: %v", err)
|
|
|
|
}
|
2022-09-09 16:25:55 +03:00
|
|
|
checkLogEvents(t, newLogCh, rmLogsCh, 10, 10)
|
2018-12-17 10:23:54 +03:00
|
|
|
}
|
|
|
|
|
2020-04-28 11:06:49 +03:00
|
|
|
// This test is a variation of TestLogRebirth. It verifies that log events are emitted
|
|
|
|
// when a side chain containing log events overtakes the canonical chain.
|
2018-12-17 10:23:54 +03:00
|
|
|
func TestSideLogRebirth(t *testing.T) {
|
|
|
|
var (
|
2020-04-28 11:06:49 +03:00
|
|
|
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
|
|
|
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
|
2021-06-15 13:56:14 +03:00
|
|
|
gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}}
|
2021-02-25 17:26:57 +03:00
|
|
|
signer = types.LatestSigner(gspec.Config)
|
2022-09-07 21:21:59 +03:00
|
|
|
blockchain, _ = NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
|
2018-12-17 10:23:54 +03:00
|
|
|
)
|
|
|
|
defer blockchain.Stop()
|
|
|
|
|
2020-04-28 11:06:49 +03:00
|
|
|
newLogCh := make(chan []*types.Log, 10)
|
|
|
|
rmLogsCh := make(chan RemovedLogsEvent, 10)
|
|
|
|
blockchain.SubscribeLogsEvent(newLogCh)
|
|
|
|
blockchain.SubscribeRemovedLogsEvent(rmLogsCh)
|
|
|
|
|
2022-09-07 21:21:59 +03:00
|
|
|
_, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 2, func(i int, gen *BlockGen) {
|
2018-12-17 10:23:54 +03:00
|
|
|
if i == 1 {
|
2020-04-28 11:06:49 +03:00
|
|
|
gen.OffsetTime(-9) // higher block difficulty
|
2018-12-17 10:23:54 +03:00
|
|
|
}
|
|
|
|
})
|
|
|
|
if _, err := blockchain.InsertChain(chain); err != nil {
|
|
|
|
t.Fatalf("failed to insert forked chain: %v", err)
|
|
|
|
}
|
2020-04-28 11:06:49 +03:00
|
|
|
checkLogEvents(t, newLogCh, rmLogsCh, 0, 0)
|
2018-12-17 10:23:54 +03:00
|
|
|
|
|
|
|
// Generate side chain with lower difficulty
|
2022-09-07 21:21:59 +03:00
|
|
|
genDb, sideChain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 2, func(i int, gen *BlockGen) {
|
2018-12-17 10:23:54 +03:00
|
|
|
if i == 1 {
|
2021-06-15 13:56:14 +03:00
|
|
|
tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, gen.header.BaseFee, logCode), signer, key1)
|
2018-12-17 10:23:54 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to create tx: %v", err)
|
|
|
|
}
|
|
|
|
gen.AddTx(tx)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
if _, err := blockchain.InsertChain(sideChain); err != nil {
|
|
|
|
t.Fatalf("failed to insert forked chain: %v", err)
|
|
|
|
}
|
2020-04-28 11:06:49 +03:00
|
|
|
checkLogEvents(t, newLogCh, rmLogsCh, 0, 0)
|
2018-12-17 10:23:54 +03:00
|
|
|
|
2020-04-28 11:06:49 +03:00
|
|
|
// Generate a new block based on side chain.
|
2022-09-07 21:21:59 +03:00
|
|
|
newBlocks, _ := GenerateChain(gspec.Config, sideChain[len(sideChain)-1], ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {})
|
2018-12-17 10:23:54 +03:00
|
|
|
if _, err := blockchain.InsertChain(newBlocks); err != nil {
|
|
|
|
t.Fatalf("failed to insert forked chain: %v", err)
|
|
|
|
}
|
2020-04-28 11:06:49 +03:00
|
|
|
checkLogEvents(t, newLogCh, rmLogsCh, 1, 0)
|
|
|
|
}
|
|
|
|
|
|
|
|
func checkLogEvents(t *testing.T, logsCh <-chan []*types.Log, rmLogsCh <-chan RemovedLogsEvent, wantNew, wantRemoved int) {
|
|
|
|
t.Helper()
|
2022-09-09 16:25:55 +03:00
|
|
|
var (
|
|
|
|
countNew int
|
|
|
|
countRm int
|
|
|
|
prev int
|
|
|
|
)
|
2020-04-28 11:06:49 +03:00
|
|
|
// Drain events.
|
2022-09-09 16:25:55 +03:00
|
|
|
for len(logsCh) > 0 {
|
|
|
|
x := <-logsCh
|
|
|
|
countNew += len(x)
|
|
|
|
for _, log := range x {
|
|
|
|
// We expect added logs to be in ascending order: 0:0, 0:1, 1:0 ...
|
|
|
|
have := 100*int(log.BlockNumber) + int(log.TxIndex)
|
|
|
|
if have < prev {
|
|
|
|
t.Fatalf("Expected new logs to arrive in ascending order (%d < %d)", have, prev)
|
|
|
|
}
|
|
|
|
prev = have
|
|
|
|
}
|
|
|
|
}
|
|
|
|
prev = 0
|
|
|
|
for len(rmLogsCh) > 0 {
|
|
|
|
x := <-rmLogsCh
|
|
|
|
countRm += len(x.Logs)
|
|
|
|
for _, log := range x.Logs {
|
|
|
|
// We expect removed logs to be in ascending order: 0:0, 0:1, 1:0 ...
|
|
|
|
have := 100*int(log.BlockNumber) + int(log.TxIndex)
|
|
|
|
if have < prev {
|
|
|
|
t.Fatalf("Expected removed logs to arrive in ascending order (%d < %d)", have, prev)
|
|
|
|
}
|
|
|
|
prev = have
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if countNew != wantNew {
|
|
|
|
t.Fatalf("wrong number of log events: got %d, want %d", countNew, wantNew)
|
2020-04-28 11:06:49 +03:00
|
|
|
}
|
2022-09-09 16:25:55 +03:00
|
|
|
if countRm != wantRemoved {
|
|
|
|
t.Fatalf("wrong number of removed log events: got %d, want %d", countRm, wantRemoved)
|
2018-12-17 10:23:54 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-03-07 19:11:52 +02:00
|
|
|
func TestReorgSideEvent(t *testing.T) {
|
|
|
|
var (
|
|
|
|
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
|
|
|
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
|
2017-04-05 01:16:29 +03:00
|
|
|
gspec = &Genesis{
|
|
|
|
Config: params.TestChainConfig,
|
2021-06-15 13:56:14 +03:00
|
|
|
Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}},
|
2017-04-05 01:16:29 +03:00
|
|
|
}
|
2022-09-07 21:21:59 +03:00
|
|
|
signer = types.LatestSigner(gspec.Config)
|
2016-03-07 19:11:52 +02:00
|
|
|
)
|
2022-09-07 21:21:59 +03:00
|
|
|
blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
|
2017-08-07 15:47:25 +03:00
|
|
|
defer blockchain.Stop()
|
2016-03-07 19:11:52 +02:00
|
|
|
|
2022-09-07 21:21:59 +03:00
|
|
|
_, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 3, func(i int, gen *BlockGen) {})
|
2016-03-07 19:11:52 +02:00
|
|
|
if _, err := blockchain.InsertChain(chain); err != nil {
|
|
|
|
t.Fatalf("failed to insert chain: %v", err)
|
|
|
|
}
|
|
|
|
|
2022-09-07 21:21:59 +03:00
|
|
|
_, replacementBlocks, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 4, func(i int, gen *BlockGen) {
|
2021-06-15 13:56:14 +03:00
|
|
|
tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, gen.header.BaseFee, nil), signer, key1)
|
2016-03-31 18:43:41 +03:00
|
|
|
if i == 2 {
|
2017-09-14 10:59:05 +03:00
|
|
|
gen.OffsetTime(-9)
|
2016-03-31 18:43:41 +03:00
|
|
|
}
|
2016-03-07 19:11:52 +02:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to create tx: %v", err)
|
|
|
|
}
|
|
|
|
gen.AddTx(tx)
|
|
|
|
})
|
2017-09-11 13:13:05 +03:00
|
|
|
chainSideCh := make(chan ChainSideEvent, 64)
|
2017-08-18 13:58:36 +03:00
|
|
|
blockchain.SubscribeChainSideEvent(chainSideCh)
|
2016-03-07 19:11:52 +02:00
|
|
|
if _, err := blockchain.InsertChain(replacementBlocks); err != nil {
|
|
|
|
t.Fatalf("failed to insert chain: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// first two block of the secondary chain are for a brief moment considered
|
|
|
|
// side chains because up to that point the first one is considered the
|
|
|
|
// heavier chain.
|
|
|
|
expectedSideHashes := map[common.Hash]bool{
|
|
|
|
replacementBlocks[0].Hash(): true,
|
|
|
|
replacementBlocks[1].Hash(): true,
|
|
|
|
chain[0].Hash(): true,
|
|
|
|
chain[1].Hash(): true,
|
|
|
|
chain[2].Hash(): true,
|
|
|
|
}
|
|
|
|
|
|
|
|
i := 0
|
|
|
|
|
|
|
|
const timeoutDura = 10 * time.Second
|
|
|
|
timeout := time.NewTimer(timeoutDura)
|
|
|
|
done:
|
|
|
|
for {
|
|
|
|
select {
|
2017-08-18 13:58:36 +03:00
|
|
|
case ev := <-chainSideCh:
|
|
|
|
block := ev.Block
|
2016-03-07 19:11:52 +02:00
|
|
|
if _, ok := expectedSideHashes[block.Hash()]; !ok {
|
|
|
|
t.Errorf("%d: didn't expect %x to be in side chain", i, block.Hash())
|
|
|
|
}
|
|
|
|
i++
|
|
|
|
|
|
|
|
if i == len(expectedSideHashes) {
|
|
|
|
timeout.Stop()
|
|
|
|
|
|
|
|
break done
|
|
|
|
}
|
|
|
|
timeout.Reset(timeoutDura)
|
|
|
|
|
|
|
|
case <-timeout.C:
|
|
|
|
t.Fatal("Timeout. Possibly not all blocks were triggered for sideevent")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// make sure no more events are fired
|
|
|
|
select {
|
2017-08-18 13:58:36 +03:00
|
|
|
case e := <-chainSideCh:
|
2016-03-15 20:08:18 +02:00
|
|
|
t.Errorf("unexpected event fired: %v", e)
|
2016-03-07 19:11:52 +02:00
|
|
|
case <-time.After(250 * time.Millisecond):
|
|
|
|
}
|
|
|
|
}
|
2016-07-26 17:37:04 +03:00
|
|
|
|
|
|
|
// Tests if the canonical block can be fetched from the database during chain insertion.
|
|
|
|
func TestCanonicalBlockRetrieval(t *testing.T) {
|
2022-09-07 21:21:59 +03:00
|
|
|
_, gspec, blockchain, err := newCanonical(ethash.NewFaker(), 0, true)
|
2018-02-23 15:02:33 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to create pristine chain: %v", err)
|
|
|
|
}
|
|
|
|
defer blockchain.Stop()
|
2017-08-07 15:47:25 +03:00
|
|
|
|
2022-09-07 21:21:59 +03:00
|
|
|
_, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 10, func(i int, gen *BlockGen) {})
|
2016-07-26 17:37:04 +03:00
|
|
|
|
2017-08-07 15:47:25 +03:00
|
|
|
var pend sync.WaitGroup
|
|
|
|
pend.Add(len(chain))
|
|
|
|
|
2017-01-06 17:52:03 +03:00
|
|
|
for i := range chain {
|
2016-07-26 17:37:04 +03:00
|
|
|
go func(block *types.Block) {
|
2017-08-07 15:47:25 +03:00
|
|
|
defer pend.Done()
|
|
|
|
|
2016-07-26 17:37:04 +03:00
|
|
|
// try to retrieve a block by its canonical hash and see if the block data can be retrieved.
|
|
|
|
for {
|
2018-05-07 14:35:06 +03:00
|
|
|
ch := rawdb.ReadCanonicalHash(blockchain.db, block.NumberU64())
|
2016-07-26 17:37:04 +03:00
|
|
|
if ch == (common.Hash{}) {
|
|
|
|
continue // busy wait for canonical hash to be written
|
|
|
|
}
|
|
|
|
if ch != block.Hash() {
|
2019-11-20 11:53:01 +03:00
|
|
|
t.Errorf("unknown canonical hash, want %s, got %s", block.Hash().Hex(), ch.Hex())
|
|
|
|
return
|
2016-07-26 17:37:04 +03:00
|
|
|
}
|
2018-05-07 14:35:06 +03:00
|
|
|
fb := rawdb.ReadBlock(blockchain.db, ch, block.NumberU64())
|
2016-07-26 17:37:04 +03:00
|
|
|
if fb == nil {
|
2019-11-20 11:53:01 +03:00
|
|
|
t.Errorf("unable to retrieve block %d for canonical hash: %s", block.NumberU64(), ch.Hex())
|
|
|
|
return
|
2016-07-26 17:37:04 +03:00
|
|
|
}
|
|
|
|
if fb.Hash() != block.Hash() {
|
2019-11-20 11:53:01 +03:00
|
|
|
t.Errorf("invalid block hash for block %d, want %s, got %s", block.NumberU64(), block.Hash().Hex(), fb.Hash().Hex())
|
|
|
|
return
|
2016-07-26 17:37:04 +03:00
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}(chain[i])
|
|
|
|
|
2018-02-23 15:02:33 +03:00
|
|
|
if _, err := blockchain.InsertChain(types.Blocks{chain[i]}); err != nil {
|
2017-08-07 15:47:25 +03:00
|
|
|
t.Fatalf("failed to insert block %d: %v", i, err)
|
|
|
|
}
|
2016-07-26 17:37:04 +03:00
|
|
|
}
|
2017-08-07 15:47:25 +03:00
|
|
|
pend.Wait()
|
2016-07-26 17:37:04 +03:00
|
|
|
}
|
2016-11-02 15:44:13 +03:00
|
|
|
|
|
|
|
func TestEIP155Transition(t *testing.T) {
|
|
|
|
// Configure and generate a sample block chain
|
|
|
|
var (
|
2016-11-28 03:33:28 +03:00
|
|
|
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
|
|
|
address = crypto.PubkeyToAddress(key.PublicKey)
|
|
|
|
funds = big.NewInt(1000000000)
|
|
|
|
deleteAddr = common.Address{1}
|
2017-03-02 16:03:33 +03:00
|
|
|
gspec = &Genesis{
|
2022-09-07 21:21:59 +03:00
|
|
|
Config: ¶ms.ChainConfig{
|
|
|
|
ChainID: big.NewInt(1),
|
|
|
|
EIP150Block: big.NewInt(0),
|
|
|
|
EIP155Block: big.NewInt(2),
|
|
|
|
HomesteadBlock: new(big.Int),
|
|
|
|
},
|
|
|
|
Alloc: GenesisAlloc{address: {Balance: funds}, deleteAddr: {Balance: new(big.Int)}},
|
2017-03-02 16:03:33 +03:00
|
|
|
}
|
2016-11-02 15:44:13 +03:00
|
|
|
)
|
2022-09-07 21:21:59 +03:00
|
|
|
genDb, blocks, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 4, func(i int, block *BlockGen) {
|
2016-11-02 15:44:13 +03:00
|
|
|
var (
|
|
|
|
tx *types.Transaction
|
|
|
|
err error
|
|
|
|
basicTx = func(signer types.Signer) (*types.Transaction, error) {
|
2017-11-13 14:47:27 +03:00
|
|
|
return types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{}, new(big.Int), 21000, new(big.Int), nil), signer, key)
|
2016-11-02 15:44:13 +03:00
|
|
|
}
|
|
|
|
)
|
|
|
|
switch i {
|
|
|
|
case 0:
|
|
|
|
tx, err = basicTx(types.HomesteadSigner{})
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
block.AddTx(tx)
|
|
|
|
case 2:
|
|
|
|
tx, err = basicTx(types.HomesteadSigner{})
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
block.AddTx(tx)
|
|
|
|
|
2021-02-25 17:26:57 +03:00
|
|
|
tx, err = basicTx(types.LatestSigner(gspec.Config))
|
2016-11-02 15:44:13 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
block.AddTx(tx)
|
|
|
|
case 3:
|
|
|
|
tx, err = basicTx(types.HomesteadSigner{})
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
block.AddTx(tx)
|
|
|
|
|
2021-02-25 17:26:57 +03:00
|
|
|
tx, err = basicTx(types.LatestSigner(gspec.Config))
|
2016-11-02 15:44:13 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
block.AddTx(tx)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
2022-09-07 21:21:59 +03:00
|
|
|
blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
|
|
|
|
defer blockchain.Stop()
|
|
|
|
|
2016-11-02 15:44:13 +03:00
|
|
|
if _, err := blockchain.InsertChain(blocks); err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
block := blockchain.GetBlockByNumber(1)
|
|
|
|
if block.Transactions()[0].Protected() {
|
|
|
|
t.Error("Expected block[0].txs[0] to not be replay protected")
|
|
|
|
}
|
|
|
|
|
|
|
|
block = blockchain.GetBlockByNumber(3)
|
|
|
|
if block.Transactions()[0].Protected() {
|
|
|
|
t.Error("Expected block[3].txs[0] to not be replay protected")
|
|
|
|
}
|
|
|
|
if !block.Transactions()[1].Protected() {
|
|
|
|
t.Error("Expected block[3].txs[1] to be replay protected")
|
|
|
|
}
|
|
|
|
if _, err := blockchain.InsertChain(blocks[4:]); err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// generate an invalid chain id transaction
|
2022-09-07 21:21:59 +03:00
|
|
|
config := ¶ms.ChainConfig{
|
|
|
|
ChainID: big.NewInt(2),
|
|
|
|
EIP150Block: big.NewInt(0),
|
|
|
|
EIP155Block: big.NewInt(2),
|
|
|
|
HomesteadBlock: new(big.Int),
|
|
|
|
}
|
|
|
|
blocks, _ = GenerateChain(config, blocks[len(blocks)-1], ethash.NewFaker(), genDb, 4, func(i int, block *BlockGen) {
|
2016-11-02 15:44:13 +03:00
|
|
|
var (
|
|
|
|
tx *types.Transaction
|
|
|
|
err error
|
|
|
|
basicTx = func(signer types.Signer) (*types.Transaction, error) {
|
2017-11-13 14:47:27 +03:00
|
|
|
return types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{}, new(big.Int), 21000, new(big.Int), nil), signer, key)
|
2016-11-02 15:44:13 +03:00
|
|
|
}
|
|
|
|
)
|
2018-07-30 12:30:09 +03:00
|
|
|
if i == 0 {
|
2021-02-25 17:26:57 +03:00
|
|
|
tx, err = basicTx(types.LatestSigner(config))
|
2016-11-02 15:44:13 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
block.AddTx(tx)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
_, err := blockchain.InsertChain(blocks)
|
2021-05-18 02:10:28 +03:00
|
|
|
if have, want := err, types.ErrInvalidChainId; !errors.Is(have, want) {
|
|
|
|
t.Errorf("have %v, want %v", have, want)
|
2016-11-02 15:44:13 +03:00
|
|
|
}
|
|
|
|
}
|
2016-11-28 03:33:28 +03:00
|
|
|
|
|
|
|
func TestEIP161AccountRemoval(t *testing.T) {
|
|
|
|
// Configure and generate a sample block chain
|
|
|
|
var (
|
|
|
|
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
|
|
|
address = crypto.PubkeyToAddress(key.PublicKey)
|
|
|
|
funds = big.NewInt(1000000000)
|
|
|
|
theAddr = common.Address{1}
|
2017-03-02 16:03:33 +03:00
|
|
|
gspec = &Genesis{
|
|
|
|
Config: ¶ms.ChainConfig{
|
2018-06-05 13:31:34 +03:00
|
|
|
ChainID: big.NewInt(1),
|
2017-03-02 16:03:33 +03:00
|
|
|
HomesteadBlock: new(big.Int),
|
|
|
|
EIP155Block: new(big.Int),
|
2019-10-16 14:23:14 +03:00
|
|
|
EIP150Block: new(big.Int),
|
2017-03-02 16:03:33 +03:00
|
|
|
EIP158Block: big.NewInt(2),
|
|
|
|
},
|
|
|
|
Alloc: GenesisAlloc{address: {Balance: funds}},
|
|
|
|
}
|
2016-11-28 03:33:28 +03:00
|
|
|
)
|
2022-09-07 21:21:59 +03:00
|
|
|
_, blocks, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 3, func(i int, block *BlockGen) {
|
2016-11-28 03:33:28 +03:00
|
|
|
var (
|
|
|
|
tx *types.Transaction
|
|
|
|
err error
|
2021-02-25 17:26:57 +03:00
|
|
|
signer = types.LatestSigner(gspec.Config)
|
2016-11-28 03:33:28 +03:00
|
|
|
)
|
|
|
|
switch i {
|
|
|
|
case 0:
|
2017-11-13 14:47:27 +03:00
|
|
|
tx, err = types.SignTx(types.NewTransaction(block.TxNonce(address), theAddr, new(big.Int), 21000, new(big.Int), nil), signer, key)
|
2016-11-28 03:33:28 +03:00
|
|
|
case 1:
|
2017-11-13 14:47:27 +03:00
|
|
|
tx, err = types.SignTx(types.NewTransaction(block.TxNonce(address), theAddr, new(big.Int), 21000, new(big.Int), nil), signer, key)
|
2016-11-28 03:33:28 +03:00
|
|
|
case 2:
|
2017-11-13 14:47:27 +03:00
|
|
|
tx, err = types.SignTx(types.NewTransaction(block.TxNonce(address), theAddr, new(big.Int), 21000, new(big.Int), nil), signer, key)
|
2016-11-28 03:33:28 +03:00
|
|
|
}
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
block.AddTx(tx)
|
|
|
|
})
|
|
|
|
// account must exist pre eip 161
|
2022-09-07 21:21:59 +03:00
|
|
|
blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
|
|
|
|
defer blockchain.Stop()
|
|
|
|
|
2016-11-28 03:33:28 +03:00
|
|
|
if _, err := blockchain.InsertChain(types.Blocks{blocks[0]}); err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2017-06-27 16:57:06 +03:00
|
|
|
if st, _ := blockchain.State(); !st.Exist(theAddr) {
|
2016-11-28 03:33:28 +03:00
|
|
|
t.Error("expected account to exist")
|
|
|
|
}
|
|
|
|
|
|
|
|
// account needs to be deleted post eip 161
|
|
|
|
if _, err := blockchain.InsertChain(types.Blocks{blocks[1]}); err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2017-06-27 16:57:06 +03:00
|
|
|
if st, _ := blockchain.State(); st.Exist(theAddr) {
|
2017-03-02 16:03:33 +03:00
|
|
|
t.Error("account should not exist")
|
2016-11-28 03:33:28 +03:00
|
|
|
}
|
|
|
|
|
2020-05-25 11:21:28 +03:00
|
|
|
// account mustn't be created post eip 161
|
2016-11-28 03:33:28 +03:00
|
|
|
if _, err := blockchain.InsertChain(types.Blocks{blocks[2]}); err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2017-06-27 16:57:06 +03:00
|
|
|
if st, _ := blockchain.State(); st.Exist(theAddr) {
|
2017-03-02 16:03:33 +03:00
|
|
|
t.Error("account should not exist")
|
2016-11-28 03:33:28 +03:00
|
|
|
}
|
|
|
|
}
|
2018-01-22 15:07:47 +03:00
|
|
|
|
|
|
|
// This is a regression test (i.e. as weird as it is, don't delete it ever), which
|
|
|
|
// tests that under weird reorg conditions the blockchain and its internal header-
|
|
|
|
// chain return the same latest block/header.
|
|
|
|
//
|
|
|
|
// https://github.com/ethereum/go-ethereum/pull/15941
|
|
|
|
func TestBlockchainHeaderchainReorgConsistency(t *testing.T) {
|
|
|
|
// Generate a canonical chain to act as the main dataset
|
|
|
|
engine := ethash.NewFaker()
|
2022-09-07 21:21:59 +03:00
|
|
|
genesis := &Genesis{
|
2022-08-30 19:22:28 +03:00
|
|
|
Config: params.TestChainConfig,
|
|
|
|
BaseFee: big.NewInt(params.InitialBaseFee),
|
|
|
|
}
|
2022-09-07 21:21:59 +03:00
|
|
|
genDb, blocks, _ := GenerateChainWithGenesis(genesis, engine, 64, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) })
|
2018-01-22 15:07:47 +03:00
|
|
|
|
|
|
|
// Generate a bunch of fork blocks, each side forking from the canonical chain
|
|
|
|
forks := make([]*types.Block, len(blocks))
|
|
|
|
for i := 0; i < len(forks); i++ {
|
2022-09-07 21:21:59 +03:00
|
|
|
parent := genesis.ToBlock()
|
2018-01-22 15:07:47 +03:00
|
|
|
if i > 0 {
|
|
|
|
parent = blocks[i-1]
|
|
|
|
}
|
2022-09-07 21:21:59 +03:00
|
|
|
fork, _ := GenerateChain(genesis.Config, parent, engine, genDb, 1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{2}) })
|
2018-01-22 15:07:47 +03:00
|
|
|
forks[i] = fork[0]
|
|
|
|
}
|
|
|
|
// Import the canonical and fork chain side by side, verifying the current block
|
|
|
|
// and current header consistency
|
2022-09-07 21:21:59 +03:00
|
|
|
chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil)
|
2018-01-22 15:07:47 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to create tester chain: %v", err)
|
|
|
|
}
|
|
|
|
for i := 0; i < len(blocks); i++ {
|
|
|
|
if _, err := chain.InsertChain(blocks[i : i+1]); err != nil {
|
|
|
|
t.Fatalf("block %d: failed to insert into chain: %v", i, err)
|
|
|
|
}
|
|
|
|
if chain.CurrentBlock().Hash() != chain.CurrentHeader().Hash() {
|
2021-04-15 20:35:00 +03:00
|
|
|
t.Errorf("block %d: current block/header mismatch: block #%d [%x..], header #%d [%x..]", i, chain.CurrentBlock().Number(), chain.CurrentBlock().Hash().Bytes()[:4], chain.CurrentHeader().Number, chain.CurrentHeader().Hash().Bytes()[:4])
|
2018-01-22 15:07:47 +03:00
|
|
|
}
|
|
|
|
if _, err := chain.InsertChain(forks[i : i+1]); err != nil {
|
|
|
|
t.Fatalf(" fork %d: failed to insert into chain: %v", i, err)
|
|
|
|
}
|
|
|
|
if chain.CurrentBlock().Hash() != chain.CurrentHeader().Hash() {
|
2021-04-15 20:35:00 +03:00
|
|
|
t.Errorf(" fork %d: current block/header mismatch: block #%d [%x..], header #%d [%x..]", i, chain.CurrentBlock().Number(), chain.CurrentBlock().Hash().Bytes()[:4], chain.CurrentHeader().Number, chain.CurrentHeader().Hash().Bytes()[:4])
|
2018-01-22 15:07:47 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2018-02-05 19:40:32 +03:00
|
|
|
|
|
|
|
// Tests that importing small side forks doesn't leave junk in the trie database
|
|
|
|
// cache (which would eventually cause memory issues).
|
|
|
|
func TestTrieForkGC(t *testing.T) {
|
|
|
|
// Generate a canonical chain to act as the main dataset
|
|
|
|
engine := ethash.NewFaker()
|
2022-09-07 21:21:59 +03:00
|
|
|
genesis := &Genesis{
|
2022-08-30 19:22:28 +03:00
|
|
|
Config: params.TestChainConfig,
|
2022-09-07 21:21:59 +03:00
|
|
|
BaseFee: big.NewInt(params.InitialBaseFee),
|
2022-08-30 19:22:28 +03:00
|
|
|
}
|
2022-09-07 21:21:59 +03:00
|
|
|
genDb, blocks, _ := GenerateChainWithGenesis(genesis, engine, 2*TriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) })
|
2018-02-05 19:40:32 +03:00
|
|
|
|
|
|
|
// Generate a bunch of fork blocks, each side forking from the canonical chain
|
|
|
|
forks := make([]*types.Block, len(blocks))
|
|
|
|
for i := 0; i < len(forks); i++ {
|
2022-09-07 21:21:59 +03:00
|
|
|
parent := genesis.ToBlock()
|
2018-02-05 19:40:32 +03:00
|
|
|
if i > 0 {
|
|
|
|
parent = blocks[i-1]
|
|
|
|
}
|
2022-09-07 21:21:59 +03:00
|
|
|
fork, _ := GenerateChain(genesis.Config, parent, engine, genDb, 1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{2}) })
|
2018-02-05 19:40:32 +03:00
|
|
|
forks[i] = fork[0]
|
|
|
|
}
|
|
|
|
// Import the canonical and fork chain side by side, forcing the trie cache to cache both
|
2022-09-07 21:21:59 +03:00
|
|
|
chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil)
|
2018-02-05 19:40:32 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to create tester chain: %v", err)
|
|
|
|
}
|
|
|
|
for i := 0; i < len(blocks); i++ {
|
|
|
|
if _, err := chain.InsertChain(blocks[i : i+1]); err != nil {
|
|
|
|
t.Fatalf("block %d: failed to insert into chain: %v", i, err)
|
|
|
|
}
|
|
|
|
if _, err := chain.InsertChain(forks[i : i+1]); err != nil {
|
|
|
|
t.Fatalf("fork %d: failed to insert into chain: %v", i, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Dereference all the recent tries and ensure no past trie is left in
|
2019-05-30 21:51:13 +03:00
|
|
|
for i := 0; i < TriesInMemory; i++ {
|
2018-06-21 12:28:05 +03:00
|
|
|
chain.stateCache.TrieDB().Dereference(blocks[len(blocks)-1-i].Root())
|
|
|
|
chain.stateCache.TrieDB().Dereference(forks[len(blocks)-1-i].Root())
|
2018-02-05 19:40:32 +03:00
|
|
|
}
|
|
|
|
if len(chain.stateCache.TrieDB().Nodes()) > 0 {
|
|
|
|
t.Fatalf("stale tries still alive after garbase collection")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Tests that doing large reorgs works even if the state associated with the
|
|
|
|
// forking point is not available any more.
|
|
|
|
func TestLargeReorgTrieGC(t *testing.T) {
|
|
|
|
// Generate the original common chain segment and the two competing forks
|
|
|
|
engine := ethash.NewFaker()
|
2022-09-07 21:21:59 +03:00
|
|
|
genesis := &Genesis{
|
2022-08-30 19:22:28 +03:00
|
|
|
Config: params.TestChainConfig,
|
2022-09-07 21:21:59 +03:00
|
|
|
BaseFee: big.NewInt(params.InitialBaseFee),
|
2022-08-30 19:22:28 +03:00
|
|
|
}
|
2022-09-07 21:21:59 +03:00
|
|
|
genDb, shared, _ := GenerateChainWithGenesis(genesis, engine, 64, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) })
|
|
|
|
original, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*TriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{2}) })
|
|
|
|
competitor, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*TriesInMemory+1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{3}) })
|
2018-02-05 19:40:32 +03:00
|
|
|
|
|
|
|
// Import the shared chain and the original canonical one
|
2022-09-07 21:21:59 +03:00
|
|
|
chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil)
|
2018-02-05 19:40:32 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to create tester chain: %v", err)
|
|
|
|
}
|
|
|
|
if _, err := chain.InsertChain(shared); err != nil {
|
|
|
|
t.Fatalf("failed to insert shared chain: %v", err)
|
|
|
|
}
|
|
|
|
if _, err := chain.InsertChain(original); err != nil {
|
2018-11-20 15:15:26 +03:00
|
|
|
t.Fatalf("failed to insert original chain: %v", err)
|
2018-02-05 19:40:32 +03:00
|
|
|
}
|
|
|
|
// Ensure that the state associated with the forking point is pruned away
|
|
|
|
if node, _ := chain.stateCache.TrieDB().Node(shared[len(shared)-1].Root()); node != nil {
|
|
|
|
t.Fatalf("common-but-old ancestor still cache")
|
|
|
|
}
|
|
|
|
// Import the competitor chain without exceeding the canonical's TD and ensure
|
|
|
|
// we have not processed any of the blocks (protection against malicious blocks)
|
|
|
|
if _, err := chain.InsertChain(competitor[:len(competitor)-2]); err != nil {
|
|
|
|
t.Fatalf("failed to insert competitor chain: %v", err)
|
|
|
|
}
|
|
|
|
for i, block := range competitor[:len(competitor)-2] {
|
|
|
|
if node, _ := chain.stateCache.TrieDB().Node(block.Root()); node != nil {
|
|
|
|
t.Fatalf("competitor %d: low TD chain became processed", i)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Import the head of the competitor chain, triggering the reorg and ensure we
|
|
|
|
// successfully reprocess all the stashed away blocks.
|
|
|
|
if _, err := chain.InsertChain(competitor[len(competitor)-2:]); err != nil {
|
|
|
|
t.Fatalf("failed to finalize competitor chain: %v", err)
|
|
|
|
}
|
2019-05-30 21:51:13 +03:00
|
|
|
for i, block := range competitor[:len(competitor)-TriesInMemory] {
|
2018-02-05 19:40:32 +03:00
|
|
|
if node, _ := chain.stateCache.TrieDB().Node(block.Root()); node != nil {
|
|
|
|
t.Fatalf("competitor %d: competing chain state missing", i)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2018-04-08 00:20:57 +03:00
|
|
|
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
2019-04-25 17:59:48 +03:00
|
|
|
func TestBlockchainRecovery(t *testing.T) {
|
|
|
|
// Configure and generate a sample block chain
|
|
|
|
var (
|
|
|
|
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
|
|
|
address = crypto.PubkeyToAddress(key.PublicKey)
|
|
|
|
funds = big.NewInt(1000000000)
|
|
|
|
gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{address: {Balance: funds}}}
|
|
|
|
)
|
|
|
|
height := uint64(1024)
|
2022-09-07 21:21:59 +03:00
|
|
|
_, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), int(height), nil)
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
2019-04-25 17:59:48 +03:00
|
|
|
|
|
|
|
// Import the chain as a ancient-first node and ensure all pointers are updated
|
2022-09-07 21:21:59 +03:00
|
|
|
ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
2019-04-25 17:59:48 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to create temp freezer db: %v", err)
|
|
|
|
}
|
2022-04-08 16:44:55 +03:00
|
|
|
defer ancientDb.Close()
|
2022-08-30 19:22:28 +03:00
|
|
|
ancient, _ := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
2019-04-25 17:59:48 +03:00
|
|
|
|
|
|
|
headers := make([]*types.Header, len(blocks))
|
|
|
|
for i, block := range blocks {
|
|
|
|
headers[i] = block.Header()
|
|
|
|
}
|
|
|
|
if n, err := ancient.InsertHeaderChain(headers, 1); err != nil {
|
|
|
|
t.Fatalf("failed to insert header %d: %v", n, err)
|
|
|
|
}
|
|
|
|
if n, err := ancient.InsertReceiptChain(blocks, receipts, uint64(3*len(blocks)/4)); err != nil {
|
|
|
|
t.Fatalf("failed to insert receipt %d: %v", n, err)
|
|
|
|
}
|
2020-08-20 13:01:24 +03:00
|
|
|
rawdb.WriteLastPivotNumber(ancientDb, blocks[len(blocks)-1].NumberU64()) // Force fast sync behavior
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
2019-04-25 17:59:48 +03:00
|
|
|
ancient.Stop()
|
|
|
|
|
|
|
|
// Destroy head fast block manually
|
|
|
|
midBlock := blocks[len(blocks)/2]
|
|
|
|
rawdb.WriteHeadFastBlockHash(ancientDb, midBlock.Hash())
|
|
|
|
|
|
|
|
// Reopen broken blockchain again
|
2022-08-30 19:22:28 +03:00
|
|
|
ancient, _ = NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
2019-04-25 17:59:48 +03:00
|
|
|
defer ancient.Stop()
|
|
|
|
if num := ancient.CurrentBlock().NumberU64(); num != 0 {
|
|
|
|
t.Errorf("head block mismatch: have #%v, want #%v", num, 0)
|
|
|
|
}
|
|
|
|
if num := ancient.CurrentFastBlock().NumberU64(); num != midBlock.NumberU64() {
|
|
|
|
t.Errorf("head fast-block mismatch: have #%v, want #%v", num, midBlock.NumberU64())
|
|
|
|
}
|
|
|
|
if num := ancient.CurrentHeader().Number.Uint64(); num != midBlock.NumberU64() {
|
|
|
|
t.Errorf("head header mismatch: have #%v, want #%v", num, midBlock.NumberU64())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-09-07 13:31:17 +03:00
|
|
|
// This test checks that InsertReceiptChain will roll back correctly when attempting to insert a side chain.
|
|
|
|
func TestInsertReceiptChainRollback(t *testing.T) {
|
|
|
|
// Generate forked chain. The returned BlockChain object is used to process the side chain blocks.
|
2022-09-07 21:21:59 +03:00
|
|
|
tmpChain, sideblocks, canonblocks, gspec, err := getLongAndShortChains()
|
2021-09-07 13:31:17 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
defer tmpChain.Stop()
|
|
|
|
// Get the side chain receipts.
|
|
|
|
if _, err := tmpChain.InsertChain(sideblocks); err != nil {
|
|
|
|
t.Fatal("processing side chain failed:", err)
|
|
|
|
}
|
|
|
|
t.Log("sidechain head:", tmpChain.CurrentBlock().Number(), tmpChain.CurrentBlock().Hash())
|
|
|
|
sidechainReceipts := make([]types.Receipts, len(sideblocks))
|
|
|
|
for i, block := range sideblocks {
|
|
|
|
sidechainReceipts[i] = tmpChain.GetReceiptsByHash(block.Hash())
|
|
|
|
}
|
|
|
|
// Get the canon chain receipts.
|
|
|
|
if _, err := tmpChain.InsertChain(canonblocks); err != nil {
|
|
|
|
t.Fatal("processing canon chain failed:", err)
|
|
|
|
}
|
|
|
|
t.Log("canon head:", tmpChain.CurrentBlock().Number(), tmpChain.CurrentBlock().Hash())
|
|
|
|
canonReceipts := make([]types.Receipts, len(canonblocks))
|
|
|
|
for i, block := range canonblocks {
|
|
|
|
canonReceipts[i] = tmpChain.GetReceiptsByHash(block.Hash())
|
|
|
|
}
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
2019-04-25 17:59:48 +03:00
|
|
|
|
2021-09-07 13:31:17 +03:00
|
|
|
// Set up a BlockChain that uses the ancient store.
|
2022-08-30 19:22:28 +03:00
|
|
|
ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
2019-04-25 17:59:48 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to create temp freezer db: %v", err)
|
|
|
|
}
|
2022-04-08 16:44:55 +03:00
|
|
|
defer ancientDb.Close()
|
2022-08-30 19:22:28 +03:00
|
|
|
|
2022-09-07 21:21:59 +03:00
|
|
|
ancientChain, _ := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
|
2021-09-07 13:31:17 +03:00
|
|
|
defer ancientChain.Stop()
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
2019-04-25 17:59:48 +03:00
|
|
|
|
2021-09-07 13:31:17 +03:00
|
|
|
// Import the canonical header chain.
|
|
|
|
canonHeaders := make([]*types.Header, len(canonblocks))
|
|
|
|
for i, block := range canonblocks {
|
|
|
|
canonHeaders[i] = block.Header()
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
2019-04-25 17:59:48 +03:00
|
|
|
}
|
2021-09-07 13:31:17 +03:00
|
|
|
if _, err = ancientChain.InsertHeaderChain(canonHeaders, 1); err != nil {
|
|
|
|
t.Fatal("can't import canon headers:", err)
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
2019-04-25 17:59:48 +03:00
|
|
|
}
|
2021-09-07 13:31:17 +03:00
|
|
|
|
|
|
|
// Try to insert blocks/receipts of the side chain.
|
|
|
|
_, err = ancientChain.InsertReceiptChain(sideblocks, sidechainReceipts, uint64(len(sideblocks)))
|
|
|
|
if err == nil {
|
|
|
|
t.Fatal("expected error from InsertReceiptChain.")
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
2019-04-25 17:59:48 +03:00
|
|
|
}
|
2021-09-07 13:31:17 +03:00
|
|
|
if ancientChain.CurrentFastBlock().NumberU64() != 0 {
|
|
|
|
t.Fatalf("failed to rollback ancient data, want %d, have %d", 0, ancientChain.CurrentFastBlock().NumberU64())
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
2019-04-25 17:59:48 +03:00
|
|
|
}
|
2021-09-07 13:31:17 +03:00
|
|
|
if frozen, err := ancientChain.db.Ancients(); err != nil || frozen != 1 {
|
|
|
|
t.Fatalf("failed to truncate ancient data, frozen index is %d", frozen)
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
2019-04-25 17:59:48 +03:00
|
|
|
}
|
2021-09-07 13:31:17 +03:00
|
|
|
|
|
|
|
// Insert blocks/receipts of the canonical chain.
|
|
|
|
_, err = ancientChain.InsertReceiptChain(canonblocks, canonReceipts, uint64(len(canonblocks)))
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("can't import canon chain receipts: %v", err)
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
2019-04-25 17:59:48 +03:00
|
|
|
}
|
2021-09-07 13:31:17 +03:00
|
|
|
if ancientChain.CurrentFastBlock().NumberU64() != canonblocks[len(canonblocks)-1].NumberU64() {
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
2019-04-25 17:59:48 +03:00
|
|
|
t.Fatalf("failed to insert ancient recept chain after rollback")
|
|
|
|
}
|
2021-09-07 13:31:17 +03:00
|
|
|
if frozen, _ := ancientChain.db.Ancients(); frozen != uint64(len(canonblocks))+1 {
|
|
|
|
t.Fatalf("wrong ancients count %d", frozen)
|
|
|
|
}
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
2019-04-25 17:59:48 +03:00
|
|
|
}
|
|
|
|
|
2019-02-04 13:54:39 +03:00
|
|
|
// Tests that importing a very large side fork, which is larger than the canon chain,
|
|
|
|
// but where the difficulty per block is kept low: this means that it will not
|
|
|
|
// overtake the 'canon' chain until after it's passed canon by about 200 blocks.
|
2019-02-08 12:11:31 +03:00
|
|
|
//
|
|
|
|
// Details at:
|
2022-09-10 14:25:40 +03:00
|
|
|
// - https://github.com/ethereum/go-ethereum/issues/18977
|
|
|
|
// - https://github.com/ethereum/go-ethereum/pull/18988
|
2019-02-08 12:11:31 +03:00
|
|
|
func TestLowDiffLongChain(t *testing.T) {
|
2019-02-04 13:54:39 +03:00
|
|
|
// Generate a canonical chain to act as the main dataset
|
|
|
|
engine := ethash.NewFaker()
|
2022-09-07 21:21:59 +03:00
|
|
|
genesis := &Genesis{
|
2022-08-30 19:22:28 +03:00
|
|
|
Config: params.TestChainConfig,
|
2022-09-07 21:21:59 +03:00
|
|
|
BaseFee: big.NewInt(params.InitialBaseFee),
|
2022-08-30 19:22:28 +03:00
|
|
|
}
|
2019-02-08 12:11:31 +03:00
|
|
|
// We must use a pretty long chain to ensure that the fork doesn't overtake us
|
|
|
|
// until after at least 128 blocks post tip
|
2022-09-07 21:21:59 +03:00
|
|
|
genDb, blocks, _ := GenerateChainWithGenesis(genesis, engine, 6*TriesInMemory, func(i int, b *BlockGen) {
|
2019-02-08 12:11:31 +03:00
|
|
|
b.SetCoinbase(common.Address{1})
|
|
|
|
b.OffsetTime(-9)
|
|
|
|
})
|
2019-02-04 13:54:39 +03:00
|
|
|
|
|
|
|
// Import the canonical chain
|
2022-09-07 21:21:59 +03:00
|
|
|
chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil)
|
2019-02-04 13:54:39 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to create tester chain: %v", err)
|
|
|
|
}
|
2019-02-08 12:11:31 +03:00
|
|
|
if n, err := chain.InsertChain(blocks); err != nil {
|
|
|
|
t.Fatalf("block %d: failed to insert into chain: %v", n, err)
|
2019-02-04 13:54:39 +03:00
|
|
|
}
|
|
|
|
// Generate fork chain, starting from an early block
|
|
|
|
parent := blocks[10]
|
2022-09-07 21:21:59 +03:00
|
|
|
fork, _ := GenerateChain(genesis.Config, parent, engine, genDb, 8*TriesInMemory, func(i int, b *BlockGen) {
|
2019-02-08 12:11:31 +03:00
|
|
|
b.SetCoinbase(common.Address{2})
|
|
|
|
})
|
2019-02-04 13:54:39 +03:00
|
|
|
|
|
|
|
// And now import the fork
|
|
|
|
if i, err := chain.InsertChain(fork); err != nil {
|
|
|
|
t.Fatalf("block %d: failed to insert into chain: %v", i, err)
|
|
|
|
}
|
|
|
|
head := chain.CurrentBlock()
|
|
|
|
if got := fork[len(fork)-1].Hash(); got != head.Hash() {
|
|
|
|
t.Fatalf("head wrong, expected %x got %x", head.Hash(), got)
|
|
|
|
}
|
2019-02-08 12:11:31 +03:00
|
|
|
// Sanity check that all the canonical numbers are present
|
|
|
|
header := chain.CurrentHeader()
|
|
|
|
for number := head.NumberU64(); number > 0; number-- {
|
|
|
|
if hash := chain.GetHeaderByNumber(number).Hash(); hash != header.Hash() {
|
|
|
|
t.Fatalf("header %d: canonical hash mismatch: have %x, want %x", number, hash, header.Hash())
|
|
|
|
}
|
|
|
|
header = chain.GetHeader(header.ParentHash, number-1)
|
|
|
|
}
|
2019-02-04 13:54:39 +03:00
|
|
|
}
|
2019-02-21 13:36:49 +03:00
|
|
|
|
|
|
|
// Tests that importing a sidechain (S), where
//   - S is sidechain, containing blocks [Sn...Sm]
//   - C is canon chain, containing blocks [G..Cn..Cm]
//   - A common ancestor is placed at prune-point + blocksBetweenCommonAncestorAndPruneblock
//   - The sidechain S is prepended with numCanonBlocksInSidechain blocks from the canon chain
//
// The mergePoint can be these values:
// -1: the transition won't happen
// 0:  the transition happens since genesis
// 1:  the transition happens after some chain segments
func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommonAncestorAndPruneblock int, mergePoint int) {
	// Generate a canonical chain to act as the main dataset. A copy of the
	// test config is taken so that TerminalTotalDifficulty can be mutated
	// below without affecting other tests sharing params.TestChainConfig.
	chainConfig := *params.TestChainConfig
	var (
		merger = consensus.NewMerger(rawdb.NewMemoryDatabase())
		engine = beacon.New(ethash.NewFaker())
		key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		addr   = crypto.PubkeyToAddress(key.PublicKey)
		nonce  = uint64(0)

		gspec = &Genesis{
			Config:  &chainConfig,
			Alloc:   GenesisAlloc{addr: {Balance: big.NewInt(math.MaxInt64)}},
			BaseFee: big.NewInt(params.InitialBaseFee),
		}
		signer = types.LatestSigner(gspec.Config)
	)
	// Generate and import the canonical chain
	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("failed to create tester chain: %v", err)
	}
	// Activate the transition since genesis if required
	if mergePoint == 0 {
		merger.ReachTTD()
		merger.FinalizePoS()

		// Set the terminal total difficulty in the config
		gspec.Config.TerminalTotalDifficulty = big.NewInt(0)
	}
	// Build 2*TriesInMemory blocks, each carrying one signed transfer so the
	// chain has non-trivial state to prune.
	genDb, blocks, _ := GenerateChainWithGenesis(gspec, engine, 2*TriesInMemory, func(i int, gen *BlockGen) {
		tx, err := types.SignTx(types.NewTransaction(nonce, common.HexToAddress("deadbeef"), big.NewInt(100), 21000, big.NewInt(int64(i+1)*params.GWei), nil), signer, key)
		if err != nil {
			t.Fatalf("failed to create tx: %v", err)
		}
		gen.AddTx(tx)
		nonce++
	})
	if n, err := chain.InsertChain(blocks); err != nil {
		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
	}

	// With 2*TriesInMemory blocks imported, everything older than the last
	// TriesInMemory blocks should have had its state pruned.
	lastPrunedIndex := len(blocks) - TriesInMemory - 1
	lastPrunedBlock := blocks[lastPrunedIndex]
	firstNonPrunedBlock := blocks[len(blocks)-TriesInMemory]

	// Verify pruning of lastPrunedBlock
	if chain.HasBlockAndState(lastPrunedBlock.Hash(), lastPrunedBlock.NumberU64()) {
		t.Errorf("Block %d not pruned", lastPrunedBlock.NumberU64())
	}
	// Verify firstNonPrunedBlock is not pruned
	if !chain.HasBlockAndState(firstNonPrunedBlock.Hash(), firstNonPrunedBlock.NumberU64()) {
		t.Errorf("Block %d pruned", firstNonPrunedBlock.NumberU64())
	}

	// Activate the transition in the middle of the chain
	if mergePoint == 1 {
		merger.ReachTTD()
		merger.FinalizePoS()
		// Set the terminal total difficulty in the config
		gspec.Config.TerminalTotalDifficulty = big.NewInt(int64(len(blocks)))
	}

	// Generate the sidechain
	// First block should be a known block, block after should be a pruned block. So
	// canon(pruned), side, side...

	// Generate fork chain, make it longer than canon. Note: a negative
	// blocksBetweenCommonAncestorAndPruneblock places the ancestor before the
	// prune point, i.e. on already-pruned state.
	parentIndex := lastPrunedIndex + blocksBetweenCommonAncestorAndPruneblock
	parent := blocks[parentIndex]
	fork, _ := GenerateChain(gspec.Config, parent, engine, genDb, 2*TriesInMemory, func(i int, b *BlockGen) {
		b.SetCoinbase(common.Address{2})
	})
	// Prepend the parent(s): the first numCanonBlocksInSidechain entries of the
	// import batch are blocks already present on the canonical chain.
	var sidechain []*types.Block
	for i := numCanonBlocksInSidechain; i > 0; i-- {
		sidechain = append(sidechain, blocks[parentIndex+1-i])
	}
	sidechain = append(sidechain, fork...)
	n, err := chain.InsertChain(sidechain)
	if err != nil {
		t.Errorf("Got error, %v number %d - %d", err, sidechain[n].NumberU64(), n)
	}
	// The fork is longer than the canonical chain, so it must become the head.
	head := chain.CurrentBlock()
	if got := fork[len(fork)-1].Hash(); got != head.Hash() {
		t.Fatalf("head wrong, expected %x got %x", head.Hash(), got)
	}
}
|
|
|
|
|
|
|
|
// Tests that importing a sidechain (S), where
|
2022-09-10 14:25:40 +03:00
|
|
|
// - S is sidechain, containing blocks [Sn...Sm]
|
|
|
|
// - C is canon chain, containing blocks [G..Cn..Cm]
|
|
|
|
// - The common ancestor Cc is pruned
|
|
|
|
// - The first block in S: Sn, is == Cn
|
|
|
|
//
|
2019-02-21 13:36:49 +03:00
|
|
|
// That is: the sidechain for import contains some blocks already present in canon chain.
|
2022-09-10 14:25:40 +03:00
|
|
|
// So the blocks are:
|
|
|
|
//
|
|
|
|
// [ Cn, Cn+1, Cc, Sn+3 ... Sm]
|
|
|
|
// ^ ^ ^ pruned
|
2019-02-21 13:36:49 +03:00
|
|
|
func TestPrunedImportSide(t *testing.T) {
|
|
|
|
//glogger := log.NewGlogHandler(log.StreamHandler(os.Stdout, log.TerminalFormat(false)))
|
|
|
|
//glogger.Verbosity(3)
|
|
|
|
//log.Root().SetHandler(log.Handler(glogger))
|
all: core rework for the merge transition (#23761)
* all: work for eth1/2 transtition
* consensus/beacon, eth: change beacon difficulty to 0
* eth: updates
* all: add terminalBlockDifficulty config, fix rebasing issues
* eth: implemented merge interop spec
* internal/ethapi: update to v1.0.0.alpha.2
This commit updates the code to the new spec, moving payloadId into
it's own object. It also fixes an issue with finalizing an empty blockhash.
It also properly sets the basefee
* all: sync polishes, other fixes + refactors
* core, eth: correct semantics for LeavePoW, EnterPoS
* core: fixed rebasing artifacts
* core: light: performance improvements
* core: use keyed field (f)
* core: eth: fix compilation issues + tests
* eth/catalyst: dbetter error codes
* all: move Merger to consensus/, remove reliance on it in bc
* all: renamed EnterPoS and LeavePoW to ReachTDD and FinalizePoS
* core: make mergelogs a function
* core: use InsertChain instead of InsertBlock
* les: drop merger from lightchain object
* consensus: add merger
* core: recoverAncestors in catalyst mode
* core: fix nitpick
* all: removed merger from beacon, use TTD, nitpicks
* consensus: eth: add docstring, removed unnecessary code duplication
* consensus/beacon: better comment
* all: easy to fix nitpicks by karalabe
* consensus/beacon: verify known headers to be sure
* core: comments
* core: eth: don't drop peers who advertise blocks, nitpicks
* core: never add beacon blocks to the future queue
* core: fixed nitpicks
* consensus/beacon: simplify IsTTDReached check
* consensus/beacon: correct IsTTDReached check
Co-authored-by: rjl493456442 <garyrong0905@gmail.com>
Co-authored-by: Péter Szilágyi <peterke@gmail.com>
2021-11-26 14:23:02 +03:00
|
|
|
testSideImport(t, 3, 3, -1)
|
|
|
|
testSideImport(t, 3, -3, -1)
|
|
|
|
testSideImport(t, 10, 0, -1)
|
|
|
|
testSideImport(t, 1, 10, -1)
|
|
|
|
testSideImport(t, 1, -10, -1)
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestPrunedImportSideWithMerging(t *testing.T) {
|
|
|
|
//glogger := log.NewGlogHandler(log.StreamHandler(os.Stdout, log.TerminalFormat(false)))
|
|
|
|
//glogger.Verbosity(3)
|
|
|
|
//log.Root().SetHandler(log.Handler(glogger))
|
|
|
|
testSideImport(t, 3, 3, 0)
|
|
|
|
testSideImport(t, 3, -3, 0)
|
|
|
|
testSideImport(t, 10, 0, 0)
|
|
|
|
testSideImport(t, 1, 10, 0)
|
|
|
|
testSideImport(t, 1, -10, 0)
|
|
|
|
|
|
|
|
testSideImport(t, 3, 3, 1)
|
|
|
|
testSideImport(t, 3, -3, 1)
|
|
|
|
testSideImport(t, 10, 0, 1)
|
|
|
|
testSideImport(t, 1, 10, 1)
|
|
|
|
testSideImport(t, 1, -10, 1)
|
2019-02-21 13:36:49 +03:00
|
|
|
}
|
2019-05-07 15:26:00 +03:00
|
|
|
|
2019-05-08 14:30:36 +03:00
|
|
|
// TestInsertKnownHeaders checks re-importing already-known data via the
// header-chain insertion path.
func TestInsertKnownHeaders(t *testing.T) {
	testInsertKnownChainData(t, "headers")
}
|
|
|
|
// TestInsertKnownReceiptChain checks re-importing already-known data via the
// receipt-chain insertion path.
func TestInsertKnownReceiptChain(t *testing.T) {
	testInsertKnownChainData(t, "receipts")
}
|
|
|
|
// TestInsertKnownBlocks checks re-importing already-known data via the full
// block insertion path.
func TestInsertKnownBlocks(t *testing.T) {
	testInsertKnownChainData(t, "blocks")
}
|
|
|
|
|
|
|
|
// testInsertKnownChainData verifies that re-importing known chain data (as
// headers, receipts, or full blocks, selected by typ) is accepted, and that
// the head only moves when the imported chain has a higher total difficulty.
// It also exercises re-imports after SetHead rollbacks against a freezer-
// backed database.
func testInsertKnownChainData(t *testing.T, typ string) {
	engine := ethash.NewFaker()
	genesis := &Genesis{
		Config:  params.TestChainConfig,
		BaseFee: big.NewInt(params.InitialBaseFee),
	}
	genDb, blocks, receipts := GenerateChainWithGenesis(genesis, engine, 32, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) })

	// A longer chain but total difficulty is lower.
	blocks2, receipts2 := GenerateChain(genesis.Config, blocks[len(blocks)-1], engine, genDb, 65, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) })

	// A shorter chain but total difficulty is higher.
	blocks3, receipts3 := GenerateChain(genesis.Config, blocks[len(blocks)-1], engine, genDb, 64, func(i int, b *BlockGen) {
		b.SetCoinbase(common.Address{1})
		b.OffsetTime(-9) // A higher difficulty
	})
	// Import the shared chain and the original canonical one. A freezer is
	// attached so ancient-store code paths are exercised too.
	chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
	if err != nil {
		t.Fatalf("failed to create temp freezer db: %v", err)
	}
	defer chaindb.Close()

	chain, err := NewBlockChain(chaindb, nil, genesis, nil, engine, vm.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("failed to create tester chain: %v", err)
	}

	// inserter feeds a batch of chain data through the path under test;
	// asserter checks that the corresponding head marker matches a block.
	var (
		inserter func(blocks []*types.Block, receipts []types.Receipts) error
		asserter func(t *testing.T, block *types.Block)
	)
	if typ == "headers" {
		inserter = func(blocks []*types.Block, receipts []types.Receipts) error {
			headers := make([]*types.Header, 0, len(blocks))
			for _, block := range blocks {
				headers = append(headers, block.Header())
			}
			_, err := chain.InsertHeaderChain(headers, 1)
			return err
		}
		asserter = func(t *testing.T, block *types.Block) {
			if chain.CurrentHeader().Hash() != block.Hash() {
				t.Fatalf("current head header mismatch, have %v, want %v", chain.CurrentHeader().Hash().Hex(), block.Hash().Hex())
			}
		}
	} else if typ == "receipts" {
		// Receipt-chain insertion (fast-sync style) requires the headers to be
		// inserted first.
		inserter = func(blocks []*types.Block, receipts []types.Receipts) error {
			headers := make([]*types.Header, 0, len(blocks))
			for _, block := range blocks {
				headers = append(headers, block.Header())
			}
			_, err := chain.InsertHeaderChain(headers, 1)
			if err != nil {
				return err
			}
			_, err = chain.InsertReceiptChain(blocks, receipts, 0)
			return err
		}
		asserter = func(t *testing.T, block *types.Block) {
			if chain.CurrentFastBlock().Hash() != block.Hash() {
				t.Fatalf("current head fast block mismatch, have %v, want %v", chain.CurrentFastBlock().Hash().Hex(), block.Hash().Hex())
			}
		}
	} else {
		inserter = func(blocks []*types.Block, receipts []types.Receipts) error {
			_, err := chain.InsertChain(blocks)
			return err
		}
		asserter = func(t *testing.T, block *types.Block) {
			if chain.CurrentBlock().Hash() != block.Hash() {
				t.Fatalf("current head block mismatch, have %v, want %v", chain.CurrentBlock().Hash().Hex(), block.Hash().Hex())
			}
		}
	}

	if err := inserter(blocks, receipts); err != nil {
		t.Fatalf("failed to insert chain data: %v", err)
	}

	// Reimport the chain data again. All the imported
	// chain data are regarded "known" data.
	if err := inserter(blocks, receipts); err != nil {
		t.Fatalf("failed to insert chain data: %v", err)
	}
	asserter(t, blocks[len(blocks)-1])

	// Import a long canonical chain with some known data as prefix.
	rollback := blocks[len(blocks)/2].NumberU64()

	chain.SetHead(rollback - 1)
	if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
		t.Fatalf("failed to insert chain data: %v", err)
	}
	asserter(t, blocks2[len(blocks2)-1])

	// Import a heavier shorter but higher total difficulty chain with some known data as prefix.
	if err := inserter(append(blocks, blocks3...), append(receipts, receipts3...)); err != nil {
		t.Fatalf("failed to insert chain data: %v", err)
	}
	asserter(t, blocks3[len(blocks3)-1])

	// Import a longer but lower total difficulty chain with some known data as prefix.
	if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
		t.Fatalf("failed to insert chain data: %v", err)
	}
	// The head shouldn't change.
	asserter(t, blocks3[len(blocks3)-1])

	// Rollback the heavier chain and re-insert the longer chain again
	chain.SetHead(rollback - 1)
	if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
		t.Fatalf("failed to insert chain data: %v", err)
	}
	asserter(t, blocks2[len(blocks2)-1])
}
|
|
|
|
|
all: core rework for the merge transition (#23761)
* all: work for eth1/2 transtition
* consensus/beacon, eth: change beacon difficulty to 0
* eth: updates
* all: add terminalBlockDifficulty config, fix rebasing issues
* eth: implemented merge interop spec
* internal/ethapi: update to v1.0.0.alpha.2
This commit updates the code to the new spec, moving payloadId into
it's own object. It also fixes an issue with finalizing an empty blockhash.
It also properly sets the basefee
* all: sync polishes, other fixes + refactors
* core, eth: correct semantics for LeavePoW, EnterPoS
* core: fixed rebasing artifacts
* core: light: performance improvements
* core: use keyed field (f)
* core: eth: fix compilation issues + tests
* eth/catalyst: dbetter error codes
* all: move Merger to consensus/, remove reliance on it in bc
* all: renamed EnterPoS and LeavePoW to ReachTDD and FinalizePoS
* core: make mergelogs a function
* core: use InsertChain instead of InsertBlock
* les: drop merger from lightchain object
* consensus: add merger
* core: recoverAncestors in catalyst mode
* core: fix nitpick
* all: removed merger from beacon, use TTD, nitpicks
* consensus: eth: add docstring, removed unnecessary code duplication
* consensus/beacon: better comment
* all: easy to fix nitpicks by karalabe
* consensus/beacon: verify known headers to be sure
* core: comments
* core: eth: don't drop peers who advertise blocks, nitpicks
* core: never add beacon blocks to the future queue
* core: fixed nitpicks
* consensus/beacon: simplify IsTTDReached check
* consensus/beacon: correct IsTTDReached check
Co-authored-by: rjl493456442 <garyrong0905@gmail.com>
Co-authored-by: Péter Szilágyi <peterke@gmail.com>
2021-11-26 14:23:02 +03:00
|
|
|
// TestInsertKnownHeadersWithMerging tests importing already-known headers
// with the merge transition active since genesis (mergeHeight == 0).
func TestInsertKnownHeadersWithMerging(t *testing.T) {
	testInsertKnownChainDataWithMerging(t, "headers", 0)
}
|
|
|
|
// TestInsertKnownReceiptChainWithMerging tests importing an already-known
// receipt chain with the merge transition active since genesis (mergeHeight == 0).
func TestInsertKnownReceiptChainWithMerging(t *testing.T) {
	testInsertKnownChainDataWithMerging(t, "receipts", 0)
}
|
|
|
|
// TestInsertKnownBlocksWithMerging tests importing already-known full blocks
// with the merge transition active since genesis (mergeHeight == 0).
func TestInsertKnownBlocksWithMerging(t *testing.T) {
	testInsertKnownChainDataWithMerging(t, "blocks", 0)
}
|
|
|
|
// TestInsertKnownHeadersAfterMerging tests importing already-known headers
// with the merge transition applied after the first segment (mergeHeight == 1).
func TestInsertKnownHeadersAfterMerging(t *testing.T) {
	testInsertKnownChainDataWithMerging(t, "headers", 1)
}
|
|
|
|
// TestInsertKnownReceiptChainAfterMerging tests importing an already-known
// receipt chain with the merge transition applied after the first segment
// (mergeHeight == 1).
func TestInsertKnownReceiptChainAfterMerging(t *testing.T) {
	testInsertKnownChainDataWithMerging(t, "receipts", 1)
}
|
|
|
|
// TestInsertKnownBlocksAfterMerging tests importing already-known full blocks
// with the merge transition applied after the first segment (mergeHeight == 1).
func TestInsertKnownBlocksAfterMerging(t *testing.T) {
	testInsertKnownChainDataWithMerging(t, "blocks", 1)
}
|
|
|
|
|
|
|
|
// testInsertKnownChainDataWithMerging exercises re-importing "known" chain
// data (headers, receipt chains, or full blocks, selected by typ) across the
// PoW->PoS transition, checking the resulting head pointer after each import.
//
// mergeHeight can be assigned in these values:
// 0: means the merging is applied since genesis
// 1: means the merging is applied after the first segment
func testInsertKnownChainDataWithMerging(t *testing.T, typ string, mergeHeight int) {
	// Copy the TestChainConfig so we can modify it during tests
	chainConfig := *params.TestChainConfig
	var (
		genesis = &Genesis{
			BaseFee: big.NewInt(params.InitialBaseFee),
			Config:  &chainConfig,
		}
		engine = beacon.New(ethash.NewFaker())
	)
	// Apply merging since genesis
	if mergeHeight == 0 {
		genesis.Config.TerminalTotalDifficulty = big.NewInt(0)
	}
	// First segment: 32 blocks on top of genesis.
	genDb, blocks, receipts := GenerateChainWithGenesis(genesis, engine, 32, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) })

	// Apply merging after the first segment
	if mergeHeight == 1 {
		genesis.Config.TerminalTotalDifficulty = big.NewInt(int64(len(blocks)))
	}
	// Longer chain and shorter chain
	blocks2, receipts2 := GenerateChain(genesis.Config, blocks[len(blocks)-1], engine, genDb, 65, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) })
	blocks3, receipts3 := GenerateChain(genesis.Config, blocks[len(blocks)-1], engine, genDb, 64, func(i int, b *BlockGen) {
		b.SetCoinbase(common.Address{1})
		b.OffsetTime(-9) // Time shifted, difficulty shouldn't be changed
	})
	// Import the shared chain and the original canonical one
	chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
	if err != nil {
		t.Fatalf("failed to create temp freezer db: %v", err)
	}
	defer chaindb.Close()

	chain, err := NewBlockChain(chaindb, nil, genesis, nil, engine, vm.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("failed to create tester chain: %v", err)
	}
	var (
		// inserter imports one batch using the scheme selected by typ;
		// asserter verifies that the matching head pointer ended up at block.
		inserter func(blocks []*types.Block, receipts []types.Receipts) error
		asserter func(t *testing.T, block *types.Block)
	)
	if typ == "headers" {
		inserter = func(blocks []*types.Block, receipts []types.Receipts) error {
			headers := make([]*types.Header, 0, len(blocks))
			for _, block := range blocks {
				headers = append(headers, block.Header())
			}
			_, err := chain.InsertHeaderChain(headers, 1)
			return err
		}
		asserter = func(t *testing.T, block *types.Block) {
			if chain.CurrentHeader().Hash() != block.Hash() {
				t.Fatalf("current head header mismatch, have %v, want %v", chain.CurrentHeader().Hash().Hex(), block.Hash().Hex())
			}
		}
	} else if typ == "receipts" {
		inserter = func(blocks []*types.Block, receipts []types.Receipts) error {
			// Receipt-chain import requires the headers to be in place first.
			headers := make([]*types.Header, 0, len(blocks))
			for _, block := range blocks {
				headers = append(headers, block.Header())
			}
			_, err := chain.InsertHeaderChain(headers, 1)
			if err != nil {
				return err
			}
			_, err = chain.InsertReceiptChain(blocks, receipts, 0)
			return err
		}
		asserter = func(t *testing.T, block *types.Block) {
			if chain.CurrentFastBlock().Hash() != block.Hash() {
				t.Fatalf("current head fast block mismatch, have %v, want %v", chain.CurrentFastBlock().Hash().Hex(), block.Hash().Hex())
			}
		}
	} else {
		inserter = func(blocks []*types.Block, receipts []types.Receipts) error {
			_, err := chain.InsertChain(blocks)
			return err
		}
		asserter = func(t *testing.T, block *types.Block) {
			if chain.CurrentBlock().Hash() != block.Hash() {
				t.Fatalf("current head block mismatch, have %v, want %v", chain.CurrentBlock().Hash().Hex(), block.Hash().Hex())
			}
		}
	}
	if err := inserter(blocks, receipts); err != nil {
		t.Fatalf("failed to insert chain data: %v", err)
	}
	// Reimport the chain data again. All the imported
	// chain data are regarded "known" data.
	if err := inserter(blocks, receipts); err != nil {
		t.Fatalf("failed to insert chain data: %v", err)
	}
	asserter(t, blocks[len(blocks)-1])

	// Import a long canonical chain with some known data as prefix.
	rollback := blocks[len(blocks)/2].NumberU64()
	chain.SetHead(rollback - 1)
	if err := inserter(blocks, receipts); err != nil {
		t.Fatalf("failed to insert chain data: %v", err)
	}
	asserter(t, blocks[len(blocks)-1])

	// Import a longer chain with some known data as prefix.
	if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
		t.Fatalf("failed to insert chain data: %v", err)
	}
	asserter(t, blocks2[len(blocks2)-1])

	// Import a shorter chain with some known data as prefix.
	// The reorg is expected since the fork choice rule is
	// already changed.
	if err := inserter(append(blocks, blocks3...), append(receipts, receipts3...)); err != nil {
		t.Fatalf("failed to insert chain data: %v", err)
	}
	// The head must now be the tip of the shorter chain (the reorg happened).
	asserter(t, blocks3[len(blocks3)-1])

	// Reimport the longer chain again, the reorg is still expected
	chain.SetHead(rollback - 1)
	if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
		t.Fatalf("failed to insert chain data: %v", err)
	}
	asserter(t, blocks2[len(blocks2)-1])
}
|
|
|
|
|
2021-09-07 13:31:17 +03:00
|
|
|
// getLongAndShortChains returns two chains: A is longer, B is heavier.
|
2022-09-07 21:21:59 +03:00
|
|
|
func getLongAndShortChains() (*BlockChain, []*types.Block, []*types.Block, *Genesis, error) {
|
2019-05-07 15:26:00 +03:00
|
|
|
// Generate a canonical chain to act as the main dataset
|
|
|
|
engine := ethash.NewFaker()
|
2022-09-07 21:21:59 +03:00
|
|
|
genesis := &Genesis{
|
2022-08-30 19:22:28 +03:00
|
|
|
Config: params.TestChainConfig,
|
2022-09-07 21:21:59 +03:00
|
|
|
BaseFee: big.NewInt(params.InitialBaseFee),
|
2022-08-30 19:22:28 +03:00
|
|
|
}
|
2019-05-07 15:26:00 +03:00
|
|
|
// Generate and import the canonical chain,
|
|
|
|
// Offset the time, to keep the difficulty low
|
2022-09-07 21:21:59 +03:00
|
|
|
genDb, longChain, _ := GenerateChainWithGenesis(genesis, engine, 80, func(i int, b *BlockGen) {
|
2019-05-07 15:26:00 +03:00
|
|
|
b.SetCoinbase(common.Address{1})
|
|
|
|
})
|
2022-09-07 21:21:59 +03:00
|
|
|
chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil)
|
2019-05-07 15:26:00 +03:00
|
|
|
if err != nil {
|
2022-09-07 21:21:59 +03:00
|
|
|
return nil, nil, nil, nil, fmt.Errorf("failed to create tester chain: %v", err)
|
2019-05-07 15:26:00 +03:00
|
|
|
}
|
|
|
|
// Generate fork chain, make it shorter than canon, with common ancestor pretty early
|
|
|
|
parentIndex := 3
|
|
|
|
parent := longChain[parentIndex]
|
2022-09-07 21:21:59 +03:00
|
|
|
heavyChainExt, _ := GenerateChain(genesis.Config, parent, engine, genDb, 75, func(i int, b *BlockGen) {
|
2019-05-07 15:26:00 +03:00
|
|
|
b.SetCoinbase(common.Address{2})
|
|
|
|
b.OffsetTime(-9)
|
|
|
|
})
|
2022-09-07 21:21:59 +03:00
|
|
|
var heavyChain []*types.Block
|
2021-09-07 13:31:17 +03:00
|
|
|
heavyChain = append(heavyChain, longChain[:parentIndex+1]...)
|
|
|
|
heavyChain = append(heavyChain, heavyChainExt...)
|
|
|
|
|
2019-05-07 15:26:00 +03:00
|
|
|
// Verify that the test is sane
|
|
|
|
var (
|
|
|
|
longerTd = new(big.Int)
|
|
|
|
shorterTd = new(big.Int)
|
|
|
|
)
|
|
|
|
for index, b := range longChain {
|
|
|
|
longerTd.Add(longerTd, b.Difficulty())
|
|
|
|
if index <= parentIndex {
|
|
|
|
shorterTd.Add(shorterTd, b.Difficulty())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for _, b := range heavyChain {
|
|
|
|
shorterTd.Add(shorterTd, b.Difficulty())
|
|
|
|
}
|
|
|
|
if shorterTd.Cmp(longerTd) <= 0 {
|
2022-09-07 21:21:59 +03:00
|
|
|
return nil, nil, nil, nil, fmt.Errorf("test is moot, heavyChain td (%v) must be larger than canon td (%v)", shorterTd, longerTd)
|
2019-05-07 15:26:00 +03:00
|
|
|
}
|
|
|
|
longerNum := longChain[len(longChain)-1].NumberU64()
|
|
|
|
shorterNum := heavyChain[len(heavyChain)-1].NumberU64()
|
|
|
|
if shorterNum >= longerNum {
|
2022-09-07 21:21:59 +03:00
|
|
|
return nil, nil, nil, nil, fmt.Errorf("test is moot, heavyChain num (%v) must be lower than canon num (%v)", shorterNum, longerNum)
|
2019-05-07 15:26:00 +03:00
|
|
|
}
|
2022-09-07 21:21:59 +03:00
|
|
|
return chain, longChain, heavyChain, genesis, nil
|
2019-05-07 15:26:00 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// TestReorgToShorterRemovesCanonMapping tests that if we
|
|
|
|
// 1. Have a chain [0 ... N .. X]
|
|
|
|
// 2. Reorg to shorter but heavier chain [0 ... N ... Y]
|
|
|
|
// 3. Then there should be no canon mapping for the block at height X
|
2021-10-11 20:25:21 +03:00
|
|
|
// 4. The forked block should still be retrievable by hash
|
2019-05-07 15:26:00 +03:00
|
|
|
func TestReorgToShorterRemovesCanonMapping(t *testing.T) {
|
2022-09-07 21:21:59 +03:00
|
|
|
chain, canonblocks, sideblocks, _, err := getLongAndShortChains()
|
2019-05-07 15:26:00 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if n, err := chain.InsertChain(canonblocks); err != nil {
|
|
|
|
t.Fatalf("block %d: failed to insert into chain: %v", n, err)
|
|
|
|
}
|
|
|
|
canonNum := chain.CurrentBlock().NumberU64()
|
2021-10-11 20:25:21 +03:00
|
|
|
canonHash := chain.CurrentBlock().Hash()
|
2019-05-07 15:26:00 +03:00
|
|
|
_, err = chain.InsertChain(sideblocks)
|
|
|
|
if err != nil {
|
|
|
|
t.Errorf("Got error, %v", err)
|
|
|
|
}
|
|
|
|
head := chain.CurrentBlock()
|
|
|
|
if got := sideblocks[len(sideblocks)-1].Hash(); got != head.Hash() {
|
|
|
|
t.Fatalf("head wrong, expected %x got %x", head.Hash(), got)
|
|
|
|
}
|
|
|
|
// We have now inserted a sidechain.
|
|
|
|
if blockByNum := chain.GetBlockByNumber(canonNum); blockByNum != nil {
|
|
|
|
t.Errorf("expected block to be gone: %v", blockByNum.NumberU64())
|
|
|
|
}
|
|
|
|
if headerByNum := chain.GetHeaderByNumber(canonNum); headerByNum != nil {
|
|
|
|
t.Errorf("expected header to be gone: %v", headerByNum.Number.Uint64())
|
|
|
|
}
|
2021-10-11 20:25:21 +03:00
|
|
|
if blockByHash := chain.GetBlockByHash(canonHash); blockByHash == nil {
|
|
|
|
t.Errorf("expected block to be present: %x", blockByHash.Hash())
|
|
|
|
}
|
|
|
|
if headerByHash := chain.GetHeaderByHash(canonHash); headerByHash == nil {
|
|
|
|
t.Errorf("expected header to be present: %x", headerByHash.Hash())
|
|
|
|
}
|
2019-05-07 15:26:00 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// TestReorgToShorterRemovesCanonMappingHeaderChain is the same scenario
|
|
|
|
// as TestReorgToShorterRemovesCanonMapping, but applied on headerchain
|
|
|
|
// imports -- that is, for fast sync
|
|
|
|
func TestReorgToShorterRemovesCanonMappingHeaderChain(t *testing.T) {
|
2022-09-07 21:21:59 +03:00
|
|
|
chain, canonblocks, sideblocks, _, err := getLongAndShortChains()
|
2019-05-07 15:26:00 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
// Convert into headers
|
|
|
|
canonHeaders := make([]*types.Header, len(canonblocks))
|
|
|
|
for i, block := range canonblocks {
|
|
|
|
canonHeaders[i] = block.Header()
|
|
|
|
}
|
|
|
|
if n, err := chain.InsertHeaderChain(canonHeaders, 0); err != nil {
|
|
|
|
t.Fatalf("header %d: failed to insert into chain: %v", n, err)
|
|
|
|
}
|
|
|
|
canonNum := chain.CurrentHeader().Number.Uint64()
|
2021-10-11 20:25:21 +03:00
|
|
|
canonHash := chain.CurrentBlock().Hash()
|
2019-05-07 15:26:00 +03:00
|
|
|
sideHeaders := make([]*types.Header, len(sideblocks))
|
|
|
|
for i, block := range sideblocks {
|
|
|
|
sideHeaders[i] = block.Header()
|
|
|
|
}
|
|
|
|
if n, err := chain.InsertHeaderChain(sideHeaders, 0); err != nil {
|
|
|
|
t.Fatalf("header %d: failed to insert into chain: %v", n, err)
|
|
|
|
}
|
|
|
|
head := chain.CurrentHeader()
|
|
|
|
if got := sideblocks[len(sideblocks)-1].Hash(); got != head.Hash() {
|
|
|
|
t.Fatalf("head wrong, expected %x got %x", head.Hash(), got)
|
|
|
|
}
|
|
|
|
// We have now inserted a sidechain.
|
|
|
|
if blockByNum := chain.GetBlockByNumber(canonNum); blockByNum != nil {
|
|
|
|
t.Errorf("expected block to be gone: %v", blockByNum.NumberU64())
|
|
|
|
}
|
|
|
|
if headerByNum := chain.GetHeaderByNumber(canonNum); headerByNum != nil {
|
|
|
|
t.Errorf("expected header to be gone: %v", headerByNum.Number.Uint64())
|
|
|
|
}
|
2021-10-11 20:25:21 +03:00
|
|
|
if blockByHash := chain.GetBlockByHash(canonHash); blockByHash == nil {
|
|
|
|
t.Errorf("expected block to be present: %x", blockByHash.Hash())
|
|
|
|
}
|
|
|
|
if headerByHash := chain.GetHeaderByHash(canonHash); headerByHash == nil {
|
|
|
|
t.Errorf("expected header to be present: %x", headerByHash.Hash())
|
|
|
|
}
|
2019-05-07 15:26:00 +03:00
|
|
|
}
|
2019-05-08 14:30:36 +03:00
|
|
|
|
2020-05-11 18:58:43 +03:00
|
|
|
func TestTransactionIndices(t *testing.T) {
|
|
|
|
// Configure and generate a sample block chain
|
|
|
|
var (
|
|
|
|
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
|
|
|
address = crypto.PubkeyToAddress(key.PublicKey)
|
2021-06-15 13:56:14 +03:00
|
|
|
funds = big.NewInt(100000000000000000)
|
|
|
|
gspec = &Genesis{
|
|
|
|
Config: params.TestChainConfig,
|
|
|
|
Alloc: GenesisAlloc{address: {Balance: funds}},
|
|
|
|
BaseFee: big.NewInt(params.InitialBaseFee),
|
|
|
|
}
|
2022-09-07 21:21:59 +03:00
|
|
|
signer = types.LatestSigner(gspec.Config)
|
2020-05-11 18:58:43 +03:00
|
|
|
)
|
|
|
|
height := uint64(128)
|
2022-09-07 21:21:59 +03:00
|
|
|
genDb, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), int(height), func(i int, block *BlockGen) {
|
2021-06-15 13:56:14 +03:00
|
|
|
tx, err := types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, block.header.BaseFee, nil), signer, key)
|
2020-05-11 18:58:43 +03:00
|
|
|
if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
block.AddTx(tx)
|
|
|
|
})
|
2022-09-07 21:21:59 +03:00
|
|
|
blocks2, _ := GenerateChain(gspec.Config, blocks[len(blocks)-1], ethash.NewFaker(), genDb, 10, nil)
|
2020-05-11 18:58:43 +03:00
|
|
|
|
|
|
|
check := func(tail *uint64, chain *BlockChain) {
|
|
|
|
stored := rawdb.ReadTxIndexTail(chain.db)
|
|
|
|
if tail == nil && stored != nil {
|
|
|
|
t.Fatalf("Oldest indexded block mismatch, want nil, have %d", *stored)
|
|
|
|
}
|
|
|
|
if tail != nil && *stored != *tail {
|
|
|
|
t.Fatalf("Oldest indexded block mismatch, want %d, have %d", *tail, *stored)
|
|
|
|
}
|
|
|
|
if tail != nil {
|
|
|
|
for i := *tail; i <= chain.CurrentBlock().NumberU64(); i++ {
|
|
|
|
block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i)
|
|
|
|
if block.Transactions().Len() == 0 {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
for _, tx := range block.Transactions() {
|
|
|
|
if index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()); index == nil {
|
|
|
|
t.Fatalf("Miss transaction indice, number %d hash %s", i, tx.Hash().Hex())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for i := uint64(0); i < *tail; i++ {
|
|
|
|
block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i)
|
|
|
|
if block.Transactions().Len() == 0 {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
for _, tx := range block.Transactions() {
|
|
|
|
if index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()); index != nil {
|
|
|
|
t.Fatalf("Transaction indice should be deleted, number %d hash %s", i, tx.Hash().Hex())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2022-04-08 16:44:55 +03:00
|
|
|
frdir := t.TempDir()
|
2021-03-22 21:06:30 +03:00
|
|
|
ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false)
|
2020-05-11 18:58:43 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to create temp freezer db: %v", err)
|
|
|
|
}
|
|
|
|
// Import all blocks into ancient db
|
|
|
|
l := uint64(0)
|
2022-08-30 19:22:28 +03:00
|
|
|
chain, err := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, &l)
|
2020-05-11 18:58:43 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to create tester chain: %v", err)
|
|
|
|
}
|
|
|
|
headers := make([]*types.Header, len(blocks))
|
|
|
|
for i, block := range blocks {
|
|
|
|
headers[i] = block.Header()
|
|
|
|
}
|
|
|
|
if n, err := chain.InsertHeaderChain(headers, 0); err != nil {
|
|
|
|
t.Fatalf("failed to insert header %d: %v", n, err)
|
|
|
|
}
|
|
|
|
if n, err := chain.InsertReceiptChain(blocks, receipts, 128); err != nil {
|
|
|
|
t.Fatalf("block %d: failed to insert into chain: %v", n, err)
|
|
|
|
}
|
|
|
|
chain.Stop()
|
|
|
|
ancientDb.Close()
|
|
|
|
|
|
|
|
// Init block chain with external ancients, check all needed indices has been indexed.
|
|
|
|
limit := []uint64{0, 32, 64, 128}
|
|
|
|
for _, l := range limit {
|
2021-03-22 21:06:30 +03:00
|
|
|
ancientDb, err = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false)
|
2020-05-11 18:58:43 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to create temp freezer db: %v", err)
|
|
|
|
}
|
2022-06-13 17:24:45 +03:00
|
|
|
l := l
|
2022-08-30 19:22:28 +03:00
|
|
|
chain, err = NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, &l)
|
2020-05-11 18:58:43 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to create tester chain: %v", err)
|
|
|
|
}
|
|
|
|
time.Sleep(50 * time.Millisecond) // Wait for indices initialisation
|
|
|
|
var tail uint64
|
|
|
|
if l != 0 {
|
|
|
|
tail = uint64(128) - l + 1
|
|
|
|
}
|
|
|
|
check(&tail, chain)
|
|
|
|
chain.Stop()
|
|
|
|
ancientDb.Close()
|
|
|
|
}
|
|
|
|
|
|
|
|
// Reconstruct a block chain which only reserves HEAD-64 tx indices
|
2021-03-22 21:06:30 +03:00
|
|
|
ancientDb, err = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false)
|
2020-05-11 18:58:43 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to create temp freezer db: %v", err)
|
|
|
|
}
|
2022-04-08 16:44:55 +03:00
|
|
|
defer ancientDb.Close()
|
2020-05-11 18:58:43 +03:00
|
|
|
|
|
|
|
limit = []uint64{0, 64 /* drop stale */, 32 /* shorten history */, 64 /* extend history */, 0 /* restore all */}
|
|
|
|
tails := []uint64{0, 67 /* 130 - 64 + 1 */, 100 /* 131 - 32 + 1 */, 69 /* 132 - 64 + 1 */, 0}
|
|
|
|
for i, l := range limit {
|
2022-06-13 17:24:45 +03:00
|
|
|
l := l
|
2022-08-30 19:22:28 +03:00
|
|
|
chain, err = NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, &l)
|
2020-05-11 18:58:43 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to create tester chain: %v", err)
|
|
|
|
}
|
|
|
|
chain.InsertChain(blocks2[i : i+1]) // Feed chain a higher block to trigger indices updater.
|
|
|
|
time.Sleep(50 * time.Millisecond) // Wait for indices initialisation
|
|
|
|
check(&tails[i], chain)
|
|
|
|
chain.Stop()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-11-26 14:26:03 +03:00
|
|
|
func TestSkipStaleTxIndicesInSnapSync(t *testing.T) {
|
2020-05-11 18:58:43 +03:00
|
|
|
// Configure and generate a sample block chain
|
|
|
|
var (
|
|
|
|
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
|
|
|
address = crypto.PubkeyToAddress(key.PublicKey)
|
2021-06-15 13:56:14 +03:00
|
|
|
funds = big.NewInt(100000000000000000)
|
2020-05-11 18:58:43 +03:00
|
|
|
gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{address: {Balance: funds}}}
|
2021-02-25 17:26:57 +03:00
|
|
|
signer = types.LatestSigner(gspec.Config)
|
2020-05-11 18:58:43 +03:00
|
|
|
)
|
2022-09-07 21:21:59 +03:00
|
|
|
_, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 128, func(i int, block *BlockGen) {
|
2021-06-15 13:56:14 +03:00
|
|
|
tx, err := types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, block.header.BaseFee, nil), signer, key)
|
2020-05-11 18:58:43 +03:00
|
|
|
if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
block.AddTx(tx)
|
|
|
|
})
|
|
|
|
|
|
|
|
check := func(tail *uint64, chain *BlockChain) {
|
|
|
|
stored := rawdb.ReadTxIndexTail(chain.db)
|
|
|
|
if tail == nil && stored != nil {
|
|
|
|
t.Fatalf("Oldest indexded block mismatch, want nil, have %d", *stored)
|
|
|
|
}
|
|
|
|
if tail != nil && *stored != *tail {
|
|
|
|
t.Fatalf("Oldest indexded block mismatch, want %d, have %d", *tail, *stored)
|
|
|
|
}
|
|
|
|
if tail != nil {
|
|
|
|
for i := *tail; i <= chain.CurrentBlock().NumberU64(); i++ {
|
|
|
|
block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i)
|
|
|
|
if block.Transactions().Len() == 0 {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
for _, tx := range block.Transactions() {
|
|
|
|
if index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()); index == nil {
|
|
|
|
t.Fatalf("Miss transaction indice, number %d hash %s", i, tx.Hash().Hex())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for i := uint64(0); i < *tail; i++ {
|
|
|
|
block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i)
|
|
|
|
if block.Transactions().Len() == 0 {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
for _, tx := range block.Transactions() {
|
|
|
|
if index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()); index != nil {
|
|
|
|
t.Fatalf("Transaction indice should be deleted, number %d hash %s", i, tx.Hash().Hex())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-09-07 21:21:59 +03:00
|
|
|
ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
|
2020-05-11 18:58:43 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to create temp freezer db: %v", err)
|
|
|
|
}
|
2022-04-08 16:44:55 +03:00
|
|
|
defer ancientDb.Close()
|
2020-05-11 18:58:43 +03:00
|
|
|
|
|
|
|
// Import all blocks into ancient db, only HEAD-32 indices are kept.
|
|
|
|
l := uint64(32)
|
2022-08-30 19:22:28 +03:00
|
|
|
chain, err := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, &l)
|
2020-05-11 18:58:43 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to create tester chain: %v", err)
|
|
|
|
}
|
|
|
|
headers := make([]*types.Header, len(blocks))
|
|
|
|
for i, block := range blocks {
|
|
|
|
headers[i] = block.Header()
|
|
|
|
}
|
|
|
|
if n, err := chain.InsertHeaderChain(headers, 0); err != nil {
|
|
|
|
t.Fatalf("failed to insert header %d: %v", n, err)
|
|
|
|
}
|
|
|
|
// The indices before ancient-N(32) should be ignored. After that all blocks should be indexed.
|
|
|
|
if n, err := chain.InsertReceiptChain(blocks, receipts, 64); err != nil {
|
|
|
|
t.Fatalf("block %d: failed to insert into chain: %v", n, err)
|
|
|
|
}
|
|
|
|
tail := uint64(32)
|
|
|
|
check(&tail, chain)
|
|
|
|
}
|
|
|
|
|
2019-05-08 14:30:36 +03:00
|
|
|
// Benchmarks large blocks with value transfers to non-existing accounts
|
|
|
|
// benchmarkLargeNumberOfValueToNonexisting measures the cost of importing
// numBlocks blocks, each containing numTxs simple value-transfer transactions
// from a single funded bank account. The recipient of every transaction is
// chosen by recipientFn(uniq), where uniq is a globally unique counter, so
// callers can target fresh accounts, one shared account, or a contract.
//
// NOTE(review): dataFn is currently unused — every transaction is built with
// nil calldata. Callers pass it anyway; confirm whether payload support was
// intended before relying on it.
//
// Only InsertChain is inside the timed region; chain generation and
// BlockChain construction are excluded via StopTimer/StartTimer.
func benchmarkLargeNumberOfValueToNonexisting(b *testing.B, numTxs, numBlocks int, recipientFn func(uint64) common.Address, dataFn func(uint64) []byte) {
	var (
		signer          = types.HomesteadSigner{}
		testBankKey, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		testBankAddress = crypto.PubkeyToAddress(testBankKey.PublicKey)
		bankFunds       = big.NewInt(100000000000000000)
		gspec           = &Genesis{
			Config: params.TestChainConfig,
			Alloc: GenesisAlloc{
				testBankAddress: {Balance: bankFunds},
				common.HexToAddress("0xc0de"): {
					Code:    []byte{0x60, 0x01, 0x50},
					Balance: big.NewInt(0),
				}, // push 1, pop
			},
			// Large gas limit so a single block can hold all numTxs transfers.
			GasLimit: 100e6, // 100 M
		}
	)
	// Generate the original common chain segment and the two competing forks
	engine := ethash.NewFaker()

	// blockGenerator fills block i with numTxs transfers; uniq doubles as the
	// bank account nonce since every transaction comes from the same sender.
	blockGenerator := func(i int, block *BlockGen) {
		block.SetCoinbase(common.Address{1})
		for txi := 0; txi < numTxs; txi++ {
			uniq := uint64(i*numTxs + txi)
			recipient := recipientFn(uniq)
			tx, err := types.SignTx(types.NewTransaction(uniq, recipient, big.NewInt(1), params.TxGas, block.header.BaseFee, nil), signer, testBankKey)
			if err != nil {
				b.Error(err)
			}
			block.AddTx(tx)
		}
	}

	_, shared, _ := GenerateChainWithGenesis(gspec, engine, numBlocks, blockGenerator)
	b.StopTimer()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// Import the shared chain and the original canonical one
		chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil)
		if err != nil {
			b.Fatalf("failed to create tester chain: %v", err)
		}
		// Time only the import itself, not the chain setup above.
		b.StartTimer()
		if _, err := chain.InsertChain(shared); err != nil {
			b.Fatalf("failed to insert shared chain: %v", err)
		}
		b.StopTimer()
		// Sanity check: the head block must actually contain all transactions,
		// otherwise the benchmark measured the wrong thing.
		if got := chain.CurrentBlock().Transactions().Len(); got != numTxs*numBlocks {
			b.Fatalf("Transactions were not included, expected %d, got %d", numTxs*numBlocks, got)
		}
	}
}
|
|
|
|
|
|
|
|
func BenchmarkBlockChain_1x1000ValueTransferToNonexisting(b *testing.B) {
|
|
|
|
var (
|
|
|
|
numTxs = 1000
|
|
|
|
numBlocks = 1
|
|
|
|
)
|
|
|
|
recipientFn := func(nonce uint64) common.Address {
|
2022-06-14 15:09:48 +03:00
|
|
|
return common.BigToAddress(new(big.Int).SetUint64(1337 + nonce))
|
2019-05-08 14:30:36 +03:00
|
|
|
}
|
|
|
|
dataFn := func(nonce uint64) []byte {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
benchmarkLargeNumberOfValueToNonexisting(b, numTxs, numBlocks, recipientFn, dataFn)
|
|
|
|
}
|
|
|
|
|
|
|
|
func BenchmarkBlockChain_1x1000ValueTransferToExisting(b *testing.B) {
|
|
|
|
var (
|
|
|
|
numTxs = 1000
|
|
|
|
numBlocks = 1
|
|
|
|
)
|
|
|
|
b.StopTimer()
|
|
|
|
b.ResetTimer()
|
|
|
|
|
|
|
|
recipientFn := func(nonce uint64) common.Address {
|
2022-06-14 15:09:48 +03:00
|
|
|
return common.BigToAddress(new(big.Int).SetUint64(1337))
|
2019-05-08 14:30:36 +03:00
|
|
|
}
|
|
|
|
dataFn := func(nonce uint64) []byte {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
benchmarkLargeNumberOfValueToNonexisting(b, numTxs, numBlocks, recipientFn, dataFn)
|
|
|
|
}
|
|
|
|
|
|
|
|
func BenchmarkBlockChain_1x1000Executions(b *testing.B) {
|
|
|
|
var (
|
|
|
|
numTxs = 1000
|
|
|
|
numBlocks = 1
|
|
|
|
)
|
|
|
|
b.StopTimer()
|
|
|
|
b.ResetTimer()
|
|
|
|
|
|
|
|
recipientFn := func(nonce uint64) common.Address {
|
2022-06-14 15:09:48 +03:00
|
|
|
return common.BigToAddress(new(big.Int).SetUint64(0xc0de))
|
2019-05-08 14:30:36 +03:00
|
|
|
}
|
|
|
|
dataFn := func(nonce uint64) []byte {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
benchmarkLargeNumberOfValueToNonexisting(b, numTxs, numBlocks, recipientFn, dataFn)
|
|
|
|
}
|
2019-08-21 10:17:19 +03:00
|
|
|
|
|
|
|
// Tests that importing a some old blocks, where all blocks are before the
|
|
|
|
// pruning point.
|
|
|
|
// This internally leads to a sidechain import, since the blocks trigger an
|
|
|
|
// ErrPrunedAncestor error.
|
|
|
|
// This may e.g. happen if
|
2022-09-10 14:25:40 +03:00
|
|
|
// 1. Downloader rollbacks a batch of inserted blocks and exits
|
|
|
|
// 2. Downloader starts to sync again
|
|
|
|
// 3. The blocks fetched are all known and canonical blocks
|
2019-08-21 10:17:19 +03:00
|
|
|
func TestSideImportPrunedBlocks(t *testing.T) {
|
|
|
|
// Generate a canonical chain to act as the main dataset
|
|
|
|
engine := ethash.NewFaker()
|
2022-09-07 21:21:59 +03:00
|
|
|
genesis := &Genesis{
|
2022-08-30 19:22:28 +03:00
|
|
|
Config: params.TestChainConfig,
|
2022-09-07 21:21:59 +03:00
|
|
|
BaseFee: big.NewInt(params.InitialBaseFee),
|
2022-08-30 19:22:28 +03:00
|
|
|
}
|
2019-08-21 10:17:19 +03:00
|
|
|
// Generate and import the canonical chain
|
2022-09-07 21:21:59 +03:00
|
|
|
_, blocks, _ := GenerateChainWithGenesis(genesis, engine, 2*TriesInMemory, nil)
|
all: core rework for the merge transition (#23761)
* all: work for eth1/2 transtition
* consensus/beacon, eth: change beacon difficulty to 0
* eth: updates
* all: add terminalBlockDifficulty config, fix rebasing issues
* eth: implemented merge interop spec
* internal/ethapi: update to v1.0.0.alpha.2
This commit updates the code to the new spec, moving payloadId into
it's own object. It also fixes an issue with finalizing an empty blockhash.
It also properly sets the basefee
* all: sync polishes, other fixes + refactors
* core, eth: correct semantics for LeavePoW, EnterPoS
* core: fixed rebasing artifacts
* core: light: performance improvements
* core: use keyed field (f)
* core: eth: fix compilation issues + tests
* eth/catalyst: dbetter error codes
* all: move Merger to consensus/, remove reliance on it in bc
* all: renamed EnterPoS and LeavePoW to ReachTDD and FinalizePoS
* core: make mergelogs a function
* core: use InsertChain instead of InsertBlock
* les: drop merger from lightchain object
* consensus: add merger
* core: recoverAncestors in catalyst mode
* core: fix nitpick
* all: removed merger from beacon, use TTD, nitpicks
* consensus: eth: add docstring, removed unnecessary code duplication
* consensus/beacon: better comment
* all: easy to fix nitpicks by karalabe
* consensus/beacon: verify known headers to be sure
* core: comments
* core: eth: don't drop peers who advertise blocks, nitpicks
* core: never add beacon blocks to the future queue
* core: fixed nitpicks
* consensus/beacon: simplify IsTTDReached check
* consensus/beacon: correct IsTTDReached check
Co-authored-by: rjl493456442 <garyrong0905@gmail.com>
Co-authored-by: Péter Szilágyi <peterke@gmail.com>
2021-11-26 14:23:02 +03:00
|
|
|
|
2022-09-07 21:21:59 +03:00
|
|
|
chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil)
|
2019-08-21 10:17:19 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to create tester chain: %v", err)
|
|
|
|
}
|
|
|
|
if n, err := chain.InsertChain(blocks); err != nil {
|
|
|
|
t.Fatalf("block %d: failed to insert into chain: %v", n, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
lastPrunedIndex := len(blocks) - TriesInMemory - 1
|
|
|
|
lastPrunedBlock := blocks[lastPrunedIndex]
|
|
|
|
|
|
|
|
// Verify pruning of lastPrunedBlock
|
|
|
|
if chain.HasBlockAndState(lastPrunedBlock.Hash(), lastPrunedBlock.NumberU64()) {
|
|
|
|
t.Errorf("Block %d not pruned", lastPrunedBlock.NumberU64())
|
|
|
|
}
|
|
|
|
firstNonPrunedBlock := blocks[len(blocks)-TriesInMemory]
|
|
|
|
// Verify firstNonPrunedBlock is not pruned
|
|
|
|
if !chain.HasBlockAndState(firstNonPrunedBlock.Hash(), firstNonPrunedBlock.NumberU64()) {
|
|
|
|
t.Errorf("Block %d pruned", firstNonPrunedBlock.NumberU64())
|
|
|
|
}
|
|
|
|
// Now re-import some old blocks
|
|
|
|
blockToReimport := blocks[5:8]
|
|
|
|
_, err = chain.InsertChain(blockToReimport)
|
|
|
|
if err != nil {
|
|
|
|
t.Errorf("Got error, %v", err)
|
|
|
|
}
|
|
|
|
}
|
2019-09-16 11:39:41 +03:00
|
|
|
|
|
|
|
// TestDeleteCreateRevert tests a weird state transition corner case that we hit
|
|
|
|
// while changing the internals of statedb. The workflow is that a contract is
|
|
|
|
// self destructed, then in a followup transaction (but same block) it's created
|
|
|
|
// again and the transaction reverted.
|
|
|
|
//
|
|
|
|
// The original statedb implementation flushed dirty objects to the tries after
|
|
|
|
// each transaction, so this works ok. The rework accumulated writes in memory
|
|
|
|
// first, but the journal wiped the entire state object on create-revert.
|
|
|
|
// TestDeleteCreateRevert: contract AA selfdestructs in tx 1; in tx 2 (same
// block) BB sends 1 wei to AA — recreating it — and then REVERTs. The block
// must still import cleanly (regression test for the statedb journal wiping
// a recreated object on revert).
func TestDeleteCreateRevert(t *testing.T) {
	var (
		aa     = common.HexToAddress("0x000000000000000000000000000000000000aaaa")
		bb     = common.HexToAddress("0x000000000000000000000000000000000000bbbb")
		engine = ethash.NewFaker()

		// A sender who makes transactions, has some funds
		key, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		address = crypto.PubkeyToAddress(key.PublicKey)
		funds   = big.NewInt(100000000000000000)
		gspec   = &Genesis{
			Config: params.TestChainConfig,
			Alloc: GenesisAlloc{
				address: {Balance: funds},
				// The address 0xAAAAA selfdestructs if called
				aa: {
					// Code needs to just selfdestruct
					Code:    []byte{byte(vm.PC), byte(vm.SELFDESTRUCT)},
					Nonce:   1,
					Balance: big.NewInt(0),
				},
				// The address 0xBBBB send 1 wei to 0xAAAA, then reverts
				bb: {
					Code: []byte{
						byte(vm.PC),          // [0]
						byte(vm.DUP1),        // [0,0]
						byte(vm.DUP1),        // [0,0,0]
						byte(vm.DUP1),        // [0,0,0,0]
						byte(vm.PUSH1), 0x01, // [0,0,0,0,1] (value)
						byte(vm.PUSH2), 0xaa, 0xaa, // [0,0,0,0,1, 0xaaaa]
						byte(vm.GAS),
						byte(vm.CALL),
						byte(vm.REVERT),
					},
					Balance: big.NewInt(1),
				},
			},
		}
	)
	// One block carrying both transactions, in order: kill AA, then CALL+REVERT.
	_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) {
		b.SetCoinbase(common.Address{1})
		// One transaction to AAAA
		tx, _ := types.SignTx(types.NewTransaction(0, aa,
			big.NewInt(0), 50000, b.header.BaseFee, nil), types.HomesteadSigner{}, key)
		b.AddTx(tx)
		// One transaction to BBBB
		tx, _ = types.SignTx(types.NewTransaction(1, bb,
			big.NewInt(0), 100000, b.header.BaseFee, nil), types.HomesteadSigner{}, key)
		b.AddTx(tx)
	})
	// Import the canonical chain
	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("failed to create tester chain: %v", err)
	}
	// Success criterion is simply that the block imports without error.
	if n, err := chain.InsertChain(blocks); err != nil {
		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
	}
}
|
2020-03-02 15:46:56 +03:00
|
|
|
|
2020-03-02 16:06:44 +03:00
|
|
|
// TestDeleteRecreateSlots tests a state-transition that contains both deletion
|
2020-03-02 15:46:56 +03:00
|
|
|
// and recreation of contract state.
|
|
|
|
// Contract A exists, has slots 1 and 2 set
|
|
|
|
// Tx 1: Selfdestruct A
|
|
|
|
// Tx 2: Re-create A, set slots 3 and 4
|
|
|
|
// Expected outcome is that _all_ slots are cleared from A, due to the selfdestruct,
|
|
|
|
// and then the new slots exist
|
2020-03-02 16:06:44 +03:00
|
|
|
// TestDeleteRecreateSlots: within one block, contract AA (slots 1 and 2 set)
// is selfdestructed by tx 1, then tx 2 calls BB which CREATE2-redeploys AA.
// The redeployed AA must have only the freshly written slots 3 and 4 — the
// selfdestruct must have wiped slots 1 and 2.
func TestDeleteRecreateSlots(t *testing.T) {
	var (
		engine = ethash.NewFaker()

		// A sender who makes transactions, has some funds
		key, _    = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		address   = crypto.PubkeyToAddress(key.PublicKey)
		funds     = big.NewInt(1000000000000000)
		bb        = common.HexToAddress("0x000000000000000000000000000000000000bbbb")
		aaStorage = make(map[common.Hash]common.Hash)          // Initial storage in AA
		aaCode    = []byte{byte(vm.PC), byte(vm.SELFDESTRUCT)} // Code for AA (simple selfdestruct)
	)
	// Populate two slots
	aaStorage[common.HexToHash("01")] = common.HexToHash("01")
	aaStorage[common.HexToHash("02")] = common.HexToHash("02")

	// The bb-code needs to CREATE2 the aa contract. It consists of
	// both initcode and deployment code
	// initcode:
	// 1. Set slots 3=3, 4=4,
	// 2. Return aaCode

	initCode := []byte{
		byte(vm.PUSH1), 0x3, // value
		byte(vm.PUSH1), 0x3, // location
		byte(vm.SSTORE),     // Set slot[3] = 3
		byte(vm.PUSH1), 0x4, // value
		byte(vm.PUSH1), 0x4, // location
		byte(vm.SSTORE), // Set slot[4] = 4
		// Slots are set, now return the code
		byte(vm.PUSH2), byte(vm.PC), byte(vm.SELFDESTRUCT), // Push code on stack
		byte(vm.PUSH1), 0x0, // memory start on stack
		byte(vm.MSTORE),
		// Code is now in memory.
		byte(vm.PUSH1), 0x2, // size
		byte(vm.PUSH1), byte(32 - 2), // offset
		byte(vm.RETURN),
	}
	// The deployer below pushes the whole initcode with one PUSHn, so the
	// initcode must fit in a single 32-byte push.
	if l := len(initCode); l > 32 {
		t.Fatalf("init code is too long for a pushx, need a more elaborate deployer")
	}
	bbCode := []byte{
		// Push initcode onto stack
		byte(vm.PUSH1) + byte(len(initCode)-1)}
	bbCode = append(bbCode, initCode...)
	bbCode = append(bbCode, []byte{
		byte(vm.PUSH1), 0x0, // memory start on stack
		byte(vm.MSTORE),
		byte(vm.PUSH1), 0x00, // salt
		byte(vm.PUSH1), byte(len(initCode)), // size
		byte(vm.PUSH1), byte(32 - len(initCode)), // offset
		byte(vm.PUSH1), 0x00, // endowment
		byte(vm.CREATE2),
	}...)

	// AA's address is the CREATE2 address derived from BB, a zero salt and the
	// initcode hash — so the redeploy lands exactly on the destroyed contract.
	initHash := crypto.Keccak256Hash(initCode)
	aa := crypto.CreateAddress2(bb, [32]byte{}, initHash[:])
	t.Logf("Destination address: %x\n", aa)

	gspec := &Genesis{
		Config: params.TestChainConfig,
		Alloc: GenesisAlloc{
			address: {Balance: funds},
			// The address 0xAAAAA selfdestructs if called
			aa: {
				// Code needs to just selfdestruct
				Code:    aaCode,
				Nonce:   1,
				Balance: big.NewInt(0),
				Storage: aaStorage,
			},
			// The contract BB recreates AA
			bb: {
				Code:    bbCode,
				Balance: big.NewInt(1),
			},
		},
	}
	_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) {
		b.SetCoinbase(common.Address{1})
		// One transaction to AA, to kill it
		tx, _ := types.SignTx(types.NewTransaction(0, aa,
			big.NewInt(0), 50000, b.header.BaseFee, nil), types.HomesteadSigner{}, key)
		b.AddTx(tx)
		// One transaction to BB, to recreate AA
		tx, _ = types.SignTx(types.NewTransaction(1, bb,
			big.NewInt(0), 100000, b.header.BaseFee, nil), types.HomesteadSigner{}, key)
		b.AddTx(tx)
	})
	// Import the canonical chain
	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{
		Debug:  true,
		Tracer: logger.NewJSONLogger(nil, os.Stdout),
	}, nil, nil)
	if err != nil {
		t.Fatalf("failed to create tester chain: %v", err)
	}
	if n, err := chain.InsertChain(blocks); err != nil {
		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
	}
	statedb, _ := chain.State()

	// If all is correct, then slot 1 and 2 are zero
	if got, exp := statedb.GetState(aa, common.HexToHash("01")), (common.Hash{}); got != exp {
		t.Errorf("got %x exp %x", got, exp)
	}
	if got, exp := statedb.GetState(aa, common.HexToHash("02")), (common.Hash{}); got != exp {
		t.Errorf("got %x exp %x", got, exp)
	}
	// Also, 3 and 4 should be set
	if got, exp := statedb.GetState(aa, common.HexToHash("03")), common.HexToHash("03"); got != exp {
		t.Fatalf("got %x exp %x", got, exp)
	}
	if got, exp := statedb.GetState(aa, common.HexToHash("04")), common.HexToHash("04"); got != exp {
		t.Fatalf("got %x exp %x", got, exp)
	}
}
|
2020-03-02 16:06:44 +03:00
|
|
|
|
|
|
|
// TestDeleteRecreateAccount tests a state-transition that contains deletion of a
|
|
|
|
// contract with storage, and a recreate of the same contract via a
|
|
|
|
// regular value-transfer
|
|
|
|
// Expected outcome is that _all_ slots are cleared from A
|
|
|
|
// TestDeleteRecreateAccount: contract AA (with storage slots 1 and 2) is
// selfdestructed by tx 1, then recreated in the same block by tx 2 via a
// plain value transfer. The recreated account must have no storage left.
func TestDeleteRecreateAccount(t *testing.T) {
	var (
		engine = ethash.NewFaker()

		// A sender who makes transactions, has some funds
		key, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		address = crypto.PubkeyToAddress(key.PublicKey)
		funds   = big.NewInt(1000000000000000)

		aa        = common.HexToAddress("0x7217d81b76bdd8707601e959454e3d776aee5f43")
		aaStorage = make(map[common.Hash]common.Hash)          // Initial storage in AA
		aaCode    = []byte{byte(vm.PC), byte(vm.SELFDESTRUCT)} // Code for AA (simple selfdestruct)
	)
	// Populate two slots
	aaStorage[common.HexToHash("01")] = common.HexToHash("01")
	aaStorage[common.HexToHash("02")] = common.HexToHash("02")

	gspec := &Genesis{
		Config: params.TestChainConfig,
		Alloc: GenesisAlloc{
			address: {Balance: funds},
			// The address 0xAAAAA selfdestructs if called
			aa: {
				// Code needs to just selfdestruct
				Code:    aaCode,
				Nonce:   1,
				Balance: big.NewInt(0),
				Storage: aaStorage,
			},
		},
	}
	// Both transactions land in the same block: destroy, then recreate via a
	// 1-wei value transfer to the now-dead address.
	_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) {
		b.SetCoinbase(common.Address{1})
		// One transaction to AA, to kill it
		tx, _ := types.SignTx(types.NewTransaction(0, aa,
			big.NewInt(0), 50000, b.header.BaseFee, nil), types.HomesteadSigner{}, key)
		b.AddTx(tx)
		// One transaction to AA, to recreate it (but without storage
		tx, _ = types.SignTx(types.NewTransaction(1, aa,
			big.NewInt(1), 100000, b.header.BaseFee, nil), types.HomesteadSigner{}, key)
		b.AddTx(tx)
	})
	// Import the canonical chain
	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{
		Debug:  true,
		Tracer: logger.NewJSONLogger(nil, os.Stdout),
	}, nil, nil)
	if err != nil {
		t.Fatalf("failed to create tester chain: %v", err)
	}
	if n, err := chain.InsertChain(blocks); err != nil {
		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
	}
	statedb, _ := chain.State()

	// If all is correct, then both slots are zero
	if got, exp := statedb.GetState(aa, common.HexToHash("01")), (common.Hash{}); got != exp {
		t.Errorf("got %x exp %x", got, exp)
	}
	if got, exp := statedb.GetState(aa, common.HexToHash("02")), (common.Hash{}); got != exp {
		t.Errorf("got %x exp %x", got, exp)
	}
}
|
2020-03-04 15:39:27 +03:00
|
|
|
|
|
|
|
// TestDeleteRecreateSlotsAcrossManyBlocks tests multiple state-transition that contains both deletion
// and recreation of contract state.
// Contract A exists, has slots 1 and 2 set
// Tx 1: Selfdestruct A
// Tx 2: Re-create A, set slots 3 and 4
// Expected outcome is that _all_ slots are cleared from A, due to the selfdestruct,
// and then the new slots exist
func TestDeleteRecreateSlotsAcrossManyBlocks(t *testing.T) {
	var (
		engine = ethash.NewFaker()

		// A sender who makes transactions, has some funds
		key, _    = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		address   = crypto.PubkeyToAddress(key.PublicKey)
		funds     = big.NewInt(1000000000000000)
		bb        = common.HexToAddress("0x000000000000000000000000000000000000bbbb")
		aaStorage = make(map[common.Hash]common.Hash)          // Initial storage in AA
		aaCode    = []byte{byte(vm.PC), byte(vm.SELFDESTRUCT)} // Code for AA (simple selfdestruct)
	)
	// Populate two slots
	aaStorage[common.HexToHash("01")] = common.HexToHash("01")
	aaStorage[common.HexToHash("02")] = common.HexToHash("02")

	// The bb-code needs to CREATE2 the aa contract. It consists of
	// both initcode and deployment code
	// initcode:
	// 1. Set slots 3=blocknum+1, 4=4,
	// 2. Return aaCode

	initCode := []byte{
		byte(vm.PUSH1), 0x1, //
		byte(vm.NUMBER),     // value = number + 1
		byte(vm.ADD),        //
		byte(vm.PUSH1), 0x3, // location
		byte(vm.SSTORE),     // Set slot[3] = number + 1
		byte(vm.PUSH1), 0x4, // value
		byte(vm.PUSH1), 0x4, // location
		byte(vm.SSTORE), // Set slot[4] = 4
		// Slots are set, now return the code
		// The two runtime-code bytes (PC, SELFDESTRUCT) are pushed as one
		// PUSH2 immediate, stored to memory word 0, and the trailing two
		// bytes of that word are returned as the deployed code.
		byte(vm.PUSH2), byte(vm.PC), byte(vm.SELFDESTRUCT), // Push code on stack
		byte(vm.PUSH1), 0x0, // memory start on stack
		byte(vm.MSTORE),
		// Code is now in memory.
		byte(vm.PUSH1), 0x2, // size
		byte(vm.PUSH1), byte(32 - 2), // offset
		byte(vm.RETURN),
	}
	// The deployer below pushes the entire initcode as a single PUSHn
	// immediate, which caps initcode at 32 bytes.
	if l := len(initCode); l > 32 {
		t.Fatalf("init code is too long for a pushx, need a more elaborate deployer")
	}
	bbCode := []byte{
		// Push initcode onto stack
		byte(vm.PUSH1) + byte(len(initCode)-1)}
	bbCode = append(bbCode, initCode...)
	bbCode = append(bbCode, []byte{
		byte(vm.PUSH1), 0x0, // memory start on stack
		byte(vm.MSTORE),
		byte(vm.PUSH1), 0x00, // salt
		byte(vm.PUSH1), byte(len(initCode)), // size
		byte(vm.PUSH1), byte(32 - len(initCode)), // offset
		byte(vm.PUSH1), 0x00, // endowment
		byte(vm.CREATE2),
	}...)

	// With a zero salt, the CREATE2 target address depends only on bb and
	// the initcode hash, so every resurrection lands on the same address aa.
	initHash := crypto.Keccak256Hash(initCode)
	aa := crypto.CreateAddress2(bb, [32]byte{}, initHash[:])
	t.Logf("Destination address: %x\n", aa)
	gspec := &Genesis{
		Config: params.TestChainConfig,
		Alloc: GenesisAlloc{
			address: {Balance: funds},
			// The address 0xAAAAA selfdestructs if called
			aa: {
				// Code needs to just selfdestruct
				Code:    aaCode,
				Nonce:   1,
				Balance: big.NewInt(0),
				Storage: aaStorage,
			},
			// The contract BB recreates AA
			bb: {
				Code:    bbCode,
				Balance: big.NewInt(1),
			},
		},
	}
	// Sender nonce, shared (via closure) by the tx-building helpers below.
	var nonce uint64

	// expectation captures the predicted post-block state of contract aa:
	// whether it exists and which slot->value pairs it should hold.
	type expectation struct {
		exist    bool
		blocknum int
		values   map[int]int
	}
	var current = &expectation{
		exist:    true, // exists in genesis
		blocknum: 0,
		values:   map[int]int{1: 1, 2: 2},
	}
	var expectations []*expectation
	// newDestruct builds a tx calling aa (which selfdestructs) and updates
	// the expectation accordingly.
	var newDestruct = func(e *expectation, b *BlockGen) *types.Transaction {
		tx, _ := types.SignTx(types.NewTransaction(nonce, aa,
			big.NewInt(0), 50000, b.header.BaseFee, nil), types.HomesteadSigner{}, key)
		nonce++
		if e.exist {
			e.exist = false
			e.values = nil
		}
		t.Logf("block %d; adding destruct\n", e.blocknum)
		return tx
	}
	// newResurrect builds a tx calling bb (which CREATE2-redeploys aa with
	// slots 3 and 4 set) and updates the expectation accordingly.
	var newResurrect = func(e *expectation, b *BlockGen) *types.Transaction {
		tx, _ := types.SignTx(types.NewTransaction(nonce, bb,
			big.NewInt(0), 100000, b.header.BaseFee, nil), types.HomesteadSigner{}, key)
		nonce++
		if !e.exist {
			e.exist = true
			e.values = map[int]int{3: e.blocknum + 1, 4: 4}
		}
		t.Logf("block %d; adding resurrect\n", e.blocknum)
		return tx
	}

	// Generate 150 blocks interleaving destructs and resurrects on a
	// mod-2/3/5/7 cadence, recording the expected aa state after each block.
	_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 150, func(i int, b *BlockGen) {
		// Start from a copy of the previous block's expectation.
		var exp = new(expectation)
		exp.blocknum = i + 1
		exp.values = make(map[int]int)
		for k, v := range current.values {
			exp.values[k] = v
		}
		exp.exist = current.exist

		b.SetCoinbase(common.Address{1})
		if i%2 == 0 {
			b.AddTx(newDestruct(exp, b))
		}
		if i%3 == 0 {
			b.AddTx(newResurrect(exp, b))
		}
		if i%5 == 0 {
			b.AddTx(newDestruct(exp, b))
		}
		if i%7 == 0 {
			b.AddTx(newResurrect(exp, b))
		}
		expectations = append(expectations, exp)
		current = exp
	})
	// Import the canonical chain
	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{
		//Debug:  true,
		//Tracer: vm.NewJSONLogger(nil, os.Stdout),
	}, nil, nil)
	if err != nil {
		t.Fatalf("failed to create tester chain: %v", err)
	}
	var asHash = func(num int) common.Hash {
		return common.BytesToHash([]byte{byte(num)})
	}
	// Insert block by block and verify the aa state after each one against
	// the recorded expectation.
	for i, block := range blocks {
		blockNum := i + 1
		if n, err := chain.InsertChain([]*types.Block{block}); err != nil {
			t.Fatalf("block %d: failed to insert into chain: %v", n, err)
		}
		statedb, _ := chain.State()
		// If all is correct, then slot 1 and 2 are zero
		if got, exp := statedb.GetState(aa, common.HexToHash("01")), (common.Hash{}); got != exp {
			t.Errorf("block %d, got %x exp %x", blockNum, got, exp)
		}
		if got, exp := statedb.GetState(aa, common.HexToHash("02")), (common.Hash{}); got != exp {
			t.Errorf("block %d, got %x exp %x", blockNum, got, exp)
		}
		exp := expectations[i]
		if exp.exist {
			if !statedb.Exist(aa) {
				t.Fatalf("block %d, expected %v to exist, it did not", blockNum, aa)
			}
			for slot, val := range exp.values {
				if gotValue, expValue := statedb.GetState(aa, asHash(slot)), asHash(val); gotValue != expValue {
					t.Fatalf("block %d, slot %d, got %x exp %x", blockNum, slot, gotValue, expValue)
				}
			}
		} else {
			if statedb.Exist(aa) {
				t.Fatalf("block %d, expected %v to not exist, it did", blockNum, aa)
			}
		}
	}
}
|
2020-03-06 15:05:44 +03:00
|
|
|
|
|
|
|
// TestInitThenFailCreateContract tests a pretty notorious case that happened
// on mainnet over blocks 7338108, 7338110 and 7338115.
// - Block 7338108: address e771789f5cccac282f23bb7add5690e1f6ca467c is initiated
// with 0.001 ether (thus created but no code)
// - Block 7338110: a CREATE2 is attempted. The CREATE2 would deploy code on
// the same address e771789f5cccac282f23bb7add5690e1f6ca467c. However, the
// deployment fails due to OOG during initcode execution
// - Block 7338115: another tx checks the balance of
// e771789f5cccac282f23bb7add5690e1f6ca467c, and the snapshotter returned it as
// zero.
//
// The problem being that the snapshotter maintains a destructset, and adds items
// to the destructset in case something is created "onto" an existing item.
// We need to either roll back the snapDestructs, or not place it into snapDestructs
// in the first place.
func TestInitThenFailCreateContract(t *testing.T) {
	var (
		engine = ethash.NewFaker()

		// A sender who makes transactions, has some funds
		key, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		address = crypto.PubkeyToAddress(key.PublicKey)
		funds   = big.NewInt(1000000000000000)
		bb      = common.HexToAddress("0x000000000000000000000000000000000000bbbb")
	)

	// The bb-code needs to CREATE2 the aa contract. It consists of
	// both initcode and deployment code
	// initcode:
	// 1. If blocknum < 1, error out (e.g invalid opcode)
	// 2. else, return a snippet of code
	initCode := []byte{
		byte(vm.PUSH1), 0x1, // y (2)
		byte(vm.NUMBER), // x (number)
		byte(vm.GT),     // x > y?
		// 0x8 is the byte offset of the JUMPDEST below within this initcode.
		byte(vm.PUSH1), byte(0x8),
		byte(vm.JUMPI), // jump to label if number > 2
		byte(0xFE),     // illegal opcode
		byte(vm.JUMPDEST),
		byte(vm.PUSH1), 0x2, // size
		byte(vm.PUSH1), 0x0, // offset
		byte(vm.RETURN), // return 2 bytes of zero-code
	}
	// The deployer below pushes the entire initcode as a single PUSHn
	// immediate, which caps initcode at 32 bytes.
	if l := len(initCode); l > 32 {
		t.Fatalf("init code is too long for a pushx, need a more elaborate deployer")
	}
	bbCode := []byte{
		// Push initcode onto stack
		byte(vm.PUSH1) + byte(len(initCode)-1)}
	bbCode = append(bbCode, initCode...)
	bbCode = append(bbCode, []byte{
		byte(vm.PUSH1), 0x0, // memory start on stack
		byte(vm.MSTORE),
		byte(vm.PUSH1), 0x00, // salt
		byte(vm.PUSH1), byte(len(initCode)), // size
		byte(vm.PUSH1), byte(32 - len(initCode)), // offset
		byte(vm.PUSH1), 0x00, // endowment
		byte(vm.CREATE2),
	}...)

	// The CREATE2 target (zero salt) is fully determined by bb and the
	// initcode hash; this is the pre-funded address the CREATE2 lands on.
	initHash := crypto.Keccak256Hash(initCode)
	aa := crypto.CreateAddress2(bb, [32]byte{}, initHash[:])
	t.Logf("Destination address: %x\n", aa)

	gspec := &Genesis{
		Config: params.TestChainConfig,
		Alloc: GenesisAlloc{
			address: {Balance: funds},
			// The address aa has some funds
			aa: {Balance: big.NewInt(100000)},
			// The contract BB tries to create code onto AA
			bb: {
				Code:    bbCode,
				Balance: big.NewInt(1),
			},
		},
	}
	nonce := uint64(0)
	// Each block carries one call to bb; in block 1 the initcode hits the
	// illegal opcode (number <= 2) and the CREATE2 fails.
	_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 4, func(i int, b *BlockGen) {
		b.SetCoinbase(common.Address{1})
		// One transaction to BB
		tx, _ := types.SignTx(types.NewTransaction(nonce, bb,
			big.NewInt(0), 100000, b.header.BaseFee, nil), types.HomesteadSigner{}, key)
		b.AddTx(tx)
		nonce++
	})

	// Import the canonical chain
	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{
		//Debug:  true,
		//Tracer: vm.NewJSONLogger(nil, os.Stdout),
	}, nil, nil)
	if err != nil {
		t.Fatalf("failed to create tester chain: %v", err)
	}
	statedb, _ := chain.State()
	if got, exp := statedb.GetBalance(aa), big.NewInt(100000); got.Cmp(exp) != 0 {
		t.Fatalf("Genesis err, got %v exp %v", got, exp)
	}
	// First block tries to create, but fails
	{
		block := blocks[0]
		if _, err := chain.InsertChain([]*types.Block{blocks[0]}); err != nil {
			t.Fatalf("block %d: failed to insert into chain: %v", block.NumberU64(), err)
		}
		statedb, _ = chain.State()
		// The failed creation must not wipe aa's pre-existing balance.
		if got, exp := statedb.GetBalance(aa), big.NewInt(100000); got.Cmp(exp) != 0 {
			t.Fatalf("block %d: got %v exp %v", block.NumberU64(), got, exp)
		}
	}
	// Import the rest of the blocks
	for _, block := range blocks[1:] {
		if _, err := chain.InsertChain([]*types.Block{block}); err != nil {
			t.Fatalf("block %d: failed to insert into chain: %v", block.NumberU64(), err)
		}
	}
}
|
2021-02-25 17:26:57 +03:00
|
|
|
|
|
|
|
// TestEIP2718Transition tests that an EIP-2718 transaction will be accepted
|
|
|
|
// after the fork block has passed. This is verified by sending an EIP-2930
|
|
|
|
// access list transaction, which specifies a single slot access, and then
|
|
|
|
// checking that the gas usage of a hot SLOAD and a cold SLOAD are calculated
|
|
|
|
// correctly.
|
|
|
|
func TestEIP2718Transition(t *testing.T) {
|
|
|
|
var (
|
2022-09-07 21:21:59 +03:00
|
|
|
aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa")
|
2021-02-25 17:26:57 +03:00
|
|
|
engine = ethash.NewFaker()
|
|
|
|
|
|
|
|
// A sender who makes transactions, has some funds
|
|
|
|
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
|
|
|
address = crypto.PubkeyToAddress(key.PublicKey)
|
2021-06-15 13:56:14 +03:00
|
|
|
funds = big.NewInt(1000000000000000)
|
2021-02-25 17:26:57 +03:00
|
|
|
gspec = &Genesis{
|
2021-05-06 12:07:42 +03:00
|
|
|
Config: params.TestChainConfig,
|
2021-02-25 17:26:57 +03:00
|
|
|
Alloc: GenesisAlloc{
|
|
|
|
address: {Balance: funds},
|
|
|
|
// The address 0xAAAA sloads 0x00 and 0x01
|
|
|
|
aa: {
|
|
|
|
Code: []byte{
|
|
|
|
byte(vm.PC),
|
|
|
|
byte(vm.PC),
|
|
|
|
byte(vm.SLOAD),
|
|
|
|
byte(vm.SLOAD),
|
|
|
|
},
|
|
|
|
Nonce: 0,
|
|
|
|
Balance: big.NewInt(0),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
)
|
2022-09-07 21:21:59 +03:00
|
|
|
// Generate blocks
|
|
|
|
_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) {
|
2021-02-25 17:26:57 +03:00
|
|
|
b.SetCoinbase(common.Address{1})
|
|
|
|
|
|
|
|
// One transaction to 0xAAAA
|
|
|
|
signer := types.LatestSigner(gspec.Config)
|
|
|
|
tx, _ := types.SignNewTx(key, signer, &types.AccessListTx{
|
|
|
|
ChainID: gspec.Config.ChainID,
|
|
|
|
Nonce: 0,
|
|
|
|
To: &aa,
|
|
|
|
Gas: 30000,
|
2021-06-15 13:56:14 +03:00
|
|
|
GasPrice: b.header.BaseFee,
|
2021-02-25 17:26:57 +03:00
|
|
|
AccessList: types.AccessList{{
|
|
|
|
Address: aa,
|
|
|
|
StorageKeys: []common.Hash{{0}},
|
|
|
|
}},
|
|
|
|
})
|
|
|
|
b.AddTx(tx)
|
|
|
|
})
|
|
|
|
|
|
|
|
// Import the canonical chain
|
2022-09-07 21:21:59 +03:00
|
|
|
chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil)
|
2021-02-25 17:26:57 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to create tester chain: %v", err)
|
|
|
|
}
|
|
|
|
if n, err := chain.InsertChain(blocks); err != nil {
|
|
|
|
t.Fatalf("block %d: failed to insert into chain: %v", n, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
block := chain.GetBlockByNumber(1)
|
|
|
|
|
|
|
|
// Expected gas is intrinsic + 2 * pc + hot load + cold load, since only one load is in the access list
|
2021-05-07 09:25:32 +03:00
|
|
|
expected := params.TxGas + params.TxAccessListAddressGas + params.TxAccessListStorageKeyGas +
|
|
|
|
vm.GasQuickStep*2 + params.WarmStorageReadCostEIP2929 + params.ColdSloadCostEIP2929
|
2021-02-25 17:26:57 +03:00
|
|
|
if block.GasUsed() != expected {
|
|
|
|
t.Fatalf("incorrect amount of gas spent: expected %d, got %d", expected, block.GasUsed())
|
|
|
|
}
|
|
|
|
}
|
2021-05-17 16:13:22 +03:00
|
|
|
|
|
|
|
// TestEIP1559Transition tests the following:
//
// 1. A transaction whose gasFeeCap is greater than the baseFee is valid.
// 2. Gas accounting for access lists on EIP-1559 transactions is correct.
// 3. Only the transaction's tip will be received by the coinbase.
// 4. The transaction sender pays for both the tip and baseFee.
// 5. The coinbase receives only the partially realized tip when
// gasFeeCap - gasTipCap < baseFee.
// 6. Legacy transaction behave as expected (e.g. gasPrice = gasFeeCap = gasTipCap).
func TestEIP1559Transition(t *testing.T) {
	var (
		aa     = common.HexToAddress("0x000000000000000000000000000000000000aaaa")
		engine = ethash.NewFaker()

		// A sender who makes transactions, has some funds
		key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
		addr1   = crypto.PubkeyToAddress(key1.PublicKey)
		addr2   = crypto.PubkeyToAddress(key2.PublicKey)
		funds   = new(big.Int).Mul(common.Big1, big.NewInt(params.Ether))
		gspec   = &Genesis{
			Config: params.AllEthashProtocolChanges,
			Alloc: GenesisAlloc{
				addr1: {Balance: funds},
				addr2: {Balance: funds},
				// The address 0xAAAA sloads 0x00 and 0x01
				aa: {
					Code: []byte{
						byte(vm.PC),
						byte(vm.PC),
						byte(vm.SLOAD),
						byte(vm.SLOAD),
					},
					Nonce:   0,
					Balance: big.NewInt(0),
				},
			},
		}
	)

	// Activate Berlin (access lists) and London (1559 fee market) from genesis
	// so block 1 already runs under the EIP-1559 rules.
	gspec.Config.BerlinBlock = common.Big0
	gspec.Config.LondonBlock = common.Big0
	signer := types.LatestSigner(gspec.Config)

	// Block 1: one dynamic-fee (EIP-1559) transaction from addr1 to 0xAAAA.
	genDb, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) {
		b.SetCoinbase(common.Address{1})

		// One transaction to 0xAAAA
		accesses := types.AccessList{types.AccessTuple{
			Address:     aa,
			StorageKeys: []common.Hash{{0}},
		}}

		txdata := &types.DynamicFeeTx{
			ChainID:    gspec.Config.ChainID,
			Nonce:      0,
			To:         &aa,
			Gas:        30000,
			GasFeeCap:  newGwei(5),
			GasTipCap:  big.NewInt(2),
			AccessList: accesses,
			Data:       []byte{},
		}
		tx := types.NewTx(txdata)
		tx, _ = types.SignTx(tx, signer, key1)

		b.AddTx(tx)
	})
	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("failed to create tester chain: %v", err)
	}
	if n, err := chain.InsertChain(blocks); err != nil {
		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
	}

	block := chain.GetBlockByNumber(1)

	// 1+2: Ensure EIP-1559 access lists are accounted for via gas usage.
	expectedGas := params.TxGas + params.TxAccessListAddressGas + params.TxAccessListStorageKeyGas +
		vm.GasQuickStep*2 + params.WarmStorageReadCostEIP2929 + params.ColdSloadCostEIP2929
	if block.GasUsed() != expectedGas {
		t.Fatalf("incorrect amount of gas spent: expected %d, got %d", expectedGas, block.GasUsed())
	}

	state, _ := chain.State()

	// 3: Ensure that miner received only the tx's tip.
	// (gasFeeCap - baseFee >= gasTipCap here, so the full tip is realized.)
	actual := state.GetBalance(block.Coinbase())
	expected := new(big.Int).Add(
		new(big.Int).SetUint64(block.GasUsed()*block.Transactions()[0].GasTipCap().Uint64()),
		ethash.ConstantinopleBlockReward,
	)
	if actual.Cmp(expected) != 0 {
		t.Fatalf("miner balance incorrect: expected %d, got %d", expected, actual)
	}

	// 4: Ensure the tx sender paid for the gasUsed * (tip + block baseFee).
	actual = new(big.Int).Sub(funds, state.GetBalance(addr1))
	expected = new(big.Int).SetUint64(block.GasUsed() * (block.Transactions()[0].GasTipCap().Uint64() + block.BaseFee().Uint64()))
	if actual.Cmp(expected) != 0 {
		t.Fatalf("sender balance incorrect: expected %d, got %d", expected, actual)
	}

	// Block 2: one legacy transaction from addr2; under 1559 its gasPrice
	// acts as both fee cap and tip cap.
	blocks, _ = GenerateChain(gspec.Config, block, engine, genDb, 1, func(i int, b *BlockGen) {
		b.SetCoinbase(common.Address{2})

		txdata := &types.LegacyTx{
			Nonce:    0,
			To:       &aa,
			Gas:      30000,
			GasPrice: newGwei(5),
		}
		tx := types.NewTx(txdata)
		tx, _ = types.SignTx(tx, signer, key2)

		b.AddTx(tx)
	})

	if n, err := chain.InsertChain(blocks); err != nil {
		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
	}

	block = chain.GetBlockByNumber(2)
	state, _ = chain.State()
	// For a legacy tx GasTipCap() == gasPrice, so the effective tip is
	// gasPrice - baseFee.
	effectiveTip := block.Transactions()[0].GasTipCap().Uint64() - block.BaseFee().Uint64()

	// 6+5: Ensure that miner received only the tx's effective tip.
	actual = state.GetBalance(block.Coinbase())
	expected = new(big.Int).Add(
		new(big.Int).SetUint64(block.GasUsed()*effectiveTip),
		ethash.ConstantinopleBlockReward,
	)
	if actual.Cmp(expected) != 0 {
		t.Fatalf("miner balance incorrect: expected %d, got %d", expected, actual)
	}

	// 4: Ensure the tx sender paid for the gasUsed * (effectiveTip + block baseFee).
	actual = new(big.Int).Sub(funds, state.GetBalance(addr2))
	expected = new(big.Int).SetUint64(block.GasUsed() * (effectiveTip + block.BaseFee().Uint64()))
	if actual.Cmp(expected) != 0 {
		t.Fatalf("sender balance incorrect: expected %d, got %d", expected, actual)
	}
}
|
2022-05-05 10:36:26 +03:00
|
|
|
|
|
|
|
// Tests the scenario the chain is requested to another point with the missing state.
|
|
|
|
// It expects the state is recovered and all relevant chain markers are set correctly.
|
|
|
|
func TestSetCanonical(t *testing.T) {
|
|
|
|
//log.Root().SetHandler(log.LvlFilterHandler(log.LvlDebug, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
|
|
|
|
|
|
|
|
var (
|
|
|
|
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
|
|
|
address = crypto.PubkeyToAddress(key.PublicKey)
|
|
|
|
funds = big.NewInt(100000000000000000)
|
|
|
|
gspec = &Genesis{
|
|
|
|
Config: params.TestChainConfig,
|
|
|
|
Alloc: GenesisAlloc{address: {Balance: funds}},
|
|
|
|
BaseFee: big.NewInt(params.InitialBaseFee),
|
|
|
|
}
|
2022-09-07 21:21:59 +03:00
|
|
|
signer = types.LatestSigner(gspec.Config)
|
|
|
|
engine = ethash.NewFaker()
|
2022-05-05 10:36:26 +03:00
|
|
|
)
|
|
|
|
// Generate and import the canonical chain
|
2022-09-07 21:21:59 +03:00
|
|
|
_, canon, _ := GenerateChainWithGenesis(gspec, engine, 2*TriesInMemory, func(i int, gen *BlockGen) {
|
2022-05-05 10:36:26 +03:00
|
|
|
tx, err := types.SignTx(types.NewTransaction(gen.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), signer, key)
|
|
|
|
if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
gen.AddTx(tx)
|
|
|
|
})
|
2022-09-07 21:21:59 +03:00
|
|
|
chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil)
|
2022-05-05 10:36:26 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to create tester chain: %v", err)
|
|
|
|
}
|
|
|
|
if n, err := chain.InsertChain(canon); err != nil {
|
|
|
|
t.Fatalf("block %d: failed to insert into chain: %v", n, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Generate the side chain and import them
|
2022-09-07 21:21:59 +03:00
|
|
|
_, side, _ := GenerateChainWithGenesis(gspec, engine, 2*TriesInMemory, func(i int, gen *BlockGen) {
|
2022-05-05 10:36:26 +03:00
|
|
|
tx, err := types.SignTx(types.NewTransaction(gen.TxNonce(address), common.Address{0x00}, big.NewInt(1), params.TxGas, gen.header.BaseFee, nil), signer, key)
|
|
|
|
if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
gen.AddTx(tx)
|
|
|
|
})
|
|
|
|
for _, block := range side {
|
|
|
|
err := chain.InsertBlockWithoutSetHead(block)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Failed to insert into chain: %v", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for _, block := range side {
|
|
|
|
got := chain.GetBlockByHash(block.Hash())
|
|
|
|
if got == nil {
|
|
|
|
t.Fatalf("Lost the inserted block")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Set the chain head to the side chain, ensure all the relevant markers are updated.
|
|
|
|
verify := func(head *types.Block) {
|
|
|
|
if chain.CurrentBlock().Hash() != head.Hash() {
|
|
|
|
t.Fatalf("Unexpected block hash, want %x, got %x", head.Hash(), chain.CurrentBlock().Hash())
|
|
|
|
}
|
|
|
|
if chain.CurrentFastBlock().Hash() != head.Hash() {
|
|
|
|
t.Fatalf("Unexpected fast block hash, want %x, got %x", head.Hash(), chain.CurrentFastBlock().Hash())
|
|
|
|
}
|
|
|
|
if chain.CurrentHeader().Hash() != head.Hash() {
|
|
|
|
t.Fatalf("Unexpected head header, want %x, got %x", head.Hash(), chain.CurrentHeader().Hash())
|
|
|
|
}
|
|
|
|
if !chain.HasState(head.Root()) {
|
|
|
|
t.Fatalf("Lost block state %v %x", head.Number(), head.Hash())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
chain.SetCanonical(side[len(side)-1])
|
|
|
|
verify(side[len(side)-1])
|
|
|
|
|
|
|
|
// Reset the chain head to original chain
|
|
|
|
chain.SetCanonical(canon[TriesInMemory-1])
|
|
|
|
verify(canon[TriesInMemory-1])
|
|
|
|
}
|
2022-06-01 12:03:24 +03:00
|
|
|
|
|
|
|
// TestCanonicalHashMarker tests all the canonical hash markers are updated/deleted
|
|
|
|
// correctly in case reorg is called.
|
|
|
|
func TestCanonicalHashMarker(t *testing.T) {
|
|
|
|
var cases = []struct {
|
|
|
|
forkA int
|
|
|
|
forkB int
|
|
|
|
}{
|
|
|
|
// ForkA: 10 blocks
|
|
|
|
// ForkB: 1 blocks
|
|
|
|
//
|
|
|
|
// reorged:
|
|
|
|
// markers [2, 10] should be deleted
|
|
|
|
// markers [1] should be updated
|
|
|
|
{10, 1},
|
|
|
|
|
|
|
|
// ForkA: 10 blocks
|
|
|
|
// ForkB: 2 blocks
|
|
|
|
//
|
|
|
|
// reorged:
|
|
|
|
// markers [3, 10] should be deleted
|
|
|
|
// markers [1, 2] should be updated
|
|
|
|
{10, 2},
|
|
|
|
|
|
|
|
// ForkA: 10 blocks
|
|
|
|
// ForkB: 10 blocks
|
|
|
|
//
|
|
|
|
// reorged:
|
|
|
|
// markers [1, 10] should be updated
|
|
|
|
{10, 10},
|
|
|
|
|
|
|
|
// ForkA: 10 blocks
|
|
|
|
// ForkB: 11 blocks
|
|
|
|
//
|
|
|
|
// reorged:
|
|
|
|
// markers [1, 11] should be updated
|
|
|
|
{10, 11},
|
|
|
|
}
|
|
|
|
for _, c := range cases {
|
|
|
|
var (
|
|
|
|
gspec = &Genesis{
|
|
|
|
Config: params.TestChainConfig,
|
|
|
|
Alloc: GenesisAlloc{},
|
|
|
|
BaseFee: big.NewInt(params.InitialBaseFee),
|
|
|
|
}
|
2022-09-07 21:21:59 +03:00
|
|
|
engine = ethash.NewFaker()
|
2022-06-01 12:03:24 +03:00
|
|
|
)
|
2022-09-07 21:21:59 +03:00
|
|
|
_, forkA, _ := GenerateChainWithGenesis(gspec, engine, c.forkA, func(i int, gen *BlockGen) {})
|
|
|
|
_, forkB, _ := GenerateChainWithGenesis(gspec, engine, c.forkB, func(i int, gen *BlockGen) {})
|
2022-06-01 12:03:24 +03:00
|
|
|
|
|
|
|
// Initialize test chain
|
2022-09-07 21:21:59 +03:00
|
|
|
chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil)
|
2022-06-01 12:03:24 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to create tester chain: %v", err)
|
|
|
|
}
|
|
|
|
// Insert forkA and forkB, the canonical should on forkA still
|
|
|
|
if n, err := chain.InsertChain(forkA); err != nil {
|
|
|
|
t.Fatalf("block %d: failed to insert into chain: %v", n, err)
|
|
|
|
}
|
|
|
|
if n, err := chain.InsertChain(forkB); err != nil {
|
|
|
|
t.Fatalf("block %d: failed to insert into chain: %v", n, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
verify := func(head *types.Block) {
|
|
|
|
if chain.CurrentBlock().Hash() != head.Hash() {
|
|
|
|
t.Fatalf("Unexpected block hash, want %x, got %x", head.Hash(), chain.CurrentBlock().Hash())
|
|
|
|
}
|
|
|
|
if chain.CurrentFastBlock().Hash() != head.Hash() {
|
|
|
|
t.Fatalf("Unexpected fast block hash, want %x, got %x", head.Hash(), chain.CurrentFastBlock().Hash())
|
|
|
|
}
|
|
|
|
if chain.CurrentHeader().Hash() != head.Hash() {
|
|
|
|
t.Fatalf("Unexpected head header, want %x, got %x", head.Hash(), chain.CurrentHeader().Hash())
|
|
|
|
}
|
|
|
|
if !chain.HasState(head.Root()) {
|
|
|
|
t.Fatalf("Lost block state %v %x", head.Number(), head.Hash())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Switch canonical chain to forkB if necessary
|
|
|
|
if len(forkA) < len(forkB) {
|
|
|
|
verify(forkB[len(forkB)-1])
|
|
|
|
} else {
|
|
|
|
verify(forkA[len(forkA)-1])
|
|
|
|
chain.SetCanonical(forkB[len(forkB)-1])
|
|
|
|
verify(forkB[len(forkB)-1])
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure all hash markers are updated correctly
|
|
|
|
for i := 0; i < len(forkB); i++ {
|
|
|
|
block := forkB[i]
|
|
|
|
hash := chain.GetCanonicalHash(block.NumberU64())
|
|
|
|
if hash != block.Hash() {
|
|
|
|
t.Fatalf("Unexpected canonical hash %d", block.NumberU64())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if c.forkA > c.forkB {
|
|
|
|
for i := uint64(c.forkB) + 1; i <= uint64(c.forkA); i++ {
|
|
|
|
hash := chain.GetCanonicalHash(i)
|
|
|
|
if hash != (common.Hash{}) {
|
|
|
|
t.Fatalf("Unexpected canonical hash %d", i)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|