// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Tests that abnormal program termination (i.e. crash) and restart can recover
// the snapshot properly if the snapshot is enabled.

package core

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"os"
	"strings"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/params"
)

// snapshotTestBasic wraps the common testing fields in the snapshot tests.
type snapshotTestBasic struct {
	chainBlocks   int    // Number of blocks to generate for the canonical chain
	snapshotBlock uint64 // Block number of the relevant snapshot disk layer
	commitBlock   uint64 // Block number for which to commit the state to disk

	expCanonicalBlocks int    // Number of canonical blocks expected to remain in the database (excl. genesis)
	expHeadHeader      uint64 // Block number of the expected head header
	expHeadFastBlock   uint64 // Block number of the expected head fast sync block
	expHeadBlock       uint64 // Block number of the expected head full block
	expSnapshotBottom  uint64 // The block height corresponding to the snapshot disk layer

	// Shared fields, set at runtime
	datadir string
	db      ethdb.Database
	gendb   ethdb.Database
	engine  consensus.Engine
}

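// For illustration only: a hypothetical harness value for an 8-block chain
// whose state is committed at C2 and whose snapshot disk layer is flushed at
// C4 (mirroring TestLowCommitCrashWithNewSnapshot below) would look like:
//
//	basic := snapshotTestBasic{
//		chainBlocks:        8,
//		snapshotBlock:      4,
//		commitBlock:        2,
//		expCanonicalBlocks: 8,
//		expHeadHeader:      8,
//		expHeadFastBlock:   8,
//		expHeadBlock:       2,
//		expSnapshotBottom:  4,
//	}
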
func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Block) {
	// Create a temporary persistent database
	datadir, err := ioutil.TempDir("", "")
	if err != nil {
		t.Fatalf("Failed to create temporary datadir: %v", err)
	}
	os.RemoveAll(datadir)

	db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false)
	if err != nil {
		t.Fatalf("Failed to create persistent database: %v", err)
	}
	// Initialize a fresh chain
	var (
		genesis = new(Genesis).MustCommit(db)
		engine  = ethash.NewFullFaker()
		gendb   = rawdb.NewMemoryDatabase()

		// Snapshot is enabled, the first snapshot is created from the genesis.
		// The snapshot memory allowance is 256MB, which means no snapshot flush
		// will happen during block insertion.
		cacheConfig = defaultCacheConfig
	)
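	// Note: defaultCacheConfig is declared elsewhere in package core and is
	// not shown in this file. It is assumed to enable snapshots with a
	// generous memory allowance, roughly along the lines of:
	//
	//	&CacheConfig{
	//		TrieCleanLimit: 256,
	//		TrieDirtyLimit: 256,
	//		TrieTimeLimit:  5 * time.Minute,
	//		SnapshotLimit:  256,
	//		SnapshotWait:   true,
	//	}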
	chain, err := NewBlockChain(db, cacheConfig, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("Failed to create chain: %v", err)
	}
	blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, gendb, basic.chainBlocks, func(i int, b *BlockGen) {})

	// Insert the blocks with configured settings, pausing at the commit
	// and snapshot breakpoints (in ascending order).
	var breakpoints []uint64
	if basic.commitBlock > basic.snapshotBlock {
		breakpoints = append(breakpoints, basic.snapshotBlock, basic.commitBlock)
	} else {
		breakpoints = append(breakpoints, basic.commitBlock, basic.snapshotBlock)
	}
	var startPoint uint64
	for _, point := range breakpoints {
		if _, err := chain.InsertChain(blocks[startPoint:point]); err != nil {
			t.Fatalf("Failed to import canonical chain start: %v", err)
		}
		startPoint = point

		if basic.commitBlock > 0 && basic.commitBlock == point {
			chain.stateCache.TrieDB().Commit(blocks[point-1].Root(), true, nil)
		}
		if basic.snapshotBlock > 0 && basic.snapshotBlock == point {
			// Flush the entire snap tree into the disk: the relevant
			// (a) snapshot root and (b) snapshot generator will be
			// persisted atomically.
			chain.snaps.Cap(blocks[point-1].Root(), 0)
			diskRoot, blockRoot := chain.snaps.DiskRoot(), blocks[point-1].Root()
			if !bytes.Equal(diskRoot.Bytes(), blockRoot.Bytes()) {
				t.Fatalf("Failed to flush disk layer change, want %x, got %x", blockRoot, diskRoot)
			}
		}
	}
	if _, err := chain.InsertChain(blocks[startPoint:]); err != nil {
		t.Fatalf("Failed to import canonical chain tail: %v", err)
	}

	// Set runtime fields
	basic.datadir = datadir
	basic.db = db
	basic.gendb = gendb
	basic.engine = engine
	return chain, blocks
}

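// Note: verifyNoGaps and verifyCutoff, used by verify below, are shared test
// helpers assumed to be defined elsewhere in this package (in upstream
// go-ethereum they live in blockchain_repair_test.go).
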
func (basic *snapshotTestBasic) verify(t *testing.T, chain *BlockChain, blocks []*types.Block) {
	// Iterate over all the remaining blocks and ensure there are no gaps
	verifyNoGaps(t, chain, true, blocks)
	verifyCutoff(t, chain, true, blocks, basic.expCanonicalBlocks)

	if head := chain.CurrentHeader(); head.Number.Uint64() != basic.expHeadHeader {
		t.Errorf("Head header mismatch: have %d, want %d", head.Number, basic.expHeadHeader)
	}
	if head := chain.CurrentFastBlock(); head.NumberU64() != basic.expHeadFastBlock {
		t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), basic.expHeadFastBlock)
	}
	if head := chain.CurrentBlock(); head.NumberU64() != basic.expHeadBlock {
		t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), basic.expHeadBlock)
	}

	// Check the disk layer, ensure it matches the expected block. Abort with
	// Fatalf if the block is missing, since block.Root() is needed below.
	block := chain.GetBlockByNumber(basic.expSnapshotBottom)
	if block == nil {
		t.Fatalf("The corresponding block[%d] of snapshot disk layer is missing", basic.expSnapshotBottom)
	}
	if !bytes.Equal(chain.snaps.DiskRoot().Bytes(), block.Root().Bytes()) {
		t.Errorf("The snapshot disk layer root is incorrect, want %x, got %x", block.Root(), chain.snaps.DiskRoot())
	}

	// Check the snapshot, ensure it's integrated
	if err := chain.snaps.Verify(block.Root()); err != nil {
		t.Errorf("The disk layer is not integrated: %v", err)
	}
}

func (basic *snapshotTestBasic) dump() string {
	buffer := new(strings.Builder)

	fmt.Fprint(buffer, "Chain:\n G")
	for i := 0; i < basic.chainBlocks; i++ {
		fmt.Fprintf(buffer, "->C%d", i+1)
	}
	fmt.Fprint(buffer, " (HEAD)\n\n")

	fmt.Fprintf(buffer, "Commit:   G")
	if basic.commitBlock > 0 {
		fmt.Fprintf(buffer, ", C%d", basic.commitBlock)
	}
	fmt.Fprint(buffer, "\n")

	fmt.Fprintf(buffer, "Snapshot: G")
	if basic.snapshotBlock > 0 {
		fmt.Fprintf(buffer, ", C%d", basic.snapshotBlock)
	}
	fmt.Fprint(buffer, "\n")

	//if crash {
	//	fmt.Fprintf(buffer, "\nCRASH\n\n")
	//} else {
	//	fmt.Fprintf(buffer, "\nSetHead(%d)\n\n", basic.setHead)
	//}
	fmt.Fprintf(buffer, "------------------------------\n\n")

	fmt.Fprint(buffer, "Expected in leveldb:\n G")
	for i := 0; i < basic.expCanonicalBlocks; i++ {
		fmt.Fprintf(buffer, "->C%d", i+1)
	}
	fmt.Fprintf(buffer, "\n\n")
	fmt.Fprintf(buffer, "Expected head header    : C%d\n", basic.expHeadHeader)
	fmt.Fprintf(buffer, "Expected head fast block: C%d\n", basic.expHeadFastBlock)
	if basic.expHeadBlock == 0 {
		fmt.Fprintf(buffer, "Expected head block     : G\n")
	} else {
		fmt.Fprintf(buffer, "Expected head block     : C%d\n", basic.expHeadBlock)
	}
	if basic.expSnapshotBottom == 0 {
		fmt.Fprintf(buffer, "Expected snapshot disk  : G\n")
	} else {
		fmt.Fprintf(buffer, "Expected snapshot disk  : C%d\n", basic.expSnapshotBottom)
	}
	return buffer.String()
}

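// For illustration only: with chainBlocks=8, commitBlock=2, snapshotBlock=4
// and matching expectations, dump is expected to produce roughly:
//
//	Chain:
//	 G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
//
//	Commit:   G, C2
//	Snapshot: G, C4
//	------------------------------
//
//	Expected in leveldb:
//	 G->C1->C2->C3->C4->C5->C6->C7->C8
//
//	Expected head header    : C8
//	Expected head fast block: C8
//	Expected head block     : C2
//	Expected snapshot disk  : C4
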
func (basic *snapshotTestBasic) teardown() {
	basic.db.Close()
	basic.gendb.Close()
	os.RemoveAll(basic.datadir)
}

// snapshotTest is a test case type for normal snapshot recovery.
// It can be used for testing a normal Geth restart.
type snapshotTest struct {
	snapshotTestBasic
}

func (snaptest *snapshotTest) test(t *testing.T) {
	// It's hard to follow the test case, visualize the input
	// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
	// fmt.Println(tt.dump())
	chain, blocks := snaptest.prepare(t)

	// Restart the chain normally
	chain.Stop()
	newchain, err := NewBlockChain(snaptest.db, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("Failed to recreate chain: %v", err)
	}
	defer newchain.Stop()

	snaptest.verify(t, newchain, blocks)
}

// crashSnapshotTest is a test case type for abnormal snapshot recovery.
// It can be used for testing a Geth restart after a crash.
type crashSnapshotTest struct {
	snapshotTestBasic
}

func (snaptest *crashSnapshotTest) test(t *testing.T) {
	// It's hard to follow the test case, visualize the input
	// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
	// fmt.Println(tt.dump())
	chain, blocks := snaptest.prepare(t)

	// Pull the plug on the database, simulating a hard crash
	db := chain.db
	db.Close()

	// Start a new blockchain back up and see where the repair leads us
	newdb, err := rawdb.NewLevelDBDatabaseWithFreezer(snaptest.datadir, 0, 0, snaptest.datadir, "", false)
	if err != nil {
		t.Fatalf("Failed to reopen persistent database: %v", err)
	}
	defer newdb.Close()

	// The interesting thing is: instead of restarting the blockchain once
	// after the crash, we restart it twice here: once after the crash and
	// once after a normal stop. This ensures the broken snapshot can be
	// detected in both cases.
	newchain, err := NewBlockChain(newdb, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("Failed to recreate chain: %v", err)
	}
	newchain.Stop()

	newchain, err = NewBlockChain(newdb, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("Failed to recreate chain: %v", err)
	}
	defer newchain.Stop()

	snaptest.verify(t, newchain, blocks)
}

// gappedSnapshotTest is a test type used to test this scenario:
// - have a complete snapshot
// - restart without enabling the snapshot
// - insert a few blocks
// - restart with the snapshot enabled again
type gappedSnapshotTest struct {
	snapshotTestBasic
	gapped int // Number of blocks to insert without enabling snapshot
}

func (snaptest *gappedSnapshotTest) test(t *testing.T) {
	// It's hard to follow the test case, visualize the input
	// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
	// fmt.Println(tt.dump())
	chain, blocks := snaptest.prepare(t)

	// Insert blocks without enabling snapshot if gapping is required.
	chain.Stop()
	gappedBlocks, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], snaptest.engine, snaptest.gendb, snaptest.gapped, func(i int, b *BlockGen) {})

	// Insert a few more blocks without enabling snapshot.
	// SnapshotLimit: 0 disables the snapshot feature entirely.
	var cacheConfig = &CacheConfig{
		TrieCleanLimit: 256,
		TrieDirtyLimit: 256,
		TrieTimeLimit:  5 * time.Minute,
		TriesInMemory:  128,
		SnapshotLimit:  0,
	}
	newchain, err := NewBlockChain(snaptest.db, cacheConfig, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("Failed to recreate chain: %v", err)
	}
	newchain.InsertChain(gappedBlocks)
	newchain.Stop()

	// Restart the chain with the snapshot enabled
	newchain, err = NewBlockChain(snaptest.db, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("Failed to recreate chain: %v", err)
	}
	defer newchain.Stop()

	snaptest.verify(t, newchain, blocks)
}

// setHeadSnapshotTest is the test type used to test this scenario:
// - have a complete snapshot
// - set the head to a lower point
// - restart
type setHeadSnapshotTest struct {
	snapshotTestBasic
	setHead uint64 // Block number to set head back to
}

func (snaptest *setHeadSnapshotTest) test(t *testing.T) {
	// It's hard to follow the test case, visualize the input
	// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
	// fmt.Println(tt.dump())
	chain, blocks := snaptest.prepare(t)

	// Rewind the chain if the setHead operation is required.
	chain.SetHead(snaptest.setHead)
	chain.Stop()

	newchain, err := NewBlockChain(snaptest.db, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("Failed to recreate chain: %v", err)
	}
	defer newchain.Stop()

	snaptest.verify(t, newchain, blocks)
}

// restartCrashSnapshotTest is the test type used to test this scenario:
// - have a complete snapshot
// - restart chain
// - insert more blocks with the snapshot enabled
// - commit the snapshot
// - crash
// - restart again
type restartCrashSnapshotTest struct {
	snapshotTestBasic
	newBlocks int
}

func (snaptest *restartCrashSnapshotTest) test(t *testing.T) {
	// It's hard to follow the test case, visualize the input
	// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
	// fmt.Println(tt.dump())
	chain, blocks := snaptest.prepare(t)

	// Firstly, stop the chain properly, with all snapshot journal
	// and state committed.
	chain.Stop()

	newchain, err := NewBlockChain(snaptest.db, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("Failed to recreate chain: %v", err)
	}
	newBlocks, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], snaptest.engine, snaptest.gendb, snaptest.newBlocks, func(i int, b *BlockGen) {})
	newchain.InsertChain(newBlocks)

	// Commit the entire snapshot into the disk if requested. Note that only
	// (a) the snapshot root and (b) the snapshot generator will be committed;
	// the diff journal is not.
	newchain.Snapshots().Cap(newBlocks[len(newBlocks)-1].Root(), 0)

	// Simulate a blockchain crash: don't call chain.Stop here, so that
	// neither the snapshot journal nor the latest state is committed.

	// Restart the chain after the crash.
	newchain, err = NewBlockChain(snaptest.db, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("Failed to recreate chain: %v", err)
	}
	defer newchain.Stop()

	snaptest.verify(t, newchain, blocks)
}

// wipeCrashSnapshotTest is the test type used to test this scenario:
// - have a complete snapshot
// - restart, insert more blocks without enabling the snapshot
// - restart again with the snapshot enabled
// - crash
type wipeCrashSnapshotTest struct {
	snapshotTestBasic
	newBlocks int
}

func (snaptest *wipeCrashSnapshotTest) test(t *testing.T) {
	// It's hard to follow the test case, visualize the input
	// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
	// fmt.Println(tt.dump())
	chain, blocks := snaptest.prepare(t)

	// Firstly, stop the chain properly, with all snapshot journal
	// and state committed.
	chain.Stop()

	config := &CacheConfig{
		TrieCleanLimit: 256,
		TrieDirtyLimit: 256,
		TrieTimeLimit:  5 * time.Minute,
		SnapshotLimit:  0,
		TriesInMemory:  128,
	}
	newchain, err := NewBlockChain(snaptest.db, config, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("Failed to recreate chain: %v", err)
	}
	newBlocks, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], snaptest.engine, snaptest.gendb, snaptest.newBlocks, func(i int, b *BlockGen) {})
	newchain.InsertChain(newBlocks)
	newchain.Stop()

	// Restart the chain; the wiper should start working.
	config = &CacheConfig{
		TrieCleanLimit: 256,
		TrieDirtyLimit: 256,
		TrieTimeLimit:  5 * time.Minute,
		SnapshotLimit:  256,
		SnapshotWait:   false, // Don't wait for the rebuild
		TriesInMemory:  128,
	}
	newchain, err = NewBlockChain(snaptest.db, config, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("Failed to recreate chain: %v", err)
	}
	// Simulate the blockchain crash: the chain created above is abandoned
	// without calling Stop, so no journal is persisted.

	newchain, err = NewBlockChain(snaptest.db, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("Failed to recreate chain: %v", err)
	}
	defer newchain.Stop()

	snaptest.verify(t, newchain, blocks)
}

// Tests a Geth restart with a valid snapshot. Before the shutdown, all the
// snapshot journal will be persisted correctly. In this case no snapshot
// recovery is required.
func TestRestartWithNewSnapshot(t *testing.T) {
	// Chain:
	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
	//
	// Commit:   G
	// Snapshot: G
	//
	// SetHead(0)
	//
	// ------------------------------
	//
	// Expected in leveldb:
	//   G->C1->C2->C3->C4->C5->C6->C7->C8
	//
	// Expected head header    : C8
	// Expected head fast block: C8
	// Expected head block     : C8
	// Expected snapshot disk  : G
	test := &snapshotTest{
		snapshotTestBasic{
			chainBlocks:        8,
			snapshotBlock:      0,
			commitBlock:        0,
			expCanonicalBlocks: 8,
			expHeadHeader:      8,
			expHeadFastBlock:   8,
			expHeadBlock:       8,
			expSnapshotBottom:  0, // Initial disk layer built from the genesis
		},
	}
	test.test(t)
	test.teardown()
}

// Tests a Geth that crashed and restarts with a broken snapshot. In this case
// the chain head should be rewound to the point with available state, and the
// new head must also be lower than the disk layer. Since there is no committed
// point, the chain should be rewound to the genesis and the disk layer should
// be left for recovery.
func TestNoCommitCrashWithNewSnapshot(t *testing.T) {
	// Chain:
	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
	//
	// Commit:   G
	// Snapshot: G, C4
	//
	// CRASH
	//
	// ------------------------------
	//
	// Expected in leveldb:
	//   G->C1->C2->C3->C4->C5->C6->C7->C8
	//
	// Expected head header    : C8
	// Expected head fast block: C8
	// Expected head block     : G
	// Expected snapshot disk  : C4
	test := &crashSnapshotTest{
		snapshotTestBasic{
			chainBlocks:        8,
			snapshotBlock:      4,
			commitBlock:        0,
			expCanonicalBlocks: 8,
			expHeadHeader:      8,
			expHeadFastBlock:   8,
			expHeadBlock:       0,
			expSnapshotBottom:  4, // Last committed disk layer, wait for recovery
		},
	}
	test.test(t)
	test.teardown()
}

// Tests a Geth that crashed and restarts with a broken snapshot. In this case
// the chain head should be rewound to the point with available state, and the
// new head must also be lower than the disk layer. Since there is only a low
// committed point, the chain should be rewound to the committed point and the
// disk layer should be left for recovery.
func TestLowCommitCrashWithNewSnapshot(t *testing.T) {
	// Chain:
	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
	//
	// Commit:   G, C2
	// Snapshot: G, C4
	//
	// CRASH
	//
	// ------------------------------
	//
	// Expected in leveldb:
	//   G->C1->C2->C3->C4->C5->C6->C7->C8
	//
	// Expected head header    : C8
	// Expected head fast block: C8
	// Expected head block     : C2
	// Expected snapshot disk  : C4
	test := &crashSnapshotTest{
		snapshotTestBasic{
			chainBlocks:        8,
			snapshotBlock:      4,
			commitBlock:        2,
			expCanonicalBlocks: 8,
			expHeadHeader:      8,
			expHeadFastBlock:   8,
			expHeadBlock:       2,
			expSnapshotBottom:  4, // Last committed disk layer, wait for recovery
		},
	}
	test.test(t)
	test.teardown()
}

// Tests a Geth that crashed and restarts with a broken snapshot. In this case
// the chain head should be rewound to the point with available state, and the
// new head must also be lower than the disk layer. Since there is only a high
// committed point (above the disk layer), the chain should be rewound to the
// genesis and the disk layer should be left for recovery.
func TestHighCommitCrashWithNewSnapshot(t *testing.T) {
	// Chain:
	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
	//
	// Commit:   G, C6
	// Snapshot: G, C4
	//
	// CRASH
	//
	// ------------------------------
	//
	// Expected in leveldb:
	//   G->C1->C2->C3->C4->C5->C6->C7->C8
	//
	// Expected head header    : C8
	// Expected head fast block: C8
	// Expected head block     : G
	// Expected snapshot disk  : C4
	test := &crashSnapshotTest{
		snapshotTestBasic{
			chainBlocks:        8,
			snapshotBlock:      4,
			commitBlock:        6,
			expCanonicalBlocks: 8,
			expHeadHeader:      8,
			expHeadFastBlock:   8,
			expHeadBlock:       0,
			expSnapshotBottom:  4, // Last committed disk layer, wait for recovery
		},
	}
	test.test(t)
	test.teardown()
}

// Tests a Geth that was running with snapshot enabled, then restarts without
// enabling the snapshot, and after that re-enables the snapshot again. In this
// case the snapshot should be rebuilt with the latest chain head.
func TestGappedNewSnapshot(t *testing.T) {
	// Chain:
	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
	//
	// Commit:   G
	// Snapshot: G
	//
	// SetHead(0)
	//
	// ------------------------------
	//
	// Expected in leveldb:
	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10
	//
	// Expected head header    : C10
	// Expected head fast block: C10
	// Expected head block     : C10
	// Expected snapshot disk  : C10
	test := &gappedSnapshotTest{
		snapshotTestBasic: snapshotTestBasic{
			chainBlocks:        8,
			snapshotBlock:      0,
			commitBlock:        0,
			expCanonicalBlocks: 10,
			expHeadHeader:      10,
			expHeadFastBlock:   10,
			expHeadBlock:       10,
			expSnapshotBottom:  10, // Rebuilt snapshot from the latest HEAD
		},
		gapped: 2,
	}
	test.test(t)
	test.teardown()
}

// Tests a Geth that was running with snapshot enabled and had SetHead applied.
// In this case the head is rewound to the target (with state available). After
// that the chain is restarted and the original disk layer is kept.
func TestSetHeadWithNewSnapshot(t *testing.T) {
	// Chain:
	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
	//
	// Commit:   G
	// Snapshot: G
	//
	// SetHead(4)
	//
	// ------------------------------
	//
	// Expected in leveldb:
	//   G->C1->C2->C3->C4
	//
	// Expected head header    : C4
	// Expected head fast block: C4
	// Expected head block     : C4
	// Expected snapshot disk  : G
	test := &setHeadSnapshotTest{
		snapshotTestBasic: snapshotTestBasic{
			chainBlocks:        8,
			snapshotBlock:      0,
			commitBlock:        0,
			expCanonicalBlocks: 4,
			expHeadHeader:      4,
			expHeadFastBlock:   4,
			expHeadBlock:       4,
			expSnapshotBottom:  0, // The initial disk layer is built from the genesis
		},
		setHead: 4,
	}
	test.test(t)
	test.teardown()
}

// Tests a Geth that was running with a complete snapshot and then imports a few
// more blocks on top without enabling the snapshot. After the restart, a crash
// happens. Check that everything is ok after the final restart.
func TestRecoverSnapshotFromWipingCrash(t *testing.T) {
	// Chain:
	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
	//
	// Commit:   G
	// Snapshot: G
	//
	// SetHead(0)
	//
	// ------------------------------
	//
	// Expected in leveldb:
	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10
	//
	// Expected head header    : C10
	// Expected head fast block: C10
	// Expected head block     : C10
	// Expected snapshot disk  : C10
	test := &wipeCrashSnapshotTest{
		snapshotTestBasic: snapshotTestBasic{
			chainBlocks:        8,
			snapshotBlock:      4,
			commitBlock:        0,
			expCanonicalBlocks: 10,
			expHeadHeader:      10,
			expHeadFastBlock:   10,
			expHeadBlock:       10,
			expSnapshotBottom:  10,
		},
		newBlocks: 2,
	}
	test.test(t)
	test.teardown()
}