2ce00adb55
* focus on performance improvement in many aspects.
  1. Do BlockBody verification concurrently;
  2. Do calculation of intermediate root concurrently;
  3. Preload accounts before processing blocks;
  4. Make the snapshot layers configurable;
  5. Reuse some objects to reduce GC.

* rlp: improve decoder stream implementation (#22858)

  This commit makes various cleanup changes to rlp.Stream.

  * rlp: shrink Stream struct

    This removes a lot of unused padding space in Stream by reordering the fields.
    The size of Stream changes from 120 bytes to 88 bytes. Stream instances are
    internally cached and reused using sync.Pool, so this does not improve performance.

  * rlp: simplify list stack

    The list stack kept track of the size of the current list context as well as the
    current offset into it. The size had to be stored in the stack in order to subtract
    it from the remaining bytes of any enclosing list in ListEnd. It seems that this can
    be implemented in a simpler way: just subtract the size from the enclosing list
    context in List instead.

* rlp: use atomic.Value for type cache (#22902)

  All encoding/decoding operations read the type cache to find the writer/decoder
  function responsible for a type. When analyzing CPU profiles of geth during sync,
  I found that the use of sync.RWMutex in cache lookups appears in the profiles. It
  seems we are running into CPU cache contention problems when package rlp is heavily
  used on all CPU cores during sync.

  This change makes it use atomic.Value + a writer lock instead of sync.RWMutex. In
  the common case where the typeinfo entry is present in the cache, we simply fetch
  the map and lookup the type.

* rlp: optimize byte array handling (#22924)

  This change improves the performance of encoding/decoding [N]byte.

  name                     old time/op    new time/op    delta
  DecodeByteArrayStruct-8    336ns ± 0%     246ns ± 0%   -26.98%  (p=0.000 n=9+10)
  EncodeByteArrayStruct-8    225ns ± 1%     148ns ± 1%   -34.12%  (p=0.000 n=10+10)

  name                     old alloc/op   new alloc/op   delta
  DecodeByteArrayStruct-8     120B ± 0%       48B ± 0%   -60.00%  (p=0.000 n=10+10)
  EncodeByteArrayStruct-8    0.00B          0.00B           ~     (all equal)

* rlp: optimize big.Int decoding for size <= 32 bytes (#22927)

  This change grows the static integer buffer in Stream to 32 bytes, making it possible
  to decode 256bit integers without allocating a temporary buffer.

  In the recent commit 088da24, Stream struct size decreased from 120 bytes down to 88
  bytes. This commit grows the struct to 112 bytes again, but the size change will not
  degrade performance because Stream instances are internally cached in sync.Pool.

  name             old time/op    new time/op    delta
  DecodeBigInts-8    12.2µs ± 0%     8.6µs ± 4%   -29.58%  (p=0.000 n=9+10)

  name             old speed      new speed      delta
  DecodeBigInts-8   230MB/s ± 0%   326MB/s ± 4%   +42.04%  (p=0.000 n=9+10)

* eth/protocols/eth, les: avoid Raw() when decoding HashOrNumber (#22841)

  Getting the raw value is not necessary to decode this type, and decoding it directly
  from the stream is faster.

* fix testcase
* debug no lazy
* fix can not repair
* address comments

Co-authored-by: Felix Lange <fjl@twurst.com>
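As a rough illustration of the atomic.Value read path described in the rlp type-cache
change above, here is a minimal, hypothetical sketch (not the actual rlp package code;
typeinfo and the cache layout are simplified stand-ins): readers fetch the current map
without taking a lock, while writers copy-on-write under a mutex and publish a new map.

package main

import (
	"fmt"
	"reflect"
	"sync"
	"sync/atomic"
)

// typeinfo is a placeholder for the real per-type encoder/decoder pair.
type typeinfo struct{ name string }

type typeCache struct {
	cur atomic.Value // holds map[reflect.Type]*typeinfo, read without locking
	mu  sync.Mutex   // serializes writers only
}

func newTypeCache() *typeCache {
	c := new(typeCache)
	c.cur.Store(make(map[reflect.Type]*typeinfo))
	return c
}

// info returns the cached entry for typ, generating and publishing it on a miss.
func (c *typeCache) info(typ reflect.Type) *typeinfo {
	if info, ok := c.cur.Load().(map[reflect.Type]*typeinfo)[typ]; ok {
		return info // fast path: lock-free map fetch + lookup
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	old := c.cur.Load().(map[reflect.Type]*typeinfo)
	if info, ok := old[typ]; ok {
		return info // another writer raced us and already added it
	}
	next := make(map[reflect.Type]*typeinfo, len(old)+1)
	for k, v := range old {
		next[k] = v
	}
	info := &typeinfo{name: typ.String()}
	next[typ] = info
	c.cur.Store(next) // publish the new immutable map
	return info
}

func main() {
	c := newTypeCache()
	fmt.Println(c.info(reflect.TypeOf(uint64(0))).name)
}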
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Tests that setting the chain head backwards doesn't leave the database in some
// strange state with gaps in the chain, nor with block data dangling in the future.

package core

import (
	"fmt"
	"io/ioutil"
	"math/big"
	"os"
	"strings"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/params"
)

// rewindTest is a test case for chain rollback upon user request.
type rewindTest struct {
	canonicalBlocks int     // Number of blocks to generate for the canonical chain (heavier)
	sidechainBlocks int     // Number of blocks to generate for the side chain (lighter)
	freezeThreshold uint64  // Block number until which to move things into the freezer
	commitBlock     uint64  // Block number for which to commit the state to disk
	pivotBlock      *uint64 // Pivot block number in case of fast sync

	setheadBlock       uint64 // Block number to set head back to
	expCanonicalBlocks int    // Number of canonical blocks expected to remain in the database (excl. genesis)
	expSidechainBlocks int    // Number of sidechain blocks expected to remain in the database (excl. genesis)
	expFrozen          int    // Number of canonical blocks expected to be in the freezer (incl. genesis)
	expHeadHeader      uint64 // Block number of the expected head header
	expHeadFastBlock   uint64 // Block number of the expected head fast sync block
	expHeadBlock       uint64 // Block number of the expected head full block
}
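// dump returns a textual summary of the test scenario: the generated canonical
// and side chains, the freezer/commit/pivot setup and the expected post-SetHead
// (or post-crash) state, in the same layout as the comments on the tests below.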
func (tt *rewindTest) dump(crash bool) string {
	buffer := new(strings.Builder)

	fmt.Fprint(buffer, "Chain:\n G")
	for i := 0; i < tt.canonicalBlocks; i++ {
		fmt.Fprintf(buffer, "->C%d", i+1)
	}
	fmt.Fprint(buffer, " (HEAD)\n")
	if tt.sidechainBlocks > 0 {
		fmt.Fprintf(buffer, " └")
		for i := 0; i < tt.sidechainBlocks; i++ {
			fmt.Fprintf(buffer, "->S%d", i+1)
		}
		fmt.Fprintf(buffer, "\n")
	}
	fmt.Fprintf(buffer, "\n")

	if tt.canonicalBlocks > int(tt.freezeThreshold) {
		fmt.Fprint(buffer, "Frozen:\n G")
		for i := 0; i < tt.canonicalBlocks-int(tt.freezeThreshold); i++ {
			fmt.Fprintf(buffer, "->C%d", i+1)
		}
		fmt.Fprintf(buffer, "\n\n")
	} else {
		fmt.Fprintf(buffer, "Frozen: none\n")
	}
	fmt.Fprintf(buffer, "Commit: G")
	if tt.commitBlock > 0 {
		fmt.Fprintf(buffer, ", C%d", tt.commitBlock)
	}
	fmt.Fprint(buffer, "\n")

	if tt.pivotBlock == nil {
		fmt.Fprintf(buffer, "Pivot : none\n")
	} else {
		fmt.Fprintf(buffer, "Pivot : C%d\n", *tt.pivotBlock)
	}
	if crash {
		fmt.Fprintf(buffer, "\nCRASH\n\n")
	} else {
		fmt.Fprintf(buffer, "\nSetHead(%d)\n\n", tt.setheadBlock)
	}
	fmt.Fprintf(buffer, "------------------------------\n\n")

	if tt.expFrozen > 0 {
		fmt.Fprint(buffer, "Expected in freezer:\n G")
		for i := 0; i < tt.expFrozen-1; i++ {
			fmt.Fprintf(buffer, "->C%d", i+1)
		}
		fmt.Fprintf(buffer, "\n\n")
	}
	if tt.expFrozen > 0 {
		if tt.expFrozen >= tt.expCanonicalBlocks {
			fmt.Fprintf(buffer, "Expected in leveldb: none\n")
		} else {
			fmt.Fprintf(buffer, "Expected in leveldb:\n C%d)", tt.expFrozen-1)
			for i := tt.expFrozen - 1; i < tt.expCanonicalBlocks; i++ {
				fmt.Fprintf(buffer, "->C%d", i+1)
			}
			fmt.Fprint(buffer, "\n")
			if tt.expSidechainBlocks > tt.expFrozen {
				fmt.Fprintf(buffer, " └")
				for i := tt.expFrozen - 1; i < tt.expSidechainBlocks; i++ {
					fmt.Fprintf(buffer, "->S%d", i+1)
				}
				fmt.Fprintf(buffer, "\n")
			}
		}
	} else {
		fmt.Fprint(buffer, "Expected in leveldb:\n G")
		for i := tt.expFrozen; i < tt.expCanonicalBlocks; i++ {
			fmt.Fprintf(buffer, "->C%d", i+1)
		}
		fmt.Fprint(buffer, "\n")
		if tt.expSidechainBlocks > tt.expFrozen {
			fmt.Fprintf(buffer, " └")
			for i := tt.expFrozen; i < tt.expSidechainBlocks; i++ {
				fmt.Fprintf(buffer, "->S%d", i+1)
			}
			fmt.Fprintf(buffer, "\n")
		}
	}
	fmt.Fprintf(buffer, "\n")
	fmt.Fprintf(buffer, "Expected head header : C%d\n", tt.expHeadHeader)
	fmt.Fprintf(buffer, "Expected head fast block: C%d\n", tt.expHeadFastBlock)
	if tt.expHeadBlock == 0 {
		fmt.Fprintf(buffer, "Expected head block : G\n")
	} else {
		fmt.Fprintf(buffer, "Expected head block : C%d\n", tt.expHeadBlock)
	}
	return buffer.String()
}

// Tests a sethead for a short canonical chain where a recent block was already
// committed to disk and then sethead was called. In this case we expect the full
// chain to be rolled back to the committed block. Everything above the sethead
// point should be deleted. In between the committed block and the requested head
// the data can remain as "fast sync" data to avoid redownloading it.
func TestShortSetHead(t *testing.T) { testShortSetHead(t, false) }
func TestShortSetHeadWithSnapshots(t *testing.T) { testShortSetHead(t, true) }

func testShortSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
	//
	// Frozen: none
	// Commit: G, C4
	// Pivot : none
	//
	// SetHead(7)
	//
	// ------------------------------
	//
	// Expected in leveldb:
	// G->C1->C2->C3->C4->C5->C6->C7
	//
	// Expected head header : C7
	// Expected head fast block: C7
	// Expected head block : C4
	testSetHead(t, &rewindTest{
		canonicalBlocks: 8,
		sidechainBlocks: 0,
		freezeThreshold: 16,
		commitBlock: 4,
		pivotBlock: nil,
		setheadBlock: 7,
		expCanonicalBlocks: 7,
		expSidechainBlocks: 0,
		expFrozen: 0,
		expHeadHeader: 7,
		expHeadFastBlock: 7,
		expHeadBlock: 4,
	}, snapshots)
}

// Tests a sethead for a short canonical chain where the fast sync pivot point was
// already committed, after which sethead was called. In this case we expect the
// chain to behave like in full sync mode, rolling back to the committed block.
// Everything above the sethead point should be deleted. In between the committed
// block and the requested head the data can remain as "fast sync" data to avoid
// redownloading it.
func TestShortFastSyncedSetHead(t *testing.T) { testShortFastSyncedSetHead(t, false) }
func TestShortFastSyncedSetHeadWithSnapshots(t *testing.T) { testShortFastSyncedSetHead(t, true) }

func testShortFastSyncedSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
	//
	// Frozen: none
	// Commit: G, C4
	// Pivot : C4
	//
	// SetHead(7)
	//
	// ------------------------------
	//
	// Expected in leveldb:
	// G->C1->C2->C3->C4->C5->C6->C7
	//
	// Expected head header : C7
	// Expected head fast block: C7
	// Expected head block : C4
	testSetHead(t, &rewindTest{
		canonicalBlocks: 8,
		sidechainBlocks: 0,
		freezeThreshold: 16,
		commitBlock: 4,
		pivotBlock: uint64ptr(4),
		setheadBlock: 7,
		expCanonicalBlocks: 7,
		expSidechainBlocks: 0,
		expFrozen: 0,
		expHeadHeader: 7,
		expHeadFastBlock: 7,
		expHeadBlock: 4,
	}, snapshots)
}

// Tests a sethead for a short canonical chain where the fast sync pivot point was
// not yet committed, but sethead was called. In this case we expect the chain to
// detect that it was fast syncing and delete everything from the new head, since
// we can just pick up fast syncing from there. The head full block should be set
// to the genesis.
func TestShortFastSyncingSetHead(t *testing.T) { testShortFastSyncingSetHead(t, false) }
func TestShortFastSyncingSetHeadWithSnapshots(t *testing.T) { testShortFastSyncingSetHead(t, true) }

func testShortFastSyncingSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
	//
	// Frozen: none
	// Commit: G
	// Pivot : C4
	//
	// SetHead(7)
	//
	// ------------------------------
	//
	// Expected in leveldb:
	// G->C1->C2->C3->C4->C5->C6->C7
	//
	// Expected head header : C7
	// Expected head fast block: C7
	// Expected head block : G
	testSetHead(t, &rewindTest{
		canonicalBlocks: 8,
		sidechainBlocks: 0,
		freezeThreshold: 16,
		commitBlock: 0,
		pivotBlock: uint64ptr(4),
		setheadBlock: 7,
		expCanonicalBlocks: 7,
		expSidechainBlocks: 0,
		expFrozen: 0,
		expHeadHeader: 7,
		expHeadFastBlock: 7,
		expHeadBlock: 0,
	}, snapshots)
}

// Tests a sethead for a short canonical chain and a shorter side chain, where a
// recent block was already committed to disk and then sethead was called. In this
// test scenario the side chain is below the committed block. In this case we expect
// the canonical full chain to be rolled back to the committed block. Everything
// above the sethead point should be deleted. In between the committed block and
// the requested head the data can remain as "fast sync" data to avoid redownloading
// it. The side chain should be left alone as it was shorter.
func TestShortOldForkedSetHead(t *testing.T) { testShortOldForkedSetHead(t, false) }
func TestShortOldForkedSetHeadWithSnapshots(t *testing.T) { testShortOldForkedSetHead(t, true) }

func testShortOldForkedSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
	// └->S1->S2->S3
	//
	// Frozen: none
	// Commit: G, C4
	// Pivot : none
	//
	// SetHead(7)
	//
	// ------------------------------
	//
	// Expected in leveldb:
	// G->C1->C2->C3->C4->C5->C6->C7
	// └->S1->S2->S3
	//
	// Expected head header : C7
	// Expected head fast block: C7
	// Expected head block : C4
	testSetHead(t, &rewindTest{
		canonicalBlocks: 8,
		sidechainBlocks: 3,
		freezeThreshold: 16,
		commitBlock: 4,
		pivotBlock: nil,
		setheadBlock: 7,
		expCanonicalBlocks: 7,
		expSidechainBlocks: 3,
		expFrozen: 0,
		expHeadHeader: 7,
		expHeadFastBlock: 7,
		expHeadBlock: 4,
	}, snapshots)
}

// Tests a sethead for a short canonical chain and a shorter side chain, where
// the fast sync pivot point was already committed to disk and then sethead was
// called. In this test scenario the side chain is below the committed block. In
// this case we expect the canonical full chain to be rolled back to the committed
// block. Everything above the sethead point should be deleted. In between the
// committed block and the requested head the data can remain as "fast sync" data
// to avoid redownloading it. The side chain should be left alone as it was shorter.
func TestShortOldForkedFastSyncedSetHead(t *testing.T) {
	testShortOldForkedFastSyncedSetHead(t, false)
}
func TestShortOldForkedFastSyncedSetHeadWithSnapshots(t *testing.T) {
	testShortOldForkedFastSyncedSetHead(t, true)
}

func testShortOldForkedFastSyncedSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
	// └->S1->S2->S3
	//
	// Frozen: none
	// Commit: G, C4
	// Pivot : C4
	//
	// SetHead(7)
	//
	// ------------------------------
	//
	// Expected in leveldb:
	// G->C1->C2->C3->C4->C5->C6->C7
	// └->S1->S2->S3
	//
	// Expected head header : C7
	// Expected head fast block: C7
	// Expected head block : C4
	testSetHead(t, &rewindTest{
		canonicalBlocks: 8,
		sidechainBlocks: 3,
		freezeThreshold: 16,
		commitBlock: 4,
		pivotBlock: uint64ptr(4),
		setheadBlock: 7,
		expCanonicalBlocks: 7,
		expSidechainBlocks: 3,
		expFrozen: 0,
		expHeadHeader: 7,
		expHeadFastBlock: 7,
		expHeadBlock: 4,
	}, snapshots)
}

// Tests a sethead for a short canonical chain and a shorter side chain, where
// the fast sync pivot point was not yet committed, but sethead was called. In this
// test scenario the side chain is below the committed block. In this case we expect
// the chain to detect that it was fast syncing and delete everything from the new
// head, since we can just pick up fast syncing from there. The head full block
// should be set to the genesis.
func TestShortOldForkedFastSyncingSetHead(t *testing.T) {
	testShortOldForkedFastSyncingSetHead(t, false)
}
func TestShortOldForkedFastSyncingSetHeadWithSnapshots(t *testing.T) {
	testShortOldForkedFastSyncingSetHead(t, true)
}

func testShortOldForkedFastSyncingSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
	// └->S1->S2->S3
	//
	// Frozen: none
	// Commit: G
	// Pivot : C4
	//
	// SetHead(7)
	//
	// ------------------------------
	//
	// Expected in leveldb:
	// G->C1->C2->C3->C4->C5->C6->C7
	// └->S1->S2->S3
	//
	// Expected head header : C7
	// Expected head fast block: C7
	// Expected head block : G
	testSetHead(t, &rewindTest{
		canonicalBlocks: 8,
		sidechainBlocks: 3,
		freezeThreshold: 16,
		commitBlock: 0,
		pivotBlock: uint64ptr(4),
		setheadBlock: 7,
		expCanonicalBlocks: 7,
		expSidechainBlocks: 3,
		expFrozen: 0,
		expHeadHeader: 7,
		expHeadFastBlock: 7,
		expHeadBlock: 0,
	}, snapshots)
}

// Tests a sethead for a short canonical chain and a shorter side chain, where a
// recent block was already committed to disk and then sethead was called. In this
// test scenario the side chain reaches above the committed block. In this case we
// expect the canonical full chain to be rolled back to the committed block. All
// data above the sethead point should be deleted. In between the committed block
// and the requested head the data can remain as "fast sync" data to avoid having
// to redownload it. The side chain should be truncated to the head set.
//
// The side chain could be left to be if the fork point was before the new head
// we are deleting to, but it would be exceedingly hard to detect that case and
// properly handle it, so we'll trade extra work in exchange for simpler code.
func TestShortNewlyForkedSetHead(t *testing.T) { testShortNewlyForkedSetHead(t, false) }
func TestShortNewlyForkedSetHeadWithSnapshots(t *testing.T) { testShortNewlyForkedSetHead(t, true) }

func testShortNewlyForkedSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD)
	// └->S1->S2->S3->S4->S5->S6->S7->S8
	//
	// Frozen: none
	// Commit: G, C4
	// Pivot : none
	//
	// SetHead(7)
	//
	// ------------------------------
	//
	// Expected in leveldb:
	// G->C1->C2->C3->C4->C5->C6->C7
	// └->S1->S2->S3->S4->S5->S6->S7
	//
	// Expected head header : C7
	// Expected head fast block: C7
	// Expected head block : C4
	testSetHead(t, &rewindTest{
		canonicalBlocks: 10,
		sidechainBlocks: 8,
		freezeThreshold: 16,
		commitBlock: 4,
		pivotBlock: nil,
		setheadBlock: 7,
		expCanonicalBlocks: 7,
		expSidechainBlocks: 7,
		expFrozen: 0,
		expHeadHeader: 7,
		expHeadFastBlock: 7,
		expHeadBlock: 4,
	}, snapshots)
}

// Tests a sethead for a short canonical chain and a shorter side chain, where
// the fast sync pivot point was already committed to disk and then sethead was
// called. In this case we expect the canonical full chain to be rolled back to
// the committed block. All data above the sethead point should be deleted. In
// between the committed block and the requested head the data can remain as
// "fast sync" data to avoid having to redownload it. The side chain should be
// truncated to the head set.
//
// The side chain could be left to be if the fork point was before the new head
// we are deleting to, but it would be exceedingly hard to detect that case and
// properly handle it, so we'll trade extra work in exchange for simpler code.
func TestShortNewlyForkedFastSyncedSetHead(t *testing.T) {
	testShortNewlyForkedFastSyncedSetHead(t, false)
}
func TestShortNewlyForkedFastSyncedSetHeadWithSnapshots(t *testing.T) {
	testShortNewlyForkedFastSyncedSetHead(t, true)
}

func testShortNewlyForkedFastSyncedSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD)
	// └->S1->S2->S3->S4->S5->S6->S7->S8
	//
	// Frozen: none
	// Commit: G, C4
	// Pivot : C4
	//
	// SetHead(7)
	//
	// ------------------------------
	//
	// Expected in leveldb:
	// G->C1->C2->C3->C4->C5->C6->C7
	// └->S1->S2->S3->S4->S5->S6->S7
	//
	// Expected head header : C7
	// Expected head fast block: C7
	// Expected head block : C4
	testSetHead(t, &rewindTest{
		canonicalBlocks: 10,
		sidechainBlocks: 8,
		freezeThreshold: 16,
		commitBlock: 4,
		pivotBlock: uint64ptr(4),
		setheadBlock: 7,
		expCanonicalBlocks: 7,
		expSidechainBlocks: 7,
		expFrozen: 0,
		expHeadHeader: 7,
		expHeadFastBlock: 7,
		expHeadBlock: 4,
	}, snapshots)
}

// Tests a sethead for a short canonical chain and a shorter side chain, where
// the fast sync pivot point was not yet committed, but sethead was called. In
// this test scenario the side chain reaches above the committed block. In this
// case we expect the chain to detect that it was fast syncing and delete
// everything from the new head, since we can just pick up fast syncing from
// there.
//
// The side chain could be left to be if the fork point was before the new head
// we are deleting to, but it would be exceedingly hard to detect that case and
// properly handle it, so we'll trade extra work in exchange for simpler code.
func TestShortNewlyForkedFastSyncingSetHead(t *testing.T) {
	testShortNewlyForkedFastSyncingSetHead(t, false)
}
func TestShortNewlyForkedFastSyncingSetHeadWithSnapshots(t *testing.T) {
	testShortNewlyForkedFastSyncingSetHead(t, true)
}

func testShortNewlyForkedFastSyncingSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD)
	// └->S1->S2->S3->S4->S5->S6->S7->S8
	//
	// Frozen: none
	// Commit: G
	// Pivot : C4
	//
	// SetHead(7)
	//
	// ------------------------------
	//
	// Expected in leveldb:
	// G->C1->C2->C3->C4->C5->C6->C7
	// └->S1->S2->S3->S4->S5->S6->S7
	//
	// Expected head header : C7
	// Expected head fast block: C7
	// Expected head block : G
	testSetHead(t, &rewindTest{
		canonicalBlocks: 10,
		sidechainBlocks: 8,
		freezeThreshold: 16,
		commitBlock: 0,
		pivotBlock: uint64ptr(4),
		setheadBlock: 7,
		expCanonicalBlocks: 7,
		expSidechainBlocks: 7,
		expFrozen: 0,
		expHeadHeader: 7,
		expHeadFastBlock: 7,
		expHeadBlock: 0,
	}, snapshots)
}

// Tests a sethead for a short canonical chain and a longer side chain, where a
// recent block was already committed to disk and then sethead was called. In this
// case we expect the canonical full chain to be rolled back to the committed block.
// All data above the sethead point should be deleted. In between the committed
// block and the requested head the data can remain as "fast sync" data to avoid
// having to redownload it. The side chain should be truncated to the head set.
//
// The side chain could be left to be if the fork point was before the new head
// we are deleting to, but it would be exceedingly hard to detect that case and
// properly handle it, so we'll trade extra work in exchange for simpler code.
func TestShortReorgedSetHead(t *testing.T) { testShortReorgedSetHead(t, false) }
func TestShortReorgedSetHeadWithSnapshots(t *testing.T) { testShortReorgedSetHead(t, true) }

func testShortReorgedSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
	// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
	//
	// Frozen: none
	// Commit: G, C4
	// Pivot : none
	//
	// SetHead(7)
	//
	// ------------------------------
	//
	// Expected in leveldb:
	// G->C1->C2->C3->C4->C5->C6->C7
	// └->S1->S2->S3->S4->S5->S6->S7
	//
	// Expected head header : C7
	// Expected head fast block: C7
	// Expected head block : C4
	testSetHead(t, &rewindTest{
		canonicalBlocks: 8,
		sidechainBlocks: 10,
		freezeThreshold: 16,
		commitBlock: 4,
		pivotBlock: nil,
		setheadBlock: 7,
		expCanonicalBlocks: 7,
		expSidechainBlocks: 7,
		expFrozen: 0,
		expHeadHeader: 7,
		expHeadFastBlock: 7,
		expHeadBlock: 4,
	}, snapshots)
}

// Tests a sethead for a short canonical chain and a longer side chain, where
// the fast sync pivot point was already committed to disk and then sethead was
// called. In this case we expect the canonical full chain to be rolled back to
// the committed block. All data above the sethead point should be deleted. In
// between the committed block and the requested head the data can remain as
// "fast sync" data to avoid having to redownload it. The side chain should be
// truncated to the head set.
//
// The side chain could be left to be if the fork point was before the new head
// we are deleting to, but it would be exceedingly hard to detect that case and
// properly handle it, so we'll trade extra work in exchange for simpler code.
func TestShortReorgedFastSyncedSetHead(t *testing.T) {
	testShortReorgedFastSyncedSetHead(t, false)
}
func TestShortReorgedFastSyncedSetHeadWithSnapshots(t *testing.T) {
	testShortReorgedFastSyncedSetHead(t, true)
}

func testShortReorgedFastSyncedSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
	// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
	//
	// Frozen: none
	// Commit: G, C4
	// Pivot : C4
	//
	// SetHead(7)
	//
	// ------------------------------
	//
	// Expected in leveldb:
	// G->C1->C2->C3->C4->C5->C6->C7
	// └->S1->S2->S3->S4->S5->S6->S7
	//
	// Expected head header : C7
	// Expected head fast block: C7
	// Expected head block : C4
	testSetHead(t, &rewindTest{
		canonicalBlocks: 8,
		sidechainBlocks: 10,
		freezeThreshold: 16,
		commitBlock: 4,
		pivotBlock: uint64ptr(4),
		setheadBlock: 7,
		expCanonicalBlocks: 7,
		expSidechainBlocks: 7,
		expFrozen: 0,
		expHeadHeader: 7,
		expHeadFastBlock: 7,
		expHeadBlock: 4,
	}, snapshots)
}

// Tests a sethead for a short canonical chain and a longer side chain, where
// the fast sync pivot point was not yet committed, but sethead was called. In
// this case we expect the chain to detect that it was fast syncing and delete
// everything from the new head, since we can just pick up fast syncing from
// there.
//
// The side chain could be left to be if the fork point was before the new head
// we are deleting to, but it would be exceedingly hard to detect that case and
// properly handle it, so we'll trade extra work in exchange for simpler code.
func TestShortReorgedFastSyncingSetHead(t *testing.T) {
	testShortReorgedFastSyncingSetHead(t, false)
}
func TestShortReorgedFastSyncingSetHeadWithSnapshots(t *testing.T) {
	testShortReorgedFastSyncingSetHead(t, true)
}

func testShortReorgedFastSyncingSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
	// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
	//
	// Frozen: none
	// Commit: G
	// Pivot : C4
	//
	// SetHead(7)
	//
	// ------------------------------
	//
	// Expected in leveldb:
	// G->C1->C2->C3->C4->C5->C6->C7
	// └->S1->S2->S3->S4->S5->S6->S7
	//
	// Expected head header : C7
	// Expected head fast block: C7
	// Expected head block : G
	testSetHead(t, &rewindTest{
		canonicalBlocks: 8,
		sidechainBlocks: 10,
		freezeThreshold: 16,
		commitBlock: 0,
		pivotBlock: uint64ptr(4),
		setheadBlock: 7,
		expCanonicalBlocks: 7,
		expSidechainBlocks: 7,
		expFrozen: 0,
		expHeadHeader: 7,
		expHeadFastBlock: 7,
		expHeadBlock: 0,
	}, snapshots)
}

// Tests a sethead for a long canonical chain with frozen blocks where a recent
// block - newer than the ancient limit - was already committed to disk and then
// sethead was called. In this case we expect the full chain to be rolled back
// to the committed block. Everything above the sethead point should be deleted.
// In between the committed block and the requested head the data can remain as
// "fast sync" data to avoid redownloading it.
func TestLongShallowSetHead(t *testing.T) { testLongShallowSetHead(t, false) }
func TestLongShallowSetHeadWithSnapshots(t *testing.T) { testLongShallowSetHead(t, true) }

func testLongShallowSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
	//
	// Frozen:
	// G->C1->C2
	//
	// Commit: G, C4
	// Pivot : none
	//
	// SetHead(6)
	//
	// ------------------------------
	//
	// Expected in freezer:
	// G->C1->C2
	//
	// Expected in leveldb:
	// C2)->C3->C4->C5->C6
	//
	// Expected head header : C6
	// Expected head fast block: C6
	// Expected head block : C4
	testSetHead(t, &rewindTest{
		canonicalBlocks: 18,
		sidechainBlocks: 0,
		freezeThreshold: 16,
		commitBlock: 4,
		pivotBlock: nil,
		setheadBlock: 6,
		expCanonicalBlocks: 6,
		expSidechainBlocks: 0,
		expFrozen: 3,
		expHeadHeader: 6,
		expHeadFastBlock: 6,
		expHeadBlock: 4,
	}, snapshots)
}

// Tests a sethead for a long canonical chain with frozen blocks where a recent
// block - older than the ancient limit - was already committed to disk and then
// sethead was called. In this case we expect the full chain to be rolled back
// to the committed block. Since the ancient limit was underflown, everything
// needs to be deleted onwards to avoid creating a gap.
func TestLongDeepSetHead(t *testing.T) { testLongDeepSetHead(t, false) }
func TestLongDeepSetHeadWithSnapshots(t *testing.T) { testLongDeepSetHead(t, true) }

func testLongDeepSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
	//
	// Frozen:
	// G->C1->C2->C3->C4->C5->C6->C7->C8
	//
	// Commit: G, C4
	// Pivot : none
	//
	// SetHead(6)
	//
	// ------------------------------
	//
	// Expected in freezer:
	// G->C1->C2->C3->C4
	//
	// Expected in leveldb: none
	//
	// Expected head header : C4
	// Expected head fast block: C4
	// Expected head block : C4
	testSetHead(t, &rewindTest{
		canonicalBlocks: 24,
		sidechainBlocks: 0,
		freezeThreshold: 16,
		commitBlock: 4,
		pivotBlock: nil,
		setheadBlock: 6,
		expCanonicalBlocks: 4,
		expSidechainBlocks: 0,
		expFrozen: 5,
		expHeadHeader: 4,
		expHeadFastBlock: 4,
		expHeadBlock: 4,
	}, snapshots)
}

// Tests a sethead for a long canonical chain with frozen blocks where the fast
// sync pivot point - newer than the ancient limit - was already committed, after
// which sethead was called. In this case we expect the full chain to be rolled
// back to the committed block. Everything above the sethead point should be
// deleted. In between the committed block and the requested head the data can
// remain as "fast sync" data to avoid redownloading it.
func TestLongFastSyncedShallowSetHead(t *testing.T) {
	testLongFastSyncedShallowSetHead(t, false)
}
func TestLongFastSyncedShallowSetHeadWithSnapshots(t *testing.T) {
	testLongFastSyncedShallowSetHead(t, true)
}

func testLongFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
	//
	// Frozen:
	// G->C1->C2
	//
	// Commit: G, C4
	// Pivot : C4
	//
	// SetHead(6)
	//
	// ------------------------------
	//
	// Expected in freezer:
	// G->C1->C2
	//
	// Expected in leveldb:
	// C2)->C3->C4->C5->C6
	//
	// Expected head header : C6
	// Expected head fast block: C6
	// Expected head block : C4
	testSetHead(t, &rewindTest{
		canonicalBlocks: 18,
		sidechainBlocks: 0,
		freezeThreshold: 16,
		commitBlock: 4,
		pivotBlock: uint64ptr(4),
		setheadBlock: 6,
		expCanonicalBlocks: 6,
		expSidechainBlocks: 0,
		expFrozen: 3,
		expHeadHeader: 6,
		expHeadFastBlock: 6,
		expHeadBlock: 4,
	}, snapshots)
}

// Tests a sethead for a long canonical chain with frozen blocks where the fast
// sync pivot point - older than the ancient limit - was already committed, after
// which sethead was called. In this case we expect the full chain to be rolled
// back to the committed block. Since the ancient limit was underflown, everything
// needs to be deleted onwards to avoid creating a gap.
func TestLongFastSyncedDeepSetHead(t *testing.T) { testLongFastSyncedDeepSetHead(t, false) }
func TestLongFastSyncedDeepSetHeadWithSnapshots(t *testing.T) { testLongFastSyncedDeepSetHead(t, true) }

func testLongFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
	//
	// Frozen:
	// G->C1->C2->C3->C4->C5->C6->C7->C8
	//
	// Commit: G, C4
	// Pivot : C4
	//
	// SetHead(6)
	//
	// ------------------------------
	//
	// Expected in freezer:
	// G->C1->C2->C3->C4
	//
	// Expected in leveldb: none
	//
	// Expected head header : C4
	// Expected head fast block: C4
	// Expected head block : C4
	testSetHead(t, &rewindTest{
		canonicalBlocks: 24,
		sidechainBlocks: 0,
		freezeThreshold: 16,
		commitBlock: 4,
		pivotBlock: uint64ptr(4),
		setheadBlock: 6,
		expCanonicalBlocks: 4,
		expSidechainBlocks: 0,
		expFrozen: 5,
		expHeadHeader: 4,
		expHeadFastBlock: 4,
		expHeadBlock: 4,
	}, snapshots)
}

// Tests a sethead for a long canonical chain with frozen blocks where the fast
// sync pivot point - newer than the ancient limit - was not yet committed, but
// sethead was called. In this case we expect the chain to detect that it was fast
// syncing and delete everything from the new head, since we can just pick up fast
// syncing from there.
func TestLongFastSyncingShallowSetHead(t *testing.T) {
	testLongFastSyncingShallowSetHead(t, false)
}
func TestLongFastSyncingShallowSetHeadWithSnapshots(t *testing.T) {
	testLongFastSyncingShallowSetHead(t, true)
}

func testLongFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
	//
	// Frozen:
	// G->C1->C2
	//
	// Commit: G
	// Pivot : C4
	//
	// SetHead(6)
	//
	// ------------------------------
	//
	// Expected in freezer:
	// G->C1->C2
	//
	// Expected in leveldb:
	// C2)->C3->C4->C5->C6
	//
	// Expected head header : C6
	// Expected head fast block: C6
	// Expected head block : G
	testSetHead(t, &rewindTest{
		canonicalBlocks: 18,
		sidechainBlocks: 0,
		freezeThreshold: 16,
		commitBlock: 0,
		pivotBlock: uint64ptr(4),
		setheadBlock: 6,
		expCanonicalBlocks: 6,
		expSidechainBlocks: 0,
		expFrozen: 3,
		expHeadHeader: 6,
		expHeadFastBlock: 6,
		expHeadBlock: 0,
	}, snapshots)
}

// Tests a sethead for a long canonical chain with frozen blocks where the fast
// sync pivot point - older than the ancient limit - was not yet committed, but
// sethead was called. In this case we expect the chain to detect that it was fast
// syncing and delete everything from the new head, since we can just pick up fast
// syncing from there.
func TestLongFastSyncingDeepSetHead(t *testing.T) {
	testLongFastSyncingDeepSetHead(t, false)
}
func TestLongFastSyncingDeepSetHeadWithSnapshots(t *testing.T) {
	testLongFastSyncingDeepSetHead(t, true)
}

func testLongFastSyncingDeepSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
	//
	// Frozen:
	// G->C1->C2->C3->C4->C5->C6->C7->C8
	//
	// Commit: G
	// Pivot : C4
	//
	// SetHead(6)
	//
	// ------------------------------
	//
	// Expected in freezer:
	// G->C1->C2->C3->C4->C5->C6
	//
	// Expected in leveldb: none
	//
	// Expected head header : C6
	// Expected head fast block: C6
	// Expected head block : G
	testSetHead(t, &rewindTest{
		canonicalBlocks: 24,
		sidechainBlocks: 0,
		freezeThreshold: 16,
		commitBlock: 0,
		pivotBlock: uint64ptr(4),
		setheadBlock: 6,
		expCanonicalBlocks: 6,
		expSidechainBlocks: 0,
		expFrozen: 7,
		expHeadHeader: 6,
		expHeadFastBlock: 6,
		expHeadBlock: 0,
	}, snapshots)
}

// Tests a sethead for a long canonical chain with frozen blocks and a shorter side
// chain, where a recent block - newer than the ancient limit - was already committed
// to disk and then sethead was called. In this case we expect the canonical full
// chain to be rolled back to the committed block. Everything above the sethead point
// should be deleted. In between the committed block and the requested head the data
// can remain as "fast sync" data to avoid redownloading it. The side chain is nuked
// by the freezer.
func TestLongOldForkedShallowSetHead(t *testing.T) {
	testLongOldForkedShallowSetHead(t, false)
}
func TestLongOldForkedShallowSetHeadWithSnapshots(t *testing.T) {
	testLongOldForkedShallowSetHead(t, true)
}

func testLongOldForkedShallowSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
	// └->S1->S2->S3
	//
	// Frozen:
	// G->C1->C2
	//
	// Commit: G, C4
	// Pivot : none
	//
	// SetHead(6)
	//
	// ------------------------------
	//
	// Expected in freezer:
	// G->C1->C2
	//
	// Expected in leveldb:
	// C2)->C3->C4->C5->C6
	//
	// Expected head header : C6
	// Expected head fast block: C6
	// Expected head block : C4
	testSetHead(t, &rewindTest{
		canonicalBlocks: 18,
		sidechainBlocks: 3,
		freezeThreshold: 16,
		commitBlock: 4,
		pivotBlock: nil,
		setheadBlock: 6,
		expCanonicalBlocks: 6,
		expSidechainBlocks: 0,
		expFrozen: 3,
		expHeadHeader: 6,
		expHeadFastBlock: 6,
		expHeadBlock: 4,
	}, snapshots)
}

// Tests a sethead for a long canonical chain with frozen blocks and a shorter side
// chain, where a recent block - older than the ancient limit - was already committed
// to disk and then sethead was called. In this case we expect the canonical full
// chain to be rolled back to the committed block. Since the ancient limit was
// underflown, everything needs to be deleted onwards to avoid creating a gap. The
// side chain is nuked by the freezer.
func TestLongOldForkedDeepSetHead(t *testing.T) { testLongOldForkedDeepSetHead(t, false) }
func TestLongOldForkedDeepSetHeadWithSnapshots(t *testing.T) { testLongOldForkedDeepSetHead(t, true) }

func testLongOldForkedDeepSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
	// └->S1->S2->S3
	//
	// Frozen:
	// G->C1->C2->C3->C4->C5->C6->C7->C8
	//
	// Commit: G, C4
	// Pivot : none
	//
	// SetHead(6)
	//
	// ------------------------------
	//
	// Expected in freezer:
	// G->C1->C2->C3->C4
	//
	// Expected in leveldb: none
	//
	// Expected head header : C4
	// Expected head fast block: C4
	// Expected head block : C4
	testSetHead(t, &rewindTest{
		canonicalBlocks: 24,
		sidechainBlocks: 3,
		freezeThreshold: 16,
		commitBlock: 4,
		pivotBlock: nil,
		setheadBlock: 6,
		expCanonicalBlocks: 4,
		expSidechainBlocks: 0,
		expFrozen: 5,
		expHeadHeader: 4,
		expHeadFastBlock: 4,
		expHeadBlock: 4,
	}, snapshots)
}

// Tests a sethead for a long canonical chain with frozen blocks and a shorter
// side chain, where the fast sync pivot point - newer than the ancient limit -
// was already committed to disk and then sethead was called. In this test scenario
// the side chain is below the committed block. In this case we expect the canonical
// full chain to be rolled back to the committed block. Everything above the
// sethead point should be deleted. In between the committed block and the
// requested head the data can remain as "fast sync" data to avoid redownloading
// it. The side chain is nuked by the freezer.
func TestLongOldForkedFastSyncedShallowSetHead(t *testing.T) {
	testLongOldForkedFastSyncedShallowSetHead(t, false)
}
func TestLongOldForkedFastSyncedShallowSetHeadWithSnapshots(t *testing.T) {
	testLongOldForkedFastSyncedShallowSetHead(t, true)
}

func testLongOldForkedFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
	// └->S1->S2->S3
	//
	// Frozen:
	// G->C1->C2
	//
	// Commit: G, C4
	// Pivot : C4
	//
	// SetHead(6)
	//
	// ------------------------------
	//
	// Expected in freezer:
	// G->C1->C2
	//
	// Expected in leveldb:
	// C2)->C3->C4->C5->C6
	//
	// Expected head header : C6
	// Expected head fast block: C6
	// Expected head block : C4
	testSetHead(t, &rewindTest{
		canonicalBlocks: 18,
		sidechainBlocks: 3,
		freezeThreshold: 16,
		commitBlock: 4,
		pivotBlock: uint64ptr(4),
		setheadBlock: 6,
		expCanonicalBlocks: 6,
		expSidechainBlocks: 0,
		expFrozen: 3,
		expHeadHeader: 6,
		expHeadFastBlock: 6,
		expHeadBlock: 4,
	}, snapshots)
}

// Tests a sethead for a long canonical chain with frozen blocks and a shorter
// side chain, where the fast sync pivot point - older than the ancient limit -
// was already committed to disk and then sethead was called. In this test scenario
// the side chain is below the committed block. In this case we expect the canonical
// full chain to be rolled back to the committed block. Since the ancient limit was
// underflown, everything needs to be deleted onwards to avoid creating a gap. The
// side chain is nuked by the freezer.
func TestLongOldForkedFastSyncedDeepSetHead(t *testing.T) {
	testLongOldForkedFastSyncedDeepSetHead(t, false)
}
func TestLongOldForkedFastSyncedDeepSetHeadWithSnapshots(t *testing.T) {
	testLongOldForkedFastSyncedDeepSetHead(t, true)
}

func testLongOldForkedFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
	// └->S1->S2->S3
	//
	// Frozen:
	// G->C1->C2->C3->C4->C5->C6->C7->C8
	//
	// Commit: G, C4
	// Pivot : C4
	//
	// SetHead(6)
	//
	// ------------------------------
	//
	// Expected in freezer:
	// G->C1->C2->C3->C4
	//
	// Expected in leveldb: none
	//
	// Expected head header : C4
	// Expected head fast block: C4
	// Expected head block : C4
	testSetHead(t, &rewindTest{
		canonicalBlocks: 24,
		sidechainBlocks: 3,
		freezeThreshold: 16,
		commitBlock: 4,
		pivotBlock: uint64ptr(4),
		setheadBlock: 6,
		expCanonicalBlocks: 4,
		expSidechainBlocks: 0,
		expFrozen: 5,
		expHeadHeader: 4,
		expHeadFastBlock: 4,
		expHeadBlock: 4,
	}, snapshots)
}

// Tests a sethead for a long canonical chain with frozen blocks and a shorter
// side chain, where the fast sync pivot point - newer than the ancient limit -
// was not yet committed, but sethead was called. In this test scenario the side
// chain is below the committed block. In this case we expect the chain to detect
// that it was fast syncing and delete everything from the new head, since we can
// just pick up fast syncing from there. The side chain is completely nuked by the
// freezer.
func TestLongOldForkedFastSyncingShallowSetHead(t *testing.T) {
	testLongOldForkedFastSyncingShallowSetHead(t, false)
}
func TestLongOldForkedFastSyncingShallowSetHeadWithSnapshots(t *testing.T) {
	testLongOldForkedFastSyncingShallowSetHead(t, true)
}

func testLongOldForkedFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
	// └->S1->S2->S3
	//
	// Frozen:
	// G->C1->C2
	//
	// Commit: G
	// Pivot : C4
	//
	// SetHead(6)
	//
	// ------------------------------
	//
	// Expected in freezer:
	// G->C1->C2
	//
	// Expected in leveldb:
	// C2)->C3->C4->C5->C6
	//
	// Expected head header : C6
	// Expected head fast block: C6
	// Expected head block : G
	testSetHead(t, &rewindTest{
		canonicalBlocks: 18,
		sidechainBlocks: 3,
		freezeThreshold: 16,
		commitBlock: 0,
		pivotBlock: uint64ptr(4),
		setheadBlock: 6,
		expCanonicalBlocks: 6,
		expSidechainBlocks: 0,
		expFrozen: 3,
		expHeadHeader: 6,
		expHeadFastBlock: 6,
		expHeadBlock: 0,
	}, snapshots)
}

// Tests a sethead for a long canonical chain with frozen blocks and a shorter
// side chain, where the fast sync pivot point - older than the ancient limit -
// was not yet committed, but sethead was called. In this test scenario the side
// chain is below the committed block. In this case we expect the chain to detect
// that it was fast syncing and delete everything from the new head, since we can
// just pick up fast syncing from there. The side chain is completely nuked by the
// freezer.
func TestLongOldForkedFastSyncingDeepSetHead(t *testing.T) {
	testLongOldForkedFastSyncingDeepSetHead(t, false)
}
func TestLongOldForkedFastSyncingDeepSetHeadWithSnapshots(t *testing.T) {
	testLongOldForkedFastSyncingDeepSetHead(t, true)
}

func testLongOldForkedFastSyncingDeepSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
	// └->S1->S2->S3
	//
	// Frozen:
	// G->C1->C2->C3->C4->C5->C6->C7->C8
	//
	// Commit: G
	// Pivot : C4
	//
	// SetHead(6)
	//
	// ------------------------------
	//
	// Expected in freezer:
	// G->C1->C2->C3->C4->C5->C6
	//
	// Expected in leveldb: none
	//
	// Expected head header : C6
	// Expected head fast block: C6
	// Expected head block : G
	testSetHead(t, &rewindTest{
		canonicalBlocks: 24,
		sidechainBlocks: 3,
		freezeThreshold: 16,
		commitBlock: 0,
		pivotBlock: uint64ptr(4),
		setheadBlock: 6,
		expCanonicalBlocks: 6,
		expSidechainBlocks: 0,
		expFrozen: 7,
		expHeadHeader: 6,
		expHeadFastBlock: 6,
		expHeadBlock: 0,
	}, snapshots)
}

// Tests a sethead for a long canonical chain with frozen blocks and a shorter
// side chain, where a recent block - newer than the ancient limit - was already
// committed to disk and then sethead was called. In this test scenario the side
// chain is above the committed block. In this case the freezer will delete the
// sidechain since it's dangling, reverting to TestLongShallowSetHead.
func TestLongNewerForkedShallowSetHead(t *testing.T) {
	testLongNewerForkedShallowSetHead(t, false)
}
func TestLongNewerForkedShallowSetHeadWithSnapshots(t *testing.T) {
	testLongNewerForkedShallowSetHead(t, true)
}

func testLongNewerForkedShallowSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
	// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
	//
	// Frozen:
	// G->C1->C2
	//
	// Commit: G, C4
	// Pivot : none
	//
	// SetHead(6)
	//
	// ------------------------------
	//
	// Expected in freezer:
	// G->C1->C2
	//
	// Expected in leveldb:
	// C2)->C3->C4->C5->C6
	//
	// Expected head header : C6
	// Expected head fast block: C6
	// Expected head block : C4
	testSetHead(t, &rewindTest{
		canonicalBlocks: 18,
		sidechainBlocks: 12,
		freezeThreshold: 16,
		commitBlock: 4,
		pivotBlock: nil,
		setheadBlock: 6,
		expCanonicalBlocks: 6,
		expSidechainBlocks: 0,
		expFrozen: 3,
		expHeadHeader: 6,
		expHeadFastBlock: 6,
		expHeadBlock: 4,
	}, snapshots)
}

// Tests a sethead for a long canonical chain with frozen blocks and a shorter
// side chain, where a recent block - older than the ancient limit - was already
// committed to disk and then sethead was called. In this test scenario the side
// chain is above the committed block. In this case the freezer will delete the
// sidechain since it's dangling, reverting to TestLongDeepSetHead.
func TestLongNewerForkedDeepSetHead(t *testing.T) {
	testLongNewerForkedDeepSetHead(t, false)
}
func TestLongNewerForkedDeepSetHeadWithSnapshots(t *testing.T) {
	testLongNewerForkedDeepSetHead(t, true)
}

func testLongNewerForkedDeepSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
	// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
	//
	// Frozen:
	// G->C1->C2->C3->C4->C5->C6->C7->C8
	//
	// Commit: G, C4
	// Pivot : none
	//
	// SetHead(6)
	//
	// ------------------------------
	//
	// Expected in freezer:
	// G->C1->C2->C3->C4
	//
	// Expected in leveldb: none
	//
	// Expected head header : C4
	// Expected head fast block: C4
	// Expected head block : C4
	testSetHead(t, &rewindTest{
		canonicalBlocks: 24,
		sidechainBlocks: 12,
		freezeThreshold: 16,
		commitBlock: 4,
		pivotBlock: nil,
		setheadBlock: 6,
		expCanonicalBlocks: 4,
		expSidechainBlocks: 0,
		expFrozen: 5,
		expHeadHeader: 4,
		expHeadFastBlock: 4,
		expHeadBlock: 4,
	}, snapshots)
}

// Tests a sethead for a long canonical chain with frozen blocks and a shorter
// side chain, where the fast sync pivot point - newer than the ancient limit -
// was already committed to disk and then sethead was called. In this test scenario
// the side chain is above the committed block. In this case the freezer will delete
// the sidechain since it's dangling, reverting to TestLongFastSyncedShallowSetHead.
func TestLongNewerForkedFastSyncedShallowSetHead(t *testing.T) {
	testLongNewerForkedFastSyncedShallowSetHead(t, false)
}
func TestLongNewerForkedFastSyncedShallowSetHeadWithSnapshots(t *testing.T) {
	testLongNewerForkedFastSyncedShallowSetHead(t, true)
}

func testLongNewerForkedFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
	// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
	//
	// Frozen:
	// G->C1->C2
	//
	// Commit: G, C4
	// Pivot : C4
	//
	// SetHead(6)
	//
	// ------------------------------
	//
	// Expected in freezer:
	// G->C1->C2
	//
	// Expected in leveldb:
	// C2)->C3->C4->C5->C6
	//
	// Expected head header : C6
	// Expected head fast block: C6
	// Expected head block : C4
	testSetHead(t, &rewindTest{
		canonicalBlocks: 18,
		sidechainBlocks: 12,
		freezeThreshold: 16,
		commitBlock: 4,
		pivotBlock: uint64ptr(4),
		setheadBlock: 6,
		expCanonicalBlocks: 6,
		expSidechainBlocks: 0,
		expFrozen: 3,
		expHeadHeader: 6,
		expHeadFastBlock: 6,
		expHeadBlock: 4,
	}, snapshots)
}

// Tests a sethead for a long canonical chain with frozen blocks and a shorter
// side chain, where the fast sync pivot point - older than the ancient limit -
// was already committed to disk and then sethead was called. In this test scenario
// the side chain is above the committed block. In this case the freezer will delete
// the sidechain since it's dangling, reverting to TestLongFastSyncedDeepSetHead.
func TestLongNewerForkedFastSyncedDeepSetHead(t *testing.T) {
	testLongNewerForkedFastSyncedDeepSetHead(t, false)
}
func TestLongNewerForkedFastSyncedDeepSetHeadWithSnapshots(t *testing.T) {
	testLongNewerForkedFastSyncedDeepSetHead(t, true)
}

func testLongNewerForkedFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
	// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
	//
	// Frozen:
	// G->C1->C2->C3->C4->C5->C6->C7->C8
	//
	// Commit: G, C4
	// Pivot : C4
	//
	// SetHead(6)
	//
	// ------------------------------
	//
	// Expected in freezer:
	// G->C1->C2->C3->C4
	//
	// Expected in leveldb: none
	//
	// Expected head header : C4
	// Expected head fast block: C4
	// Expected head block : C4
	testSetHead(t, &rewindTest{
		canonicalBlocks: 24,
		sidechainBlocks: 12,
		freezeThreshold: 16,
		commitBlock: 4,
		pivotBlock: uint64ptr(4),
		setheadBlock: 6,
		expCanonicalBlocks: 4,
		expSidechainBlocks: 0,
		expFrozen: 5,
		expHeadHeader: 4,
		expHeadFastBlock: 4,
		expHeadBlock: 4,
	}, snapshots)
}

// Tests a sethead for a long canonical chain with frozen blocks and a shorter
// side chain, where the fast sync pivot point - newer than the ancient limit -
// was not yet committed, but sethead was called. In this test scenario the side
// chain is above the committed block. In this case the freezer will delete the
// sidechain since it's dangling, reverting to TestLongFastSyncingShallowSetHead.
func TestLongNewerForkedFastSyncingShallowSetHead(t *testing.T) {
	testLongNewerForkedFastSyncingShallowSetHead(t, false)
}
func TestLongNewerForkedFastSyncingShallowSetHeadWithSnapshots(t *testing.T) {
	testLongNewerForkedFastSyncingShallowSetHead(t, true)
}

func testLongNewerForkedFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
	// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
	//
	// Frozen:
	// G->C1->C2
	//
	// Commit: G
	// Pivot : C4
	//
	// SetHead(6)
	//
	// ------------------------------
	//
	// Expected in freezer:
	// G->C1->C2
	//
	// Expected in leveldb:
	// C2)->C3->C4->C5->C6
	//
	// Expected head header : C6
	// Expected head fast block: C6
	// Expected head block : G
	testSetHead(t, &rewindTest{
		canonicalBlocks: 18,
		sidechainBlocks: 12,
		freezeThreshold: 16,
		commitBlock: 0,
		pivotBlock: uint64ptr(4),
		setheadBlock: 6,
		expCanonicalBlocks: 6,
		expSidechainBlocks: 0,
		expFrozen: 3,
		expHeadHeader: 6,
		expHeadFastBlock: 6,
		expHeadBlock: 0,
	}, snapshots)
}

// Tests a sethead for a long canonical chain with frozen blocks and a shorter
// side chain, where the fast sync pivot point - older than the ancient limit -
// was not yet committed, but sethead was called. In this test scenario the side
// chain is above the committed block. In this case the freezer will delete the
// sidechain since it's dangling, reverting to TestLongFastSyncingDeepSetHead.
func TestLongNewerForkedFastSyncingDeepSetHead(t *testing.T) {
	testLongNewerForkedFastSyncingDeepSetHead(t, false)
}
func TestLongNewerForkedFastSyncingDeepSetHeadWithSnapshots(t *testing.T) {
	testLongNewerForkedFastSyncingDeepSetHead(t, true)
}

func testLongNewerForkedFastSyncingDeepSetHead(t *testing.T, snapshots bool) {
	// Chain:
	// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
	// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
	//
	// Frozen:
	// G->C1->C2->C3->C4->C5->C6->C7->C8
	//
	// Commit: G
	// Pivot : C4
	//
	// SetHead(6)
	//
	// ------------------------------
	//
	// Expected in freezer:
	// G->C1->C2->C3->C4->C5->C6
	//
	// Expected in leveldb: none
	//
	// Expected head header : C6
	// Expected head fast block: C6
	// Expected head block : G
	testSetHead(t, &rewindTest{
		canonicalBlocks: 24,
		sidechainBlocks: 12,
		freezeThreshold: 16,
		commitBlock: 0,
		pivotBlock: uint64ptr(4),
		setheadBlock: 6,
		expCanonicalBlocks: 6,
		expSidechainBlocks: 0,
		expFrozen: 7,
		expHeadHeader: 6,
		expHeadFastBlock: 6,
		expHeadBlock: 0,
	}, snapshots)
}

// Tests a sethead for a long canonical chain with frozen blocks and a longer side
// chain, where a recent block - newer than the ancient limit - was already committed
// to disk and then sethead was called. In this case the freezer will delete the
// sidechain since it's dangling, reverting to TestLongShallowSetHead.
func TestLongReorgedShallowSetHead(t *testing.T)              { testLongReorgedShallowSetHead(t, false) }
func TestLongReorgedShallowSetHeadWithSnapshots(t *testing.T) { testLongReorgedShallowSetHead(t, true) }

func testLongReorgedShallowSetHead(t *testing.T, snapshots bool) {
	// Chain:
	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
	//
	// Frozen:
	//   G->C1->C2
	//
	// Commit: G, C4
	// Pivot : none
	//
	// SetHead(6)
	//
	// ------------------------------
	//
	// Expected in freezer:
	//   G->C1->C2
	//
	// Expected in leveldb:
	//   C2)->C3->C4->C5->C6
	//
	// Expected head header    : C6
	// Expected head fast block: C6
	// Expected head block     : C4
	testSetHead(t, &rewindTest{
		canonicalBlocks:    18,
		sidechainBlocks:    26,
		freezeThreshold:    16,
		commitBlock:        4,
		pivotBlock:         nil,
		setheadBlock:       6,
		expCanonicalBlocks: 6,
		expSidechainBlocks: 0,
		expFrozen:          3,
		expHeadHeader:      6,
		expHeadFastBlock:   6,
		expHeadBlock:       4,
	}, snapshots)
}

// Tests a sethead for a long canonical chain with frozen blocks and a longer side
// chain, where a recent block - older than the ancient limit - was already committed
// to disk and then sethead was called. In this case the freezer will delete the
// sidechain since it's dangling, reverting to TestLongDeepSetHead.
func TestLongReorgedDeepSetHead(t *testing.T)              { testLongReorgedDeepSetHead(t, false) }
func TestLongReorgedDeepSetHeadWithSnapshots(t *testing.T) { testLongReorgedDeepSetHead(t, true) }

func testLongReorgedDeepSetHead(t *testing.T, snapshots bool) {
	// Chain:
	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
	//
	// Frozen:
	//   G->C1->C2->C3->C4->C5->C6->C7->C8
	//
	// Commit: G, C4
	// Pivot : none
	//
	// SetHead(6)
	//
	// ------------------------------
	//
	// Expected in freezer:
	//   G->C1->C2->C3->C4
	//
	// Expected in leveldb: none
	//
	// Expected head header    : C4
	// Expected head fast block: C4
	// Expected head block     : C4
	testSetHead(t, &rewindTest{
		canonicalBlocks:    24,
		sidechainBlocks:    26,
		freezeThreshold:    16,
		commitBlock:        4,
		pivotBlock:         nil,
		setheadBlock:       6,
		expCanonicalBlocks: 4,
		expSidechainBlocks: 0,
		expFrozen:          5,
		expHeadHeader:      4,
		expHeadFastBlock:   4,
		expHeadBlock:       4,
	}, snapshots)
}

// Tests a sethead for a long canonical chain with frozen blocks and a longer
// side chain, where the fast sync pivot point - newer than the ancient limit -
// was already committed to disk and then sethead was called. In this case the
// freezer will delete the sidechain since it's dangling, reverting to
// TestLongFastSyncedShallowSetHead.
func TestLongReorgedFastSyncedShallowSetHead(t *testing.T) {
	testLongReorgedFastSyncedShallowSetHead(t, false)
}
func TestLongReorgedFastSyncedShallowSetHeadWithSnapshots(t *testing.T) {
	testLongReorgedFastSyncedShallowSetHead(t, true)
}

func testLongReorgedFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
	// Chain:
	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
	//
	// Frozen:
	//   G->C1->C2
	//
	// Commit: G, C4
	// Pivot : C4
	//
	// SetHead(6)
	//
	// ------------------------------
	//
	// Expected in freezer:
	//   G->C1->C2
	//
	// Expected in leveldb:
	//   C2)->C3->C4->C5->C6
	//
	// Expected head header    : C6
	// Expected head fast block: C6
	// Expected head block     : C4
	testSetHead(t, &rewindTest{
		canonicalBlocks:    18,
		sidechainBlocks:    26,
		freezeThreshold:    16,
		commitBlock:        4,
		pivotBlock:         uint64ptr(4),
		setheadBlock:       6,
		expCanonicalBlocks: 6,
		expSidechainBlocks: 0,
		expFrozen:          3,
		expHeadHeader:      6,
		expHeadFastBlock:   6,
		expHeadBlock:       4,
	}, snapshots)
}

// Tests a sethead for a long canonical chain with frozen blocks and a longer
// side chain, where the fast sync pivot point - older than the ancient limit -
// was already committed to disk and then sethead was called. In this case the
// freezer will delete the sidechain since it's dangling, reverting to
// TestLongFastSyncedDeepSetHead.
func TestLongReorgedFastSyncedDeepSetHead(t *testing.T) {
	testLongReorgedFastSyncedDeepSetHead(t, false)
}
func TestLongReorgedFastSyncedDeepSetHeadWithSnapshots(t *testing.T) {
	testLongReorgedFastSyncedDeepSetHead(t, true)
}

func testLongReorgedFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
	// Chain:
	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
	//
	// Frozen:
	//   G->C1->C2->C3->C4->C5->C6->C7->C8
	//
	// Commit: G, C4
	// Pivot : C4
	//
	// SetHead(6)
	//
	// ------------------------------
	//
	// Expected in freezer:
	//   G->C1->C2->C3->C4
	//
	// Expected in leveldb: none
	//
	// Expected head header    : C4
	// Expected head fast block: C4
	// Expected head block     : C4
	testSetHead(t, &rewindTest{
		canonicalBlocks:    24,
		sidechainBlocks:    26,
		freezeThreshold:    16,
		commitBlock:        4,
		pivotBlock:         uint64ptr(4),
		setheadBlock:       6,
		expCanonicalBlocks: 4,
		expSidechainBlocks: 0,
		expFrozen:          5,
		expHeadHeader:      4,
		expHeadFastBlock:   4,
		expHeadBlock:       4,
	}, snapshots)
}

// Tests a sethead for a long canonical chain with frozen blocks and a longer
// side chain, where the fast sync pivot point - newer than the ancient limit -
// was not yet committed, but sethead was called. In this case we expect the
// chain to detect that it was fast syncing and delete everything from the new
// head, since we can just pick up fast syncing from there. The side chain is
// completely nuked by the freezer.
func TestLongReorgedFastSyncingShallowSetHead(t *testing.T) {
	testLongReorgedFastSyncingShallowSetHead(t, false)
}
func TestLongReorgedFastSyncingShallowSetHeadWithSnapshots(t *testing.T) {
	testLongReorgedFastSyncingShallowSetHead(t, true)
}

func testLongReorgedFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
	// Chain:
	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
	//
	// Frozen:
	//   G->C1->C2
	//
	// Commit: G
	// Pivot : C4
	//
	// SetHead(6)
	//
	// ------------------------------
	//
	// Expected in freezer:
	//   G->C1->C2
	//
	// Expected in leveldb:
	//   C2)->C3->C4->C5->C6
	//
	// Expected head header    : C6
	// Expected head fast block: C6
	// Expected head block     : G
	testSetHead(t, &rewindTest{
		canonicalBlocks:    18,
		sidechainBlocks:    26,
		freezeThreshold:    16,
		commitBlock:        0,
		pivotBlock:         uint64ptr(4),
		setheadBlock:       6,
		expCanonicalBlocks: 6,
		expSidechainBlocks: 0,
		expFrozen:          3,
		expHeadHeader:      6,
		expHeadFastBlock:   6,
		expHeadBlock:       0,
	}, snapshots)
}

// Tests a sethead for a long canonical chain with frozen blocks and a longer
// side chain, where the fast sync pivot point - older than the ancient limit -
// was not yet committed, but sethead was called. In this case we expect the
// chain to detect that it was fast syncing and delete everything from the new
// head, since we can just pick up fast syncing from there. The side chain is
// completely nuked by the freezer.
func TestLongReorgedFastSyncingDeepSetHead(t *testing.T) {
	testLongReorgedFastSyncingDeepSetHead(t, false)
}
func TestLongReorgedFastSyncingDeepSetHeadWithSnapshots(t *testing.T) {
	testLongReorgedFastSyncingDeepSetHead(t, true)
}

func testLongReorgedFastSyncingDeepSetHead(t *testing.T, snapshots bool) {
	// Chain:
	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
	//
	// Frozen:
	//   G->C1->C2->C3->C4->C5->C6->C7->C8
	//
	// Commit: G
	// Pivot : C4
	//
	// SetHead(6)
	//
	// ------------------------------
	//
	// Expected in freezer:
	//   G->C1->C2->C3->C4->C5->C6
	//
	// Expected in leveldb: none
	//
	// Expected head header    : C6
	// Expected head fast block: C6
	// Expected head block     : G
	testSetHead(t, &rewindTest{
		canonicalBlocks:    24,
		sidechainBlocks:    26,
		freezeThreshold:    16,
		commitBlock:        0,
		pivotBlock:         uint64ptr(4),
		setheadBlock:       6,
		expCanonicalBlocks: 6,
		expSidechainBlocks: 0,
		expFrozen:          7,
		expHeadHeader:      6,
		expHeadFastBlock:   6,
		expHeadBlock:       0,
	}, snapshots)
}

func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) {
	// It's hard to follow the test case, visualize the input
	// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
	// fmt.Println(tt.dump(false))

	// Create a temporary persistent database
	datadir, err := ioutil.TempDir("", "")
	if err != nil {
		t.Fatalf("Failed to create temporary datadir: %v", err)
	}
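	// Only the path is reused below; the freezer-backed database constructor
	// recreates the directory, so the empty temp dir can be dropped right away.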
	os.RemoveAll(datadir)

	db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false)
	if err != nil {
		t.Fatalf("Failed to create persistent database: %v", err)
	}
	defer db.Close()

	// Initialize a fresh chain
	var (
		genesis = new(Genesis).MustCommit(db)
		engine  = ethash.NewFullFaker()
		config  = &CacheConfig{
			TrieCleanLimit: 256,
			TrieDirtyLimit: 256,
			TrieTimeLimit:  5 * time.Minute,
			SnapshotLimit:  0, // Disable snapshot
		}
	)
	if snapshots {
		config.SnapshotLimit = 256
		config.SnapshotWait = true
	}
	config.TriesInMemory = 128
	chain, err := NewBlockChain(db, config, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("Failed to create chain: %v", err)
	}
	// If sidechain blocks are needed, make a light chain and import it
	var sideblocks types.Blocks
	if tt.sidechainBlocks > 0 {
		sideblocks, _ = GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), tt.sidechainBlocks, func(i int, b *BlockGen) {
			b.SetCoinbase(common.Address{0x01})
		})
		if _, err := chain.InsertChain(sideblocks); err != nil {
			t.Fatalf("Failed to import side chain: %v", err)
		}
	}
	canonblocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), tt.canonicalBlocks, func(i int, b *BlockGen) {
		b.SetCoinbase(common.Address{0x02})
		b.SetDifficulty(big.NewInt(1000000))
	})
	if _, err := chain.InsertChain(canonblocks[:tt.commitBlock]); err != nil {
		t.Fatalf("Failed to import canonical chain start: %v", err)
	}
	if tt.commitBlock > 0 {
		chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), true, nil)
		if snapshots {
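			// Capping to zero diff layers should flatten the whole snapshot tree
			// into the disk layer, persisting the snapshot at the committed block
			// before the chain is rewound.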
			if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil {
				t.Fatalf("Failed to flatten snapshots: %v", err)
			}
		}
	}
	if _, err := chain.InsertChain(canonblocks[tt.commitBlock:]); err != nil {
		t.Fatalf("Failed to import canonical chain tail: %v", err)
	}
	// Manually dereference anything not committed to not have to work with 128+ tries
	for _, block := range sideblocks {
		chain.stateCache.TrieDB().Dereference(block.Root())
	}
	for _, block := range canonblocks {
		chain.stateCache.TrieDB().Dereference(block.Root())
	}
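	// Dropping the warm state caches as well means the rewind below can only
	// rely on data that actually made it to disk.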
	chain.stateCache.Purge()
	// Force run a freeze cycle
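	// Freeze(threshold) moves every block that is at least `threshold` blocks
	// behind the current head into the ancient store, e.g. 18 canonical blocks
	// with a threshold of 16 leaves genesis through C2 frozen, matching the
	// "Frozen:" sections in the scenarios above.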
	type freezer interface {
		Freeze(threshold uint64) error
		Ancients() (uint64, error)
	}
	db.(freezer).Freeze(tt.freezeThreshold)

	// Set the simulated pivot block
	if tt.pivotBlock != nil {
		rawdb.WriteLastPivotNumber(db, *tt.pivotBlock)
	}
	// Set the head of the chain back to the requested number
	chain.SetHead(tt.setheadBlock)

	// Iterate over all the remaining blocks and ensure there are no gaps
	verifyNoGaps(t, chain, true, canonblocks)
	verifyNoGaps(t, chain, false, sideblocks)
	verifyCutoff(t, chain, true, canonblocks, tt.expCanonicalBlocks)
	verifyCutoff(t, chain, false, sideblocks, tt.expSidechainBlocks)

	if head := chain.CurrentHeader(); head.Number.Uint64() != tt.expHeadHeader {
		t.Errorf("Head header mismatch: have %d, want %d", head.Number, tt.expHeadHeader)
	}
	if head := chain.CurrentFastBlock(); head.NumberU64() != tt.expHeadFastBlock {
		t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadFastBlock)
	}
	if head := chain.CurrentBlock(); head.NumberU64() != tt.expHeadBlock {
		t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadBlock)
	}
	if frozen, err := db.(freezer).Ancients(); err != nil {
		t.Errorf("Failed to retrieve ancient count: %v\n", err)
	} else if int(frozen) != tt.expFrozen {
		t.Errorf("Frozen block count mismatch: have %d, want %d", frozen, tt.expFrozen)
	}
}

// verifyNoGaps checks that there are no gaps after the initial set of blocks in
// the database and errors if found.
func verifyNoGaps(t *testing.T, chain *BlockChain, canonical bool, inserted types.Blocks) {
	t.Helper()

	var end uint64
	for i := uint64(0); i <= uint64(len(inserted)); i++ {
		header := chain.GetHeaderByNumber(i)
		if header == nil && end == 0 {
			end = i
		}
		if header != nil && end > 0 {
			if canonical {
				t.Errorf("Canonical header gap between #%d-#%d", end, i-1)
			} else {
				t.Errorf("Sidechain header gap between #%d-#%d", end, i-1)
			}
			end = 0 // Reset for further gap detection
		}
	}
	end = 0
	for i := uint64(0); i <= uint64(len(inserted)); i++ {
		block := chain.GetBlockByNumber(i)
		if block == nil && end == 0 {
			end = i
		}
		if block != nil && end > 0 {
			if canonical {
				t.Errorf("Canonical block gap between #%d-#%d", end, i-1)
			} else {
				t.Errorf("Sidechain block gap between #%d-#%d", end, i-1)
			}
			end = 0 // Reset for further gap detection
		}
	}
	end = 0
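	// Receipts are looked up by block hash, so walk the inserted blocks directly;
	// the loop starts at 1 because inserted[0] is block #1, not the genesis block.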
	for i := uint64(1); i <= uint64(len(inserted)); i++ {
		receipts := chain.GetReceiptsByHash(inserted[i-1].Hash())
		if receipts == nil && end == 0 {
			end = i
		}
		if receipts != nil && end > 0 {
			if canonical {
				t.Errorf("Canonical receipt gap between #%d-#%d", end, i-1)
			} else {
				t.Errorf("Sidechain receipt gap between #%d-#%d", end, i-1)
			}
			end = 0 // Reset for further gap detection
		}
	}
}

// verifyCutoff checks that there is no chain data available in the chain after
// the specified limit, but that it is available before.
func verifyCutoff(t *testing.T, chain *BlockChain, canonical bool, inserted types.Blocks, head int) {
	t.Helper()

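	// Everything at or below the cutoff must still be fully retrievable (header,
	// body and receipts); everything above it must already have been deleted.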
	for i := 1; i <= len(inserted); i++ {
		if i <= head {
			if header := chain.GetHeader(inserted[i-1].Hash(), uint64(i)); header == nil {
				if canonical {
					t.Errorf("Canonical header #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
				} else {
					t.Errorf("Sidechain header #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
				}
			}
			if block := chain.GetBlock(inserted[i-1].Hash(), uint64(i)); block == nil {
				if canonical {
					t.Errorf("Canonical block #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
				} else {
					t.Errorf("Sidechain block #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
				}
			}
			if receipts := chain.GetReceiptsByHash(inserted[i-1].Hash()); receipts == nil {
				if canonical {
					t.Errorf("Canonical receipts #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
				} else {
					t.Errorf("Sidechain receipts #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
				}
			}
		} else {
			if header := chain.GetHeader(inserted[i-1].Hash(), uint64(i)); header != nil {
				if canonical {
					t.Errorf("Canonical header #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
				} else {
					t.Errorf("Sidechain header #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
				}
			}
			if block := chain.GetBlock(inserted[i-1].Hash(), uint64(i)); block != nil {
				if canonical {
					t.Errorf("Canonical block #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
				} else {
					t.Errorf("Sidechain block #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
				}
			}
			if receipts := chain.GetReceiptsByHash(inserted[i-1].Hash()); receipts != nil {
				if canonical {
					t.Errorf("Canonical receipts #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
				} else {
					t.Errorf("Sidechain receipts #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
				}
			}
		}
	}
}

// uint64ptr is a weird helper to allow 1-line constant pointer creation.
func uint64ptr(n uint64) *uint64 {
	return &n
}