// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package core

import (
	"fmt"
	"math/big"
	"math/rand"
	"os"
	"path/filepath"
	"runtime"
	"strconv"
	"testing"
	"time"

	"github.com/ethereum/ethash"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/pow"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/hashicorp/golang-lru"
)

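// init lets the tests exercise block processing on all available CPU cores.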
func init() {
	runtime.GOMAXPROCS(runtime.NumCPU())
}

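// thePow creates an ethash proof-of-work instance configured for testing.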
func thePow() pow.PoW {
	pow, _ := ethash.NewForTesting()
	return pow
}

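// theBlockChain constructs a BlockChain over the given database, seeded with the test network genesis block.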
func theBlockChain(db ethdb.Database, t *testing.T) *BlockChain {
	var eventMux event.TypeMux
	WriteTestNetGenesisBlock(db)
	blockchain, err := NewBlockChain(db, testChainConfig(), thePow(), &eventMux)
	if err != nil {
		t.Error("failed creating blockchain:", err)
		t.FailNow()
		return nil
	}

	return blockchain
}

// Test fork of length N starting from block i
func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, comparator func(td1, td2 *big.Int)) {
	// Copy old chain up to #i into a new db
	db, blockchain2, err := newCanonical(i, full)
	if err != nil {
		t.Fatal("could not make new canonical in testFork", err)
	}
	// Assert the chains have the same header/block at #i
	var hash1, hash2 common.Hash
	if full {
		hash1 = blockchain.GetBlockByNumber(uint64(i)).Hash()
		hash2 = blockchain2.GetBlockByNumber(uint64(i)).Hash()
	} else {
		hash1 = blockchain.GetHeaderByNumber(uint64(i)).Hash()
		hash2 = blockchain2.GetHeaderByNumber(uint64(i)).Hash()
	}
	if hash1 != hash2 {
		t.Errorf("chain content mismatch at %d: have hash %v, want hash %v", i, hash2, hash1)
	}
	// Extend the newly created chain
	var (
		blockChainB  []*types.Block
		headerChainB []*types.Header
	)
	if full {
		blockChainB = makeBlockChain(blockchain2.CurrentBlock(), n, db, forkSeed)
		if _, err := blockchain2.InsertChain(blockChainB); err != nil {
			t.Fatalf("failed to insert forking chain: %v", err)
		}
	} else {
		headerChainB = makeHeaderChain(blockchain2.CurrentHeader(), n, db, forkSeed)
		if _, err := blockchain2.InsertHeaderChain(headerChainB, 1); err != nil {
			t.Fatalf("failed to insert forking chain: %v", err)
		}
	}
	// Sanity check that the forked chain can be imported into the original
	var tdPre, tdPost *big.Int

	if full {
		tdPre = blockchain.GetTd(blockchain.CurrentBlock().Hash())
		if err := testBlockChainImport(blockChainB, blockchain); err != nil {
			t.Fatalf("failed to import forked block chain: %v", err)
		}
		tdPost = blockchain.GetTd(blockChainB[len(blockChainB)-1].Hash())
	} else {
		tdPre = blockchain.GetTd(blockchain.CurrentHeader().Hash())
		if err := testHeaderChainImport(headerChainB, blockchain); err != nil {
			t.Fatalf("failed to import forked header chain: %v", err)
		}
		tdPost = blockchain.GetTd(headerChainB[len(headerChainB)-1].Hash())
	}
	// Compare the total difficulties of the chains
	comparator(tdPre, tdPost)
}

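// printChain dumps the hash and difficulty of every block in the chain, highest number first (debugging helper).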
func printChain(bc *BlockChain) {
	for i := bc.CurrentBlock().Number().Uint64(); i > 0; i-- {
		b := bc.GetBlockByNumber(uint64(i))
		fmt.Printf("\t%x %v\n", b.Hash(), b.Difficulty())
	}
}

// testBlockChainImport tries to process a chain of blocks, writing them into
// the database if successful.
func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
	for _, block := range chain {
		// Try and process the block
		err := blockchain.Validator().ValidateBlock(block)
		if err != nil {
			if IsKnownBlockErr(err) {
				continue
			}
			return err
		}
		statedb, err := state.New(blockchain.GetBlock(block.ParentHash()).Root(), blockchain.chainDb)
		if err != nil {
			return err
		}
		receipts, _, usedGas, err := blockchain.Processor().Process(block, statedb, vm.Config{})
		if err != nil {
			reportBlock(block, err)
			return err
		}
		err = blockchain.Validator().ValidateState(block, blockchain.GetBlock(block.ParentHash()), statedb, receipts, usedGas)
		if err != nil {
			reportBlock(block, err)
			return err
		}
		blockchain.mu.Lock()
		WriteTd(blockchain.chainDb, block.Hash(), new(big.Int).Add(block.Difficulty(), blockchain.GetTd(block.ParentHash())))
		WriteBlock(blockchain.chainDb, block)
		statedb.Commit()
		blockchain.mu.Unlock()
	}
	return nil
}

// testHeaderChainImport tries to process a chain of headers, writing them into
// the database if successful.
func testHeaderChainImport(chain []*types.Header, blockchain *BlockChain) error {
	for _, header := range chain {
		// Try and validate the header
		if err := blockchain.Validator().ValidateHeader(header, blockchain.GetHeader(header.ParentHash), false); err != nil {
			return err
		}
		// Manually insert the header into the database, but don't reorganise (allows subsequent testing)
		blockchain.mu.Lock()
		WriteTd(blockchain.chainDb, header.Hash(), new(big.Int).Add(header.Difficulty, blockchain.GetTd(header.ParentHash)))
		WriteHeader(blockchain.chainDb, header)
		blockchain.mu.Unlock()
	}
	return nil
}

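// loadChain reads an RLP-encoded block chain from the named file under the ../_data directory.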
func loadChain(fn string, t *testing.T) (types.Blocks, error) {
	fh, err := os.OpenFile(filepath.Join("..", "_data", fn), os.O_RDONLY, os.ModePerm)
	if err != nil {
		return nil, err
	}
	defer fh.Close()

	var chain types.Blocks
	if err := rlp.Decode(fh, &chain); err != nil {
		return nil, err
	}

	return chain, nil
}

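// insertChain imports the given blocks into the chain, failing the test on error, and signals completion on done.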
func insertChain(done chan bool, blockchain *BlockChain, chain types.Blocks, t *testing.T) {
	_, err := blockchain.InsertChain(chain)
	if err != nil {
		fmt.Println(err)
		t.FailNow()
	}
	done <- true
}

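// Tests that inserting a block updates the head block hash stored in the database.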
func TestLastBlock(t *testing.T) {
	db, _ := ethdb.NewMemDatabase()

	bchain := theBlockChain(db, t)
	block := makeBlockChain(bchain.CurrentBlock(), 1, db, 0)[0]
	bchain.insert(block)
	if block.Hash() != GetHeadBlockHash(db) {
		t.Errorf("Write/Get HeadBlockHash failed")
	}
}

// Tests that given a starting canonical chain of a given size, it can be extended
// with various length chains.
func TestExtendCanonicalHeaders(t *testing.T) { testExtendCanonical(t, false) }
func TestExtendCanonicalBlocks(t *testing.T)  { testExtendCanonical(t, true) }

func testExtendCanonical(t *testing.T, full bool) {
	length := 5

	// Make first chain starting from genesis
	_, processor, err := newCanonical(length, full)
	if err != nil {
		t.Fatalf("failed to make new canonical chain: %v", err)
	}
	// Define the difficulty comparator
	better := func(td1, td2 *big.Int) {
		if td2.Cmp(td1) <= 0 {
			t.Errorf("total difficulty mismatch: have %v, expected more than %v", td2, td1)
		}
	}
	// Start fork from current height
	testFork(t, processor, length, 1, full, better)
	testFork(t, processor, length, 2, full, better)
	testFork(t, processor, length, 5, full, better)
	testFork(t, processor, length, 10, full, better)
}

// Tests that given a starting canonical chain of a given size, creating shorter
// forks does not take canonical ownership.
func TestShorterForkHeaders(t *testing.T) { testShorterFork(t, false) }
func TestShorterForkBlocks(t *testing.T)  { testShorterFork(t, true) }

func testShorterFork(t *testing.T, full bool) {
	length := 10

	// Make first chain starting from genesis
	_, processor, err := newCanonical(length, full)
	if err != nil {
		t.Fatalf("failed to make new canonical chain: %v", err)
	}
	// Define the difficulty comparator
	worse := func(td1, td2 *big.Int) {
		if td2.Cmp(td1) >= 0 {
			t.Errorf("total difficulty mismatch: have %v, expected less than %v", td2, td1)
		}
	}
	// Sum of numbers must be less than `length` for this to be a shorter fork
	testFork(t, processor, 0, 3, full, worse)
	testFork(t, processor, 0, 7, full, worse)
	testFork(t, processor, 1, 1, full, worse)
	testFork(t, processor, 1, 7, full, worse)
	testFork(t, processor, 5, 3, full, worse)
	testFork(t, processor, 5, 4, full, worse)
}

// Tests that given a starting canonical chain of a given size, creating longer
// forks does take canonical ownership.
func TestLongerForkHeaders(t *testing.T) { testLongerFork(t, false) }
func TestLongerForkBlocks(t *testing.T)  { testLongerFork(t, true) }

func testLongerFork(t *testing.T, full bool) {
	length := 10

	// Make first chain starting from genesis
	_, processor, err := newCanonical(length, full)
	if err != nil {
		t.Fatalf("failed to make new canonical chain: %v", err)
	}
	// Define the difficulty comparator
	better := func(td1, td2 *big.Int) {
		if td2.Cmp(td1) <= 0 {
			t.Errorf("total difficulty mismatch: have %v, expected more than %v", td2, td1)
		}
	}
	// Sum of numbers must be greater than `length` for this to be a longer fork
	testFork(t, processor, 0, 11, full, better)
	testFork(t, processor, 0, 15, full, better)
	testFork(t, processor, 1, 10, full, better)
	testFork(t, processor, 1, 12, full, better)
	testFork(t, processor, 5, 6, full, better)
	testFork(t, processor, 5, 8, full, better)
}

// Tests that given a starting canonical chain of a given size, creating equal
// forks does take canonical ownership.
func TestEqualForkHeaders(t *testing.T) { testEqualFork(t, false) }
func TestEqualForkBlocks(t *testing.T)  { testEqualFork(t, true) }

func testEqualFork(t *testing.T, full bool) {
	length := 10

	// Make first chain starting from genesis
	_, processor, err := newCanonical(length, full)
	if err != nil {
		t.Fatalf("failed to make new canonical chain: %v", err)
	}
	// Define the difficulty comparator
	equal := func(td1, td2 *big.Int) {
		if td2.Cmp(td1) != 0 {
			t.Errorf("total difficulty mismatch: have %v, want %v", td2, td1)
		}
	}
	// Sum of numbers must be equal to `length` for this to be an equal fork
	testFork(t, processor, 0, 10, full, equal)
	testFork(t, processor, 1, 9, full, equal)
	testFork(t, processor, 2, 8, full, equal)
	testFork(t, processor, 5, 5, full, equal)
	testFork(t, processor, 6, 4, full, equal)
	testFork(t, processor, 9, 1, full, equal)
}

// Tests that chains missing links do not get accepted by the processor.
func TestBrokenHeaderChain(t *testing.T) { testBrokenChain(t, false) }
func TestBrokenBlockChain(t *testing.T)  { testBrokenChain(t, true) }

func testBrokenChain(t *testing.T, full bool) {
	// Make chain starting from genesis
	db, blockchain, err := newCanonical(10, full)
	if err != nil {
		t.Fatalf("failed to make new canonical chain: %v", err)
	}
	// Create a forked chain, and try to insert with a missing link
	if full {
		chain := makeBlockChain(blockchain.CurrentBlock(), 5, db, forkSeed)[1:]
		if err := testBlockChainImport(chain, blockchain); err == nil {
			t.Errorf("broken block chain not reported")
		}
	} else {
		chain := makeHeaderChain(blockchain.CurrentHeader(), 5, db, forkSeed)[1:]
		if err := testHeaderChainImport(chain, blockchain); err == nil {
			t.Errorf("broken header chain not reported")
		}
	}
}

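// Tests that inserting two stored chains concurrently leaves the expected one (valid1) as the canonical chain.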
func TestChainInsertions(t *testing.T) {
	t.Skip("Skipped: outdated test files")

	db, _ := ethdb.NewMemDatabase()

	chain1, err := loadChain("valid1", t)
	if err != nil {
		fmt.Println(err)
		t.FailNow()
	}

	chain2, err := loadChain("valid2", t)
	if err != nil {
		fmt.Println(err)
		t.FailNow()
	}

	blockchain := theBlockChain(db, t)

	const max = 2
	done := make(chan bool, max)

	go insertChain(done, blockchain, chain1, t)
	go insertChain(done, blockchain, chain2, t)

	for i := 0; i < max; i++ {
		<-done
	}

	if chain2[len(chain2)-1].Hash() != blockchain.CurrentBlock().Hash() {
		t.Error("chain2 is canonical and shouldn't be")
	}

	if chain1[len(chain1)-1].Hash() != blockchain.CurrentBlock().Hash() {
		t.Error("chain1 isn't canonical and should be")
	}
}

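// Tests that inserting several stored chains concurrently leaves the longest one as the canonical chain.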
func TestChainMultipleInsertions(t *testing.T) {
	t.Skip("Skipped: outdated test files")

	db, _ := ethdb.NewMemDatabase()

	const max = 4
	chains := make([]types.Blocks, max)
	var longest int
	for i := 0; i < max; i++ {
		var err error
		name := "valid" + strconv.Itoa(i+1)
		chains[i], err = loadChain(name, t)
		if len(chains[i]) >= len(chains[longest]) {
			longest = i
		}
		fmt.Println("loaded", name, "with a length of", len(chains[i]))
		if err != nil {
			fmt.Println(err)
			t.FailNow()
		}
	}

	blockchain := theBlockChain(db, t)

	done := make(chan bool, max)
	for i, chain := range chains {
		// XXX the go routine would otherwise reference the same (chain[3]) variable and fail
		i := i
		chain := chain
		go func() {
			insertChain(done, blockchain, chain, t)
			fmt.Println(i, "done")
		}()
	}

	for i := 0; i < max; i++ {
		<-done
	}

	if chains[longest][len(chains[longest])-1].Hash() != blockchain.CurrentBlock().Hash() {
		t.Error("Invalid canonical chain")
	}
}

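// bproc is a no-op validator and processor that accepts any block and header, used to stub out consensus checks.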
type bproc struct{}

func (bproc) ValidateBlock(*types.Block) error                        { return nil }
func (bproc) ValidateHeader(*types.Header, *types.Header, bool) error { return nil }
func (bproc) ValidateState(block, parent *types.Block, state *state.StateDB, receipts types.Receipts, usedGas *big.Int) error {
	return nil
}
func (bproc) Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (types.Receipts, vm.Logs, *big.Int, error) {
	return nil, nil, nil, nil
}

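// makeHeaderChainWithDiff returns the headers of the block chain produced by makeBlockChainWithDiff.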
func makeHeaderChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Header {
	blocks := makeBlockChainWithDiff(genesis, d, seed)
	headers := make([]*types.Header, len(blocks))
	for i, block := range blocks {
		headers[i] = block.Header()
	}
	return headers
}

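// makeBlockChainWithDiff builds a chain of empty blocks on top of genesis, one per entry in d, using the entry as that block's difficulty.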
func makeBlockChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Block {
	var chain []*types.Block
	for i, difficulty := range d {
		header := &types.Header{
			Coinbase:    common.Address{seed},
			Number:      big.NewInt(int64(i + 1)),
			Difficulty:  big.NewInt(int64(difficulty)),
			UncleHash:   types.EmptyUncleHash,
			TxHash:      types.EmptyRootHash,
			ReceiptHash: types.EmptyRootHash,
		}
		if i == 0 {
			header.ParentHash = genesis.Hash()
		} else {
			header.ParentHash = chain[i-1].Hash()
		}
		block := types.NewBlockWithHeader(header)
		chain = append(chain, block)
	}
	return chain
}

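// chm assembles a minimal BlockChain around the given genesis block, wiring in the no-op bproc validator and processor.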
func chm(genesis *types.Block, db ethdb.Database) *BlockChain {
	var eventMux event.TypeMux
	bc := &BlockChain{
		chainDb:      db,
		genesisBlock: genesis,
		eventMux:     &eventMux,
		pow:          FakePow{},
		config:       testChainConfig(),
	}
	valFn := func() HeaderValidator { return bc.Validator() }
	bc.hc, _ = NewHeaderChain(db, testChainConfig(), valFn, bc.getProcInterrupt)
	bc.bodyCache, _ = lru.New(100)
	bc.bodyRLPCache, _ = lru.New(100)
	bc.blockCache, _ = lru.New(100)
	bc.futureBlocks, _ = lru.New(100)
	bc.SetValidator(bproc{})
	bc.SetProcessor(bproc{})
	bc.ResetWithGenesisBlock(genesis)

	return bc
}

// Tests that reorganising a long difficult chain after a short easy one
// overwrites the canonical numbers and links in the database.
func TestReorgLongHeaders(t *testing.T) { testReorgLong(t, false) }
func TestReorgLongBlocks(t *testing.T)  { testReorgLong(t, true) }

func testReorgLong(t *testing.T, full bool) {
	testReorg(t, []int{1, 2, 4}, []int{1, 2, 3, 4}, 10, full)
}

// Tests that reorganising a short difficult chain after a long easy one
// overwrites the canonical numbers and links in the database.
func TestReorgShortHeaders(t *testing.T) { testReorgShort(t, false) }
func TestReorgShortBlocks(t *testing.T)  { testReorgShort(t, true) }

func testReorgShort(t *testing.T, full bool) {
	testReorg(t, []int{1, 2, 3, 4}, []int{1, 10}, 11, full)
}

func testReorg(t *testing.T, first, second []int, td int64, full bool) {
	// Create a pristine block chain
	db, _ := ethdb.NewMemDatabase()
	genesis, _ := WriteTestNetGenesisBlock(db)
	bc := chm(genesis, db)

	// Insert an easy and a difficult chain afterwards
	if full {
		bc.InsertChain(makeBlockChainWithDiff(genesis, first, 11))
		bc.InsertChain(makeBlockChainWithDiff(genesis, second, 22))
	} else {
		bc.InsertHeaderChain(makeHeaderChainWithDiff(genesis, first, 11), 1)
		bc.InsertHeaderChain(makeHeaderChainWithDiff(genesis, second, 22), 1)
	}
	// Check that the chain is valid number and link wise
	if full {
		prev := bc.CurrentBlock()
		for block := bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 1); block.NumberU64() != 0; prev, block = block, bc.GetBlockByNumber(block.NumberU64()-1) {
			if prev.ParentHash() != block.Hash() {
				t.Errorf("parent block hash mismatch: have %x, want %x", prev.ParentHash(), block.Hash())
			}
		}
	} else {
		prev := bc.CurrentHeader()
		for header := bc.GetHeaderByNumber(bc.CurrentHeader().Number.Uint64() - 1); header.Number.Uint64() != 0; prev, header = header, bc.GetHeaderByNumber(header.Number.Uint64()-1) {
			if prev.ParentHash != header.Hash() {
				t.Errorf("parent header hash mismatch: have %x, want %x", prev.ParentHash, header.Hash())
			}
		}
	}
	// Make sure the chain total difficulty is the correct one
	want := new(big.Int).Add(genesis.Difficulty(), big.NewInt(td))
	if full {
		if have := bc.GetTd(bc.CurrentBlock().Hash()); have.Cmp(want) != 0 {
			t.Errorf("total difficulty mismatch: have %v, want %v", have, want)
		}
	} else {
		if have := bc.GetTd(bc.CurrentHeader().Hash()); have.Cmp(want) != 0 {
			t.Errorf("total difficulty mismatch: have %v, want %v", have, want)
		}
	}
}

// Tests that the insertion functions detect banned hashes.
func TestBadHeaderHashes(t *testing.T) { testBadHashes(t, false) }
func TestBadBlockHashes(t *testing.T)  { testBadHashes(t, true) }

func testBadHashes(t *testing.T, full bool) {
	// Create a pristine block chain
	db, _ := ethdb.NewMemDatabase()
	genesis, _ := WriteTestNetGenesisBlock(db)
	bc := chm(genesis, db)

	// Create a chain, ban a hash and try to import
	var err error
	if full {
		blocks := makeBlockChainWithDiff(genesis, []int{1, 2, 4}, 10)
		BadHashes[blocks[2].Header().Hash()] = true
		_, err = bc.InsertChain(blocks)
	} else {
		headers := makeHeaderChainWithDiff(genesis, []int{1, 2, 4}, 10)
		BadHashes[headers[2].Hash()] = true
		_, err = bc.InsertHeaderChain(headers, 1)
	}
	if !IsBadHashError(err) {
		t.Errorf("error mismatch: want: BadHashError, have: %v", err)
	}
}

// Tests that bad hashes are detected on boot, and the chain rolled back to a
// good state prior to the bad hash.
func TestReorgBadHeaderHashes(t *testing.T) { testReorgBadHashes(t, false) }
func TestReorgBadBlockHashes(t *testing.T)  { testReorgBadHashes(t, true) }

func testReorgBadHashes(t *testing.T, full bool) {
	// Create a pristine block chain
	db, _ := ethdb.NewMemDatabase()
	genesis, _ := WriteTestNetGenesisBlock(db)
	bc := chm(genesis, db)

	// Create a chain, import and ban afterwards
	headers := makeHeaderChainWithDiff(genesis, []int{1, 2, 3, 4}, 10)
	blocks := makeBlockChainWithDiff(genesis, []int{1, 2, 3, 4}, 10)

	if full {
		if _, err := bc.InsertChain(blocks); err != nil {
			t.Fatalf("failed to import blocks: %v", err)
		}
		if bc.CurrentBlock().Hash() != blocks[3].Hash() {
			t.Errorf("last block hash mismatch: have: %x, want %x", bc.CurrentBlock().Hash(), blocks[3].Header().Hash())
		}
		BadHashes[blocks[3].Header().Hash()] = true
		defer func() { delete(BadHashes, blocks[3].Header().Hash()) }()
	} else {
		if _, err := bc.InsertHeaderChain(headers, 1); err != nil {
			t.Fatalf("failed to import headers: %v", err)
		}
		if bc.CurrentHeader().Hash() != headers[3].Hash() {
			t.Errorf("last header hash mismatch: have: %x, want %x", bc.CurrentHeader().Hash(), headers[3].Hash())
		}
		BadHashes[headers[3].Hash()] = true
		defer func() { delete(BadHashes, headers[3].Hash()) }()
	}
	// Create a new chain manager and check it rolled back the state
	ncm, err := NewBlockChain(db, testChainConfig(), FakePow{}, new(event.TypeMux))
	if err != nil {
		t.Fatalf("failed to create new chain manager: %v", err)
	}
	if full {
		if ncm.CurrentBlock().Hash() != blocks[2].Header().Hash() {
			t.Errorf("last block hash mismatch: have: %x, want %x", ncm.CurrentBlock().Hash(), blocks[2].Header().Hash())
		}
		if blocks[2].Header().GasLimit.Cmp(ncm.GasLimit()) != 0 {
			t.Errorf("last block gasLimit mismatch: have: %x, want %x", ncm.GasLimit(), blocks[2].Header().GasLimit)
		}
	} else {
		if ncm.CurrentHeader().Hash() != headers[2].Hash() {
			t.Errorf("last header hash mismatch: have: %x, want %x", ncm.CurrentHeader().Hash(), headers[2].Hash())
		}
	}
}

// Tests chain insertions in the face of one entity containing an invalid nonce.
func TestHeadersInsertNonceError(t *testing.T) { testInsertNonceError(t, false) }
func TestBlocksInsertNonceError(t *testing.T)  { testInsertNonceError(t, true) }

func testInsertNonceError(t *testing.T, full bool) {
	for i := 1; i < 25 && !t.Failed(); i++ {
		// Create a pristine chain and database
		db, blockchain, err := newCanonical(0, full)
		if err != nil {
			t.Fatalf("failed to create pristine chain: %v", err)
		}
		// Create and insert a chain with a failing nonce
		var (
			failAt   int
			failRes  int
			failNum  uint64
			failHash common.Hash
		)
		if full {
			blocks := makeBlockChain(blockchain.CurrentBlock(), i, db, 0)

			failAt = rand.Int() % len(blocks)
			failNum = blocks[failAt].NumberU64()
			failHash = blocks[failAt].Hash()

			blockchain.pow = failPow{failNum}

			failRes, err = blockchain.InsertChain(blocks)
		} else {
			headers := makeHeaderChain(blockchain.CurrentHeader(), i, db, 0)

			failAt = rand.Int() % len(headers)
			failNum = headers[failAt].Number.Uint64()
			failHash = headers[failAt].Hash()

			blockchain.pow = failPow{failNum}
			blockchain.validator = NewBlockValidator(testChainConfig(), blockchain, failPow{failNum})

			failRes, err = blockchain.InsertHeaderChain(headers, 1)
		}
		// Check that the returned error indicates the nonce failure.
		if failRes != failAt {
			t.Errorf("test %d: failure index mismatch: have %d, want %d", i, failRes, failAt)
		}
		if !IsBlockNonceErr(err) {
			t.Fatalf("test %d: error mismatch: have %v, want nonce error %T", i, err, err)
		}
		nerr := err.(*BlockNonceErr)
		if nerr.Number.Uint64() != failNum {
			t.Errorf("test %d: number mismatch: have %v, want %v", i, nerr.Number, failNum)
		}
		if nerr.Hash != failHash {
			t.Errorf("test %d: hash mismatch: have %x, want %x", i, nerr.Hash[:4], failHash[:4])
		}
		// Check that no blocks after the failing block have been inserted.
		for j := 0; j < i-failAt; j++ {
			if full {
				if block := blockchain.GetBlockByNumber(failNum + uint64(j)); block != nil {
					t.Errorf("test %d: invalid block in chain: %v", i, block)
				}
			} else {
				if header := blockchain.GetHeaderByNumber(failNum + uint64(j)); header != nil {
					t.Errorf("test %d: invalid header in chain: %v", i, header)
				}
			}
		}
	}
}

// Tests that fast importing a block chain produces the same chain data as the
// classical full block processing.
func TestFastVsFullChains(t *testing.T) {
	// Configure and generate a sample block chain
	var (
		gendb, _ = ethdb.NewMemDatabase()
		key, _   = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		address  = crypto.PubkeyToAddress(key.PublicKey)
		funds    = big.NewInt(1000000000)
		genesis  = GenesisBlockForTesting(gendb, address, funds)
	)
	blocks, receipts := GenerateChain(genesis, gendb, 1024, func(i int, block *BlockGen) {
		block.SetCoinbase(common.Address{0x00})

		// If the block number is a multiple of 3, send a few bonus transactions to the miner
		if i%3 == 2 {
			for j := 0; j < i%4+1; j++ {
				tx, err := types.NewTransaction(block.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key)
				if err != nil {
					panic(err)
				}
				block.AddTx(tx)
			}
		}
		// If the block number is a multiple of 5, add a few bonus uncles to the block
		if i%5 == 5 {
			block.AddUncle(&types.Header{ParentHash: block.PrevBlock(i - 1).Hash(), Number: big.NewInt(int64(i - 1))})
		}
	})
	// Import the chain as an archive node for the comparison baseline
	archiveDb, _ := ethdb.NewMemDatabase()
	WriteGenesisBlockForTesting(archiveDb, GenesisAccount{address, funds})

	archive, _ := NewBlockChain(archiveDb, testChainConfig(), FakePow{}, new(event.TypeMux))

	if n, err := archive.InsertChain(blocks); err != nil {
		t.Fatalf("failed to process block %d: %v", n, err)
	}
	// Fast import the chain as a non-archive node to test
	fastDb, _ := ethdb.NewMemDatabase()
	WriteGenesisBlockForTesting(fastDb, GenesisAccount{address, funds})
	fast, _ := NewBlockChain(fastDb, testChainConfig(), FakePow{}, new(event.TypeMux))

	headers := make([]*types.Header, len(blocks))
	for i, block := range blocks {
		headers[i] = block.Header()
	}
	if n, err := fast.InsertHeaderChain(headers, 1); err != nil {
		t.Fatalf("failed to insert header %d: %v", n, err)
	}
	if n, err := fast.InsertReceiptChain(blocks, receipts); err != nil {
		t.Fatalf("failed to insert receipt %d: %v", n, err)
	}
	// Iterate over all chain data components, and cross reference
	for i := 0; i < len(blocks); i++ {
		num, hash := blocks[i].NumberU64(), blocks[i].Hash()

		if ftd, atd := fast.GetTd(hash), archive.GetTd(hash); ftd.Cmp(atd) != 0 {
			t.Errorf("block #%d [%x]: td mismatch: have %v, want %v", num, hash, ftd, atd)
		}
		if fheader, aheader := fast.GetHeader(hash), archive.GetHeader(hash); fheader.Hash() != aheader.Hash() {
			t.Errorf("block #%d [%x]: header mismatch: have %v, want %v", num, hash, fheader, aheader)
		}
		if fblock, ablock := fast.GetBlock(hash), archive.GetBlock(hash); fblock.Hash() != ablock.Hash() {
			t.Errorf("block #%d [%x]: block mismatch: have %v, want %v", num, hash, fblock, ablock)
		} else if types.DeriveSha(fblock.Transactions()) != types.DeriveSha(ablock.Transactions()) {
			t.Errorf("block #%d [%x]: transactions mismatch: have %v, want %v", num, hash, fblock.Transactions(), ablock.Transactions())
		} else if types.CalcUncleHash(fblock.Uncles()) != types.CalcUncleHash(ablock.Uncles()) {
			t.Errorf("block #%d [%x]: uncles mismatch: have %v, want %v", num, hash, fblock.Uncles(), ablock.Uncles())
		}
		if freceipts, areceipts := GetBlockReceipts(fastDb, hash), GetBlockReceipts(archiveDb, hash); types.DeriveSha(freceipts) != types.DeriveSha(areceipts) {
			t.Errorf("block #%d [%x]: receipts mismatch: have %v, want %v", num, hash, freceipts, areceipts)
		}
	}
	// Check that the canonical chains are the same between the databases
	for i := 0; i < len(blocks)+1; i++ {
		if fhash, ahash := GetCanonicalHash(fastDb, uint64(i)), GetCanonicalHash(archiveDb, uint64(i)); fhash != ahash {
			t.Errorf("block #%d: canonical hash mismatch: have %v, want %v", i, fhash, ahash)
		}
	}
}

// Tests that various import methods move the chain head pointers to the correct
// positions.
func TestLightVsFastVsFullChainHeads(t *testing.T) {
	// Configure and generate a sample block chain
	var (
		gendb, _ = ethdb.NewMemDatabase()
		key, _   = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		address  = crypto.PubkeyToAddress(key.PublicKey)
		funds    = big.NewInt(1000000000)
		genesis  = GenesisBlockForTesting(gendb, address, funds)
	)
	height := uint64(1024)
	blocks, receipts := GenerateChain(genesis, gendb, int(height), nil)

	// Configure a subchain to roll back
	remove := []common.Hash{}
	for _, block := range blocks[height/2:] {
		remove = append(remove, block.Hash())
	}
	// Create a small assertion method to check the three heads
	assert := func(t *testing.T, kind string, chain *BlockChain, header uint64, fast uint64, block uint64) {
		if num := chain.CurrentBlock().NumberU64(); num != block {
			t.Errorf("%s head block mismatch: have #%v, want #%v", kind, num, block)
		}
		if num := chain.CurrentFastBlock().NumberU64(); num != fast {
			t.Errorf("%s head fast-block mismatch: have #%v, want #%v", kind, num, fast)
		}
		if num := chain.CurrentHeader().Number.Uint64(); num != header {
			t.Errorf("%s head header mismatch: have #%v, want #%v", kind, num, header)
		}
	}
	// Import the chain as an archive node and ensure all pointers are updated
	archiveDb, _ := ethdb.NewMemDatabase()
	WriteGenesisBlockForTesting(archiveDb, GenesisAccount{address, funds})

	archive, _ := NewBlockChain(archiveDb, testChainConfig(), FakePow{}, new(event.TypeMux))

	if n, err := archive.InsertChain(blocks); err != nil {
		t.Fatalf("failed to process block %d: %v", n, err)
	}
	assert(t, "archive", archive, height, height, height)
	archive.Rollback(remove)
	assert(t, "archive", archive, height/2, height/2, height/2)

	// Import the chain as a non-archive node and ensure all pointers are updated
	fastDb, _ := ethdb.NewMemDatabase()
	WriteGenesisBlockForTesting(fastDb, GenesisAccount{address, funds})
	fast, _ := NewBlockChain(fastDb, testChainConfig(), FakePow{}, new(event.TypeMux))

	headers := make([]*types.Header, len(blocks))
	for i, block := range blocks {
		headers[i] = block.Header()
	}
	if n, err := fast.InsertHeaderChain(headers, 1); err != nil {
		t.Fatalf("failed to insert header %d: %v", n, err)
	}
	if n, err := fast.InsertReceiptChain(blocks, receipts); err != nil {
		t.Fatalf("failed to insert receipt %d: %v", n, err)
	}
	assert(t, "fast", fast, height, height, 0)
	fast.Rollback(remove)
	assert(t, "fast", fast, height/2, height/2, 0)

	// Import the chain as a light node and ensure all pointers are updated
	lightDb, _ := ethdb.NewMemDatabase()
	WriteGenesisBlockForTesting(lightDb, GenesisAccount{address, funds})
	light, _ := NewBlockChain(lightDb, testChainConfig(), FakePow{}, new(event.TypeMux))

	if n, err := light.InsertHeaderChain(headers, 1); err != nil {
		t.Fatalf("failed to insert header %d: %v", n, err)
	}
	assert(t, "light", light, height, 0, 0)
	light.Rollback(remove)
	assert(t, "light", light, height/2, 0, 0)
}

// Tests that chain reorganisations handle transaction removals and reinsertions.
func TestChainTxReorgs(t *testing.T) {
	params.MinGasLimit = big.NewInt(125000)      // Minimum the gas limit may ever be.
	params.GenesisGasLimit = big.NewInt(3141592) // Gas limit of the Genesis block.

	var (
		key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
		key3, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
		addr1   = crypto.PubkeyToAddress(key1.PublicKey)
		addr2   = crypto.PubkeyToAddress(key2.PublicKey)
		addr3   = crypto.PubkeyToAddress(key3.PublicKey)
		db, _   = ethdb.NewMemDatabase()
	)
	genesis := WriteGenesisBlockForTesting(db,
		GenesisAccount{addr1, big.NewInt(1000000)},
		GenesisAccount{addr2, big.NewInt(1000000)},
		GenesisAccount{addr3, big.NewInt(1000000)},
	)
	// Create two transactions shared between the chains:
	//  - postponed: transaction included at a later block in the forked chain
	//  - swapped: transaction included at the same block number in the forked chain
	postponed, _ := types.NewTransaction(0, addr1, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key1)
	swapped, _ := types.NewTransaction(1, addr1, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key1)

	// Create two transactions that will be dropped by the forked chain:
	//  - pastDrop: transaction dropped retroactively from a past block
	//  - freshDrop: transaction dropped exactly at the block where the reorg is detected
	var pastDrop, freshDrop *types.Transaction

	// Create three transactions that will be added in the forked chain:
	//  - pastAdd: transaction added before the reorganization is detected
	//  - freshAdd: transaction added at the exact block the reorg is detected
	//  - futureAdd: transaction added after the reorg has already finished
	var pastAdd, freshAdd, futureAdd *types.Transaction

	chain, _ := GenerateChain(genesis, db, 3, func(i int, gen *BlockGen) {
		switch i {
		case 0:
			pastDrop, _ = types.NewTransaction(gen.TxNonce(addr2), addr2, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key2)

			gen.AddTx(pastDrop)  // This transaction will be dropped in the fork from below the split point
			gen.AddTx(postponed) // This transaction will be postponed till block #3 in the fork

		case 2:
			freshDrop, _ = types.NewTransaction(gen.TxNonce(addr2), addr2, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key2)

			gen.AddTx(freshDrop) // This transaction will be dropped in the fork from exactly at the split point
			gen.AddTx(swapped)   // This transaction will be swapped out at the exact height

			gen.OffsetTime(9) // Lower the block difficulty to simulate a weaker chain
		}
	})
	// Import the chain. This runs all block validation rules.
	evmux := &event.TypeMux{}
	blockchain, _ := NewBlockChain(db, testChainConfig(), FakePow{}, evmux)
	if i, err := blockchain.InsertChain(chain); err != nil {
		t.Fatalf("failed to insert original chain[%d]: %v", i, err)
	}

	// overwrite the old chain
	chain, _ = GenerateChain(genesis, db, 5, func(i int, gen *BlockGen) {
		switch i {
		case 0:
			pastAdd, _ = types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key3)
			gen.AddTx(pastAdd) // This transaction needs to be injected during reorg

		case 2:
			gen.AddTx(postponed) // This transaction was postponed from block #1 in the original chain
			gen.AddTx(swapped)   // This transaction was swapped from the exact current spot in the original chain

			freshAdd, _ = types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key3)
			gen.AddTx(freshAdd) // This transaction will be added exactly at reorg time

		case 3:
			futureAdd, _ = types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key3)
			gen.AddTx(futureAdd) // This transaction will be added after a full reorg
		}
	})
	if _, err := blockchain.InsertChain(chain); err != nil {
		t.Fatalf("failed to insert forked chain: %v", err)
	}

	// removed tx
	for i, tx := range (types.Transactions{pastDrop, freshDrop}) {
		if txn, _, _, _ := GetTransaction(db, tx.Hash()); txn != nil {
			t.Errorf("drop %d: tx %v found while shouldn't have been", i, txn)
		}
		if GetReceipt(db, tx.Hash()) != nil {
			t.Errorf("drop %d: receipt found while shouldn't have been", i)
		}
	}
	// added tx
	for i, tx := range (types.Transactions{pastAdd, freshAdd, futureAdd}) {
		if txn, _, _, _ := GetTransaction(db, tx.Hash()); txn == nil {
			t.Errorf("add %d: expected tx to be found", i)
		}
		if GetReceipt(db, tx.Hash()) == nil {
			t.Errorf("add %d: expected receipt to be found", i)
		}
	}
	// shared tx
	for i, tx := range (types.Transactions{postponed, swapped}) {
		if txn, _, _, _ := GetTransaction(db, tx.Hash()); txn == nil {
			t.Errorf("share %d: expected tx to be found", i)
		}
		if GetReceipt(db, tx.Hash()) == nil {
			t.Errorf("share %d: expected receipt to be found", i)
		}
	}
}

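// Tests that a reorganisation which drops a log-producing block fires a RemovedLogsEvent carrying those logs.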
func TestLogReorgs(t *testing.T) {
	params.MinGasLimit = big.NewInt(125000)      // Minimum the gas limit may ever be.
	params.GenesisGasLimit = big.NewInt(3141592) // Gas limit of the Genesis block.

	var (
		key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		addr1   = crypto.PubkeyToAddress(key1.PublicKey)
		db, _   = ethdb.NewMemDatabase()
		// this code generates a log
		code = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
	)
	genesis := WriteGenesisBlockForTesting(db,
		GenesisAccount{addr1, big.NewInt(10000000000000)},
	)

	evmux := &event.TypeMux{}
	blockchain, _ := NewBlockChain(db, testChainConfig(), FakePow{}, evmux)

	subs := evmux.Subscribe(RemovedLogsEvent{})
	chain, _ := GenerateChain(genesis, db, 2, func(i int, gen *BlockGen) {
		if i == 1 {
			tx, err := types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), big.NewInt(1000000), new(big.Int), code).SignECDSA(key1)
			if err != nil {
				t.Fatalf("failed to create tx: %v", err)
			}
			gen.AddTx(tx)
		}
	})
	if _, err := blockchain.InsertChain(chain); err != nil {
		t.Fatalf("failed to insert chain: %v", err)
	}

	chain, _ = GenerateChain(genesis, db, 3, func(i int, gen *BlockGen) {})
	if _, err := blockchain.InsertChain(chain); err != nil {
		t.Fatalf("failed to insert forked chain: %v", err)
	}

	ev := <-subs.Chan()
	if len(ev.Data.(RemovedLogsEvent).Logs) == 0 {
		t.Error("expected logs")
	}
}

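// Tests that blocks displaced during a reorganisation are announced via ChainSideEvent, and that no extra events fire.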
func TestReorgSideEvent(t *testing.T) {
	var (
		db, _   = ethdb.NewMemDatabase()
		key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		addr1   = crypto.PubkeyToAddress(key1.PublicKey)
		genesis = WriteGenesisBlockForTesting(db, GenesisAccount{addr1, big.NewInt(10000000000000)})
	)

	evmux := &event.TypeMux{}
	blockchain, _ := NewBlockChain(db, testChainConfig(), FakePow{}, evmux)

	chain, _ := GenerateChain(genesis, db, 3, func(i int, gen *BlockGen) {})
	if _, err := blockchain.InsertChain(chain); err != nil {
		t.Fatalf("failed to insert chain: %v", err)
	}

	replacementBlocks, _ := GenerateChain(genesis, db, 4, func(i int, gen *BlockGen) {
		tx, err := types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), big.NewInt(1000000), new(big.Int), nil).SignECDSA(key1)
		if i == 2 {
			gen.OffsetTime(-1)
		}
		if err != nil {
			t.Fatalf("failed to create tx: %v", err)
		}
		gen.AddTx(tx)
	})
	subs := evmux.Subscribe(ChainSideEvent{})
	if _, err := blockchain.InsertChain(replacementBlocks); err != nil {
		t.Fatalf("failed to insert chain: %v", err)
	}

	// The first two blocks of the secondary chain are for a brief moment considered
	// side chains because up to that point the first one is considered the
	// heavier chain.
	expectedSideHashes := map[common.Hash]bool{
		replacementBlocks[0].Hash(): true,
		replacementBlocks[1].Hash(): true,
		chain[0].Hash():             true,
		chain[1].Hash():             true,
		chain[2].Hash():             true,
	}

	i := 0

	const timeoutDura = 10 * time.Second
	timeout := time.NewTimer(timeoutDura)
done:
	for {
		select {
		case ev := <-subs.Chan():
			block := ev.Data.(ChainSideEvent).Block
			if _, ok := expectedSideHashes[block.Hash()]; !ok {
				t.Errorf("%d: didn't expect %x to be in side chain", i, block.Hash())
			}
			i++

			if i == len(expectedSideHashes) {
				timeout.Stop()

				break done
			}
			timeout.Reset(timeoutDura)

		case <-timeout.C:
			t.Fatal("Timeout. Possibly not all blocks were triggered for sideevent")
		}
	}

	// make sure no more events are fired
	select {
	case e := <-subs.Chan():
		t.Errorf("unexpected event fired: %v", e)
	case <-time.After(250 * time.Millisecond):
	}

}
|