// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package core implements the Ethereum consensus protocol.
package core

import (
	"errors"
	"fmt"
	"io"
	"math/big"
	mrand "math/rand"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/hashicorp/golang-lru"
	"gopkg.in/karalabe/cookiejar.v2/collections/prque"
)

var (
	blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)

	ErrNoGenesis = errors.New("Genesis not found in chain")
)

const (
	bodyCacheLimit      = 256
	blockCacheLimit     = 256
	maxFutureBlocks     = 256
	maxTimeFutureBlocks = 30
	badBlockLimit       = 10
	triesInMemory       = 128

	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
	BlockChainVersion = 3
)

// CacheConfig contains the configuration values for the trie caching/pruning
// that's resident in a blockchain.
type CacheConfig struct {
	Disabled      bool          // Whether to disable trie write caching (archive node)
	TrieNodeLimit int           // Memory limit (MB) at which to flush the current in-memory trie to disk
	TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk
}
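
// Illustrative sketch, not part of the original source: passing a nil CacheConfig
// to NewBlockChain is equivalent to supplying
//
//	&CacheConfig{
//		TrieNodeLimit: 256 * 1024 * 1024,
//		TrieTimeLimit: 5 * time.Minute,
//	}
//
// whereas an archive node would set Disabled to true so that every trie is
// written straight to disk instead of being garbage collected in memory.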

// BlockChain represents the canonical chain given a database with a genesis
// block. The BlockChain manages chain imports, reverts and chain reorganisations.
//
// Importing blocks into the block chain happens according to the set of rules
// defined by the two stage Validator. Processing of blocks is done using the
// Processor which processes the included transactions. The validation of the state
// is done in the second part of the Validator. A failure aborts the import.
//
// The BlockChain also helps in returning blocks from **any** chain included
// in the database as well as blocks that represent the canonical chain. It's
// important to note that GetBlock can return any block and does not have to be
// part of the canonical chain, whereas GetBlockByNumber always represents the
// canonical chain.
type BlockChain struct {
	chainConfig *params.ChainConfig // Chain & network configuration
	cacheConfig *CacheConfig        // Cache configuration for pruning

	db     ethdb.Database // Low level persistent database to store final content in
	triegc *prque.Prque   // Priority queue mapping block numbers to tries to gc
	gcproc time.Duration  // Accumulates canonical block processing for trie dumping

	hc            *HeaderChain
	rmLogsFeed    event.Feed
	chainFeed     event.Feed
	chainSideFeed event.Feed
	chainHeadFeed event.Feed
	logsFeed      event.Feed
	scope         event.SubscriptionScope
	genesisBlock  *types.Block

	mu      sync.RWMutex // global mutex for locking chain operations
	chainmu sync.RWMutex // blockchain insertion lock
	procmu  sync.RWMutex // block processor lock

	checkpoint       int          // checkpoint counts towards the new checkpoint
	currentBlock     *types.Block // Current head of the block chain
	currentFastBlock *types.Block // Current head of the fast-sync chain (may be above the block chain!)

	stateCache   state.Database // State database to reuse between imports (contains state cache)
	bodyCache    *lru.Cache     // Cache for the most recent block bodies
	bodyRLPCache *lru.Cache     // Cache for the most recent block bodies in RLP encoded format
	blockCache   *lru.Cache     // Cache for the most recent entire blocks
	futureBlocks *lru.Cache     // future blocks are blocks added for later processing

	quit          chan struct{}  // blockchain quit channel
	running       int32          // running must be accessed atomically
	procInterrupt int32          // interrupt signaler for block processing, must be accessed atomically
	wg            sync.WaitGroup // chain processing wait group for shutting down

	engine    consensus.Engine
	processor Processor // block processor interface
	validator Validator // block and state validator interface
	vmConfig  vm.Config

	badBlocks *lru.Cache // Bad block cache
}

// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialises the default Ethereum Validator and
// Processor.
func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config) (*BlockChain, error) {
	if cacheConfig == nil {
		cacheConfig = &CacheConfig{
			TrieNodeLimit: 256 * 1024 * 1024,
			TrieTimeLimit: 5 * time.Minute,
		}
	}
	bodyCache, _ := lru.New(bodyCacheLimit)
	bodyRLPCache, _ := lru.New(bodyCacheLimit)
	blockCache, _ := lru.New(blockCacheLimit)
	futureBlocks, _ := lru.New(maxFutureBlocks)
	badBlocks, _ := lru.New(badBlockLimit)

	bc := &BlockChain{
		chainConfig:  chainConfig,
		cacheConfig:  cacheConfig,
		db:           db,
		triegc:       prque.New(),
		stateCache:   state.NewDatabase(db),
		quit:         make(chan struct{}),
		bodyCache:    bodyCache,
		bodyRLPCache: bodyRLPCache,
		blockCache:   blockCache,
		futureBlocks: futureBlocks,
		engine:       engine,
		vmConfig:     vmConfig,
		badBlocks:    badBlocks,
	}
	bc.SetValidator(NewBlockValidator(chainConfig, bc, engine))
	bc.SetProcessor(NewStateProcessor(chainConfig, bc, engine))

	var err error
	bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.getProcInterrupt)
	if err != nil {
		return nil, err
	}
	bc.genesisBlock = bc.GetBlockByNumber(0)
	if bc.genesisBlock == nil {
		return nil, ErrNoGenesis
	}
	if err := bc.loadLastState(); err != nil {
		return nil, err
	}
	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
	for hash := range BadHashes {
		if header := bc.GetHeaderByHash(hash); header != nil {
			// get the canonical block corresponding to the offending header's number
			headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
			// make sure the headerByNumber (if present) is in our current canonical chain
			if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
				log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
				bc.SetHead(header.Number.Uint64() - 1)
				log.Error("Chain rewind was successful, resuming normal operation")
			}
		}
	}
	// Take ownership of this particular state
	go bc.update()
	return bc, nil
}
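
// Illustrative usage sketch, not from the original source; assumes db, chainConfig
// and engine have been constructed elsewhere (e.g. during node startup):
//
//	chain, err := NewBlockChain(db, nil, chainConfig, engine, vm.Config{})
//	if err != nil {
//		log.Crit("Failed to create blockchain", "err", err)
//	}
//	defer chain.Stop()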

func (bc *BlockChain) getProcInterrupt() bool {
	return atomic.LoadInt32(&bc.procInterrupt) == 1
}

// loadLastState loads the last known chain state from the database. This method
// assumes that the chain manager mutex is held.
func (bc *BlockChain) loadLastState() error {
	// Restore the last known head block
	head := GetHeadBlockHash(bc.db)
	if head == (common.Hash{}) {
		// Corrupt or empty database, init from scratch
		log.Warn("Empty database, resetting chain")
		return bc.Reset()
	}
	// Make sure the entire head block is available
	currentBlock := bc.GetBlockByHash(head)
	if currentBlock == nil {
		// Corrupt or empty database, init from scratch
		log.Warn("Head block missing, resetting chain", "hash", head)
		return bc.Reset()
	}
	// Make sure the state associated with the block is available
	if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
		// Dangling block without a state associated, init from scratch
		log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash())
		if err := bc.repair(&currentBlock); err != nil {
			return err
		}
	}
	// Everything seems to be fine, set as the head block
	bc.currentBlock = currentBlock

	// Restore the last known head header
	currentHeader := bc.currentBlock.Header()
	if head := GetHeadHeaderHash(bc.db); head != (common.Hash{}) {
		if header := bc.GetHeaderByHash(head); header != nil {
			currentHeader = header
		}
	}
	bc.hc.SetCurrentHeader(currentHeader)

	// Restore the last known head fast block
	bc.currentFastBlock = bc.currentBlock
	if head := GetHeadFastBlockHash(bc.db); head != (common.Hash{}) {
		if block := bc.GetBlockByHash(head); block != nil {
			bc.currentFastBlock = block
		}
	}
	// Issue a status log for the user
	headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
	blockTd := bc.GetTd(bc.currentBlock.Hash(), bc.currentBlock.NumberU64())
	fastTd := bc.GetTd(bc.currentFastBlock.Hash(), bc.currentFastBlock.NumberU64())

	log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd)
	log.Info("Loaded most recent local full block", "number", bc.currentBlock.Number(), "hash", bc.currentBlock.Hash(), "td", blockTd)
	log.Info("Loaded most recent local fast block", "number", bc.currentFastBlock.Number(), "hash", bc.currentFastBlock.Hash(), "td", fastTd)

	return nil
}

// SetHead rewinds the local chain to a new head. In the case of headers, everything
// above the new head will be deleted and the new one set. In the case of blocks
// though, the head may be further rewound if block bodies are missing (non-archive
// nodes after a fast sync).
func (bc *BlockChain) SetHead(head uint64) error {
	log.Warn("Rewinding blockchain", "target", head)

	bc.mu.Lock()
	defer bc.mu.Unlock()

	// Rewind the header chain, deleting all block bodies until then
	delFn := func(hash common.Hash, num uint64) {
		DeleteBody(bc.db, hash, num)
	}
	bc.hc.SetHead(head, delFn)
	currentHeader := bc.hc.CurrentHeader()

	// Clear out any stale content from the caches
	bc.bodyCache.Purge()
	bc.bodyRLPCache.Purge()
	bc.blockCache.Purge()
	bc.futureBlocks.Purge()

	// Rewind the block chain, ensuring we don't end up with a stateless head block
	if bc.currentBlock != nil && currentHeader.Number.Uint64() < bc.currentBlock.NumberU64() {
		bc.currentBlock = bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64())
	}
	if bc.currentBlock != nil {
		if _, err := state.New(bc.currentBlock.Root(), bc.stateCache); err != nil {
			// Rewound state missing, rolled back to before pivot, reset to genesis
			bc.currentBlock = nil
		}
	}
	// Rewind the fast block in a simpleton way to the target head
	if bc.currentFastBlock != nil && currentHeader.Number.Uint64() < bc.currentFastBlock.NumberU64() {
		bc.currentFastBlock = bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64())
	}
	// If either blocks reached nil, reset to the genesis state
	if bc.currentBlock == nil {
		bc.currentBlock = bc.genesisBlock
	}
	if bc.currentFastBlock == nil {
		bc.currentFastBlock = bc.genesisBlock
	}
	if err := WriteHeadBlockHash(bc.db, bc.currentBlock.Hash()); err != nil {
		log.Crit("Failed to reset head full block", "err", err)
	}
	if err := WriteHeadFastBlockHash(bc.db, bc.currentFastBlock.Hash()); err != nil {
		log.Crit("Failed to reset head fast block", "err", err)
	}
	return bc.loadLastState()
}
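
// Illustrative sketch, not from the original source: NewBlockChain uses SetHead to
// drop a blacklisted block from the canonical chain; for a hypothetical offending
// header badHeader this amounts to:
//
//	if err := bc.SetHead(badHeader.Number.Uint64() - 1); err != nil {
//		log.Error("Chain rewind failed", "err", err)
//	}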

// FastSyncCommitHead sets the current head block to the one defined by the hash,
// regardless of what the chain contents were prior.
func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
	// Make sure that both the block as well as its state trie exist
	block := bc.GetBlockByHash(hash)
	if block == nil {
		return fmt.Errorf("non existent block [%x…]", hash[:4])
	}
	if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB(), 0); err != nil {
		return err
	}
	// If all checks out, manually set the head block
	bc.mu.Lock()
	bc.currentBlock = block
	bc.mu.Unlock()

	log.Info("Committed new head block", "number", block.Number(), "hash", hash)
	return nil
}

// GasLimit returns the gas limit of the current HEAD block.
func (bc *BlockChain) GasLimit() uint64 {
	bc.mu.RLock()
	defer bc.mu.RUnlock()

	return bc.currentBlock.GasLimit()
}

// CurrentBlock retrieves the current head block of the canonical chain. The
// block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentBlock() *types.Block {
	bc.mu.RLock()
	defer bc.mu.RUnlock()

	return bc.currentBlock
}

// CurrentFastBlock retrieves the current fast-sync head block of the canonical
// chain. The block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentFastBlock() *types.Block {
	bc.mu.RLock()
	defer bc.mu.RUnlock()

	return bc.currentFastBlock
}

// SetProcessor sets the processor required for making state modifications.
func (bc *BlockChain) SetProcessor(processor Processor) {
	bc.procmu.Lock()
	defer bc.procmu.Unlock()
	bc.processor = processor
}

// SetValidator sets the validator which is used to validate incoming blocks.
func (bc *BlockChain) SetValidator(validator Validator) {
	bc.procmu.Lock()
	defer bc.procmu.Unlock()
	bc.validator = validator
}

// Validator returns the current validator.
func (bc *BlockChain) Validator() Validator {
	bc.procmu.RLock()
	defer bc.procmu.RUnlock()
	return bc.validator
}

// Processor returns the current processor.
func (bc *BlockChain) Processor() Processor {
	bc.procmu.RLock()
	defer bc.procmu.RUnlock()
	return bc.processor
}

// State returns a new mutable state based on the current HEAD block.
func (bc *BlockChain) State() (*state.StateDB, error) {
	return bc.StateAt(bc.CurrentBlock().Root())
}

// StateAt returns a new mutable state based on a particular point in time.
func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
	return state.New(root, bc.stateCache)
}
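
// Illustrative sketch, not from the original source: reading an account balance at
// the current head, assuming addr is a common.Address of interest:
//
//	statedb, err := bc.State()
//	if err != nil {
//		return err
//	}
//	balance := statedb.GetBalance(addr)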

// Reset purges the entire blockchain, restoring it to its genesis state.
func (bc *BlockChain) Reset() error {
	return bc.ResetWithGenesisBlock(bc.genesisBlock)
}

// ResetWithGenesisBlock purges the entire blockchain, restoring it to the
// specified genesis state.
func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
	// Dump the entire block chain and purge the caches
	if err := bc.SetHead(0); err != nil {
		return err
	}
	bc.mu.Lock()
	defer bc.mu.Unlock()

	// Prepare the genesis block and reinitialise the chain
	if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
		log.Crit("Failed to write genesis block TD", "err", err)
	}
	if err := WriteBlock(bc.db, genesis); err != nil {
		log.Crit("Failed to write genesis block", "err", err)
	}
	bc.genesisBlock = genesis
	bc.insert(bc.genesisBlock)
	bc.currentBlock = bc.genesisBlock
	bc.hc.SetGenesis(bc.genesisBlock.Header())
	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
	bc.currentFastBlock = bc.genesisBlock

	return nil
}

// repair tries to repair the current blockchain by rolling back the current block
// until one with associated state is found. This is needed to fix incomplete db
// writes caused either by crashes/power outages, or simply non-committed tries.
//
// This method only rolls back the current block. The current header and current
// fast block are left intact.
func (bc *BlockChain) repair(head **types.Block) error {
	for {
		// Abort if we've rewound to a head block that does have associated state
		if _, err := state.New((*head).Root(), bc.stateCache); err == nil {
			log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash())
			return nil
		}
		// Otherwise rewind one block and recheck state availability there
		(*head) = bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
	}
}

// Export writes the active chain to the given writer.
func (bc *BlockChain) Export(w io.Writer) error {
	return bc.ExportN(w, uint64(0), bc.currentBlock.NumberU64())
}

// ExportN writes a subset of the active chain to the given writer.
func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
	bc.mu.RLock()
	defer bc.mu.RUnlock()

	if first > last {
		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
	}
	log.Info("Exporting batch of blocks", "count", last-first+1)

	for nr := first; nr <= last; nr++ {
		block := bc.GetBlockByNumber(nr)
		if block == nil {
			return fmt.Errorf("export failed on #%d: not found", nr)
		}
		if err := block.EncodeRLP(w); err != nil {
			return err
		}
	}

	return nil
}
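
// Illustrative usage sketch, not from the original source: dumping the whole chain
// to an RLP file (the file name is arbitrary):
//
//	f, err := os.Create("chain.rlp")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	if err := bc.Export(f); err != nil {
//		return err
//	}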

// insert injects a new head block into the current block chain. This method
// assumes that the block is indeed a true head. It will also reset the head
// header and the head fast sync block to this very same block if they are older
// or if they are on a different side chain.
//
// Note, this function assumes that the `mu` mutex is held!
func (bc *BlockChain) insert(block *types.Block) {
	// If the block is on a side chain or an unknown one, force other heads onto it too
	updateHeads := GetCanonicalHash(bc.db, block.NumberU64()) != block.Hash()

	// Add the block to the canonical chain number scheme and mark as the head
	if err := WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64()); err != nil {
		log.Crit("Failed to insert block number", "err", err)
	}
	if err := WriteHeadBlockHash(bc.db, block.Hash()); err != nil {
		log.Crit("Failed to insert head block hash", "err", err)
	}
	bc.currentBlock = block

	// If the block is better than our head or is on a different chain, force update heads
	if updateHeads {
		bc.hc.SetCurrentHeader(block.Header())

		if err := WriteHeadFastBlockHash(bc.db, block.Hash()); err != nil {
			log.Crit("Failed to insert head fast block hash", "err", err)
		}
		bc.currentFastBlock = block
	}
}

// Genesis retrieves the chain's genesis block.
func (bc *BlockChain) Genesis() *types.Block {
	return bc.genesisBlock
}

// GetBody retrieves a block body (transactions and uncles) from the database by
// hash, caching it if found.
func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
	// Short circuit if the body's already in the cache, retrieve otherwise
	if cached, ok := bc.bodyCache.Get(hash); ok {
		body := cached.(*types.Body)
		return body
	}
	body := GetBody(bc.db, hash, bc.hc.GetBlockNumber(hash))
	if body == nil {
		return nil
	}
	// Cache the found body for next time and return
	bc.bodyCache.Add(hash, body)
	return body
}

// GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
// caching it if found.
func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
	// Short circuit if the body's already in the cache, retrieve otherwise
	if cached, ok := bc.bodyRLPCache.Get(hash); ok {
		return cached.(rlp.RawValue)
	}
	body := GetBodyRLP(bc.db, hash, bc.hc.GetBlockNumber(hash))
	if len(body) == 0 {
		return nil
	}
	// Cache the found body for next time and return
	bc.bodyRLPCache.Add(hash, body)
	return body
}

// HasBlock checks if a block is fully present in the database or not.
func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
	if bc.blockCache.Contains(hash) {
		return true
	}
	ok, _ := bc.db.Has(blockBodyKey(hash, number))
	return ok
}

// HasState checks if a state trie is fully present in the database or not.
func (bc *BlockChain) HasState(hash common.Hash) bool {
	_, err := bc.stateCache.OpenTrie(hash)
	return err == nil
}

// HasBlockAndState checks if a block and associated state trie is fully present
// in the database or not, caching it if present.
func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool {
	// Check first that the block itself is known
	block := bc.GetBlock(hash, number)
	if block == nil {
		return false
	}
	return bc.HasState(block.Root())
}

// GetBlock retrieves a block from the database by hash and number,
// caching it if found.
func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
	// Short circuit if the block's already in the cache, retrieve otherwise
	if block, ok := bc.blockCache.Get(hash); ok {
		return block.(*types.Block)
	}
	block := GetBlock(bc.db, hash, number)
	if block == nil {
		return nil
	}
	// Cache the found block for next time and return
	bc.blockCache.Add(block.Hash(), block)
	return block
}

// GetBlockByHash retrieves a block from the database by hash, caching it if found.
func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
	return bc.GetBlock(hash, bc.hc.GetBlockNumber(hash))
}

// GetBlockByNumber retrieves a block from the database by number, caching it
// (associated with its hash) if found.
func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
	hash := GetCanonicalHash(bc.db, number)
	if hash == (common.Hash{}) {
		return nil
	}
	return bc.GetBlock(hash, number)
}

// GetReceiptsByHash retrieves the receipts for all transactions in a given block.
func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
	return GetBlockReceipts(bc.db, hash, GetBlockNumber(bc.db, hash))
}

// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
// [deprecated by eth/62]
func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
	number := bc.hc.GetBlockNumber(hash)
	for i := 0; i < n; i++ {
		block := bc.GetBlock(hash, number)
		if block == nil {
			break
		}
		blocks = append(blocks, block)
		hash = block.ParentHash()
		number--
	}
	return
}

// GetUnclesInChain retrieves all the uncles from a given block backwards until
// a specific distance is reached.
func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
	uncles := []*types.Header{}
	for i := 0; block != nil && i < length; i++ {
		uncles = append(uncles, block.Uncles()...)
		block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
	}
	return uncles
}

// TrieNode retrieves a blob of data associated with a trie node (or code hash)
// either from ephemeral in-memory cache, or from persistent storage.
func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
	return bc.stateCache.TrieDB().Node(hash)
}

// Stop stops the blockchain service. If any imports are currently in progress
// it will abort them using the procInterrupt.
func (bc *BlockChain) Stop() {
	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
		return
	}
	// Unsubscribe all subscriptions registered from blockchain
	bc.scope.Close()

	close(bc.quit)
	atomic.StoreInt32(&bc.procInterrupt, 1)

	bc.wg.Wait()

	// Ensure the state of a recent block is also stored to disk before exiting.
	// It is fine if this state does not exist (fast start/stop cycle), but it is
	// advisable to leave an N block gap from the head so 1) a restart loads up
	// the last N blocks as sync assistance to remote nodes; 2) a restart during
	// a (small) reorg doesn't require deep reprocesses; 3) chain "repair" from
	// missing states is constantly tested.
	//
	// This may be tuned a bit on mainnet if it's too annoying to reprocess the last
	// N blocks.
	if !bc.cacheConfig.Disabled {
		triedb := bc.stateCache.TrieDB()
		if number := bc.CurrentBlock().NumberU64(); number >= triesInMemory {
			recent := bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - triesInMemory + 1)

			log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
			if err := triedb.Commit(recent.Root(), true); err != nil {
				log.Error("Failed to commit recent state trie", "err", err)
			}
		}
		for !bc.triegc.Empty() {
			triedb.Dereference(bc.triegc.PopItem().(common.Hash), common.Hash{})
		}
		if size := triedb.Size(); size != 0 {
			log.Error("Dangling trie nodes after full cleanup")
		}
	}
	log.Info("Blockchain manager stopped")
}

// procFutureBlocks re-attempts insertion of the blocks queued in the future
// block cache, in ascending block number order.
func (bc *BlockChain) procFutureBlocks() {
	blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
	for _, hash := range bc.futureBlocks.Keys() {
		if block, exist := bc.futureBlocks.Peek(hash); exist {
			blocks = append(blocks, block.(*types.Block))
		}
	}
	if len(blocks) > 0 {
		types.BlockBy(types.Number).Sort(blocks)

		// Insert one by one as chain insertion needs contiguous ancestry between blocks
		for i := range blocks {
			bc.InsertChain(blocks[i : i+1])
		}
	}
}

// WriteStatus is the status returned when writing a block to the chain.
type WriteStatus byte

const (
	NonStatTy WriteStatus = iota
	CanonStatTy
	SideStatTy
)

// Rollback is designed to remove a chain of links from the database that aren't
// certain enough to be valid.
func (bc *BlockChain) Rollback(chain []common.Hash) {
	bc.mu.Lock()
	defer bc.mu.Unlock()

	for i := len(chain) - 1; i >= 0; i-- {
		hash := chain[i]

		currentHeader := bc.hc.CurrentHeader()
		if currentHeader.Hash() == hash {
			bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1))
		}
		if bc.currentFastBlock.Hash() == hash {
			bc.currentFastBlock = bc.GetBlock(bc.currentFastBlock.ParentHash(), bc.currentFastBlock.NumberU64()-1)
			WriteHeadFastBlockHash(bc.db, bc.currentFastBlock.Hash())
		}
		if bc.currentBlock.Hash() == hash {
			bc.currentBlock = bc.GetBlock(bc.currentBlock.ParentHash(), bc.currentBlock.NumberU64()-1)
			WriteHeadBlockHash(bc.db, bc.currentBlock.Hash())
		}
	}
}

// SetReceiptsData computes all the non-consensus fields of the receipts
func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts types.Receipts) {
	signer := types.MakeSigner(config, block.Number())

	transactions, logIndex := block.Transactions(), uint(0)
	for j := 0; j < len(receipts); j++ {
		// The transaction hash can be retrieved from the transaction itself
		receipts[j].TxHash = transactions[j].Hash()

		// The contract address can be derived from the transaction itself
		if transactions[j].To() == nil {
			// Deriving the signer is expensive, only do if it's actually needed
			from, _ := types.Sender(signer, transactions[j])
			receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce())
		}
		// The used gas can be calculated based on previous receipts
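		// Illustrative arithmetic (not from the original source): if three receipts
		// report cumulative gas of 21000, 63000 and 90000, the per-transaction gas
		// used works out to 21000, 42000 and 27000 respectively.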
		if j == 0 {
			receipts[j].GasUsed = receipts[j].CumulativeGasUsed
		} else {
			receipts[j].GasUsed = receipts[j].CumulativeGasUsed - receipts[j-1].CumulativeGasUsed
		}
		// The derived log fields can simply be set from the block and transaction
		for k := 0; k < len(receipts[j].Logs); k++ {
			receipts[j].Logs[k].BlockNumber = block.NumberU64()
			receipts[j].Logs[k].BlockHash = block.Hash()
			receipts[j].Logs[k].TxHash = receipts[j].TxHash
			receipts[j].Logs[k].TxIndex = uint(j)
			receipts[j].Logs[k].Index = logIndex
			logIndex++
		}
	}
}

// InsertReceiptChain attempts to complete an already existing header chain with
// transaction and receipt data.
func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(blockChain); i++ {
		if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
			log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
				"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
				blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
		}
	}

	var (
		stats = struct{ processed, ignored int32 }{}
		start = time.Now()
		bytes = 0
		batch = bc.db.NewBatch()
	)
	for i, block := range blockChain {
		receipts := receiptChain[i]
		// Short circuit insertion if shutting down or processing failed
		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
			return 0, nil
		}
		// Short circuit if the owner header is unknown
		if !bc.HasHeader(block.Hash(), block.NumberU64()) {
			return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
		}
		// Skip if the entire data is already known
		if bc.HasBlock(block.Hash(), block.NumberU64()) {
			stats.ignored++
			continue
		}
		// Compute all the non-consensus fields of the receipts
		SetReceiptsData(bc.chainConfig, block, receipts)
		// Write all the data out into the database
		if err := WriteBody(batch, block.Hash(), block.NumberU64(), block.Body()); err != nil {
			return i, fmt.Errorf("failed to write block body: %v", err)
		}
		if err := WriteBlockReceipts(batch, block.Hash(), block.NumberU64(), receipts); err != nil {
			return i, fmt.Errorf("failed to write block receipts: %v", err)
		}
		if err := WriteTxLookupEntries(batch, block); err != nil {
			return i, fmt.Errorf("failed to write lookup metadata: %v", err)
		}
		stats.processed++

		if batch.ValueSize() >= ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return 0, err
			}
			bytes += batch.ValueSize()
			batch.Reset()
		}
	}
	if batch.ValueSize() > 0 {
		bytes += batch.ValueSize()
		if err := batch.Write(); err != nil {
			return 0, err
		}
	}

	// Update the head fast sync block if better
	bc.mu.Lock()
	head := blockChain[len(blockChain)-1]
	if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case
		if bc.GetTd(bc.currentFastBlock.Hash(), bc.currentFastBlock.NumberU64()).Cmp(td) < 0 {
			if err := WriteHeadFastBlockHash(bc.db, head.Hash()); err != nil {
				log.Crit("Failed to update head fast block hash", "err", err)
			}
			bc.currentFastBlock = head
		}
	}
	bc.mu.Unlock()

	log.Info("Imported new block receipts",
		"count", stats.processed,
		"elapsed", common.PrettyDuration(time.Since(start)),
		"number", head.Number(),
		"hash", head.Hash(),
		"size", common.StorageSize(bytes),
		"ignored", stats.ignored)
	return 0, nil
}

// lastWrite marks the block number of the last in-memory trie committed to disk.
var lastWrite uint64

// WriteBlockWithoutState writes only the block and its metadata to the database,
// but does not write any state. This is used to construct competing side forks
// up to the point where they exceed the canonical total difficulty.
func (bc *BlockChain) WriteBlockWithoutState(block *types.Block, td *big.Int) (err error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil {
		return err
	}
	if err := WriteBlock(bc.db, block); err != nil {
		return err
	}
	return nil
}

// WriteBlockWithState writes the block and all associated state to the database.
func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	// Calculate the total difficulty of the block
	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
	if ptd == nil {
		return NonStatTy, consensus.ErrUnknownAncestor
	}
	// Make sure no inconsistent state is leaked during insertion
	bc.mu.Lock()
	defer bc.mu.Unlock()

	localTd := bc.GetTd(bc.currentBlock.Hash(), bc.currentBlock.NumberU64())
	externTd := new(big.Int).Add(block.Difficulty(), ptd)

	// Irrespective of the canonical status, write the block itself to the database
	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
		return NonStatTy, err
	}
	// Write other block data using a batch.
	batch := bc.db.NewBatch()
	if err := WriteBlock(batch, block); err != nil {
		return NonStatTy, err
	}
	root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
	if err != nil {
		return NonStatTy, err
	}
	triedb := bc.stateCache.TrieDB()

	// If we're running an archive node, always flush
	if bc.cacheConfig.Disabled {
		if err := triedb.Commit(root, false); err != nil {
			return NonStatTy, err
		}
	} else {
		// Full but not archive node, do proper garbage collection
		triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
		bc.triegc.Push(root, -float32(block.NumberU64()))

		if current := block.NumberU64(); current > triesInMemory {
			// Find the next state trie we need to commit
			header := bc.GetHeaderByNumber(current - triesInMemory)
			chosen := header.Number.Uint64()

			// Only write to disk if we exceeded our memory allowance *and* also have at
			// least a given number of tries gapped.
			var (
				size  = triedb.Size()
				limit = common.StorageSize(bc.cacheConfig.TrieNodeLimit) * 1024 * 1024
			)
			if size > limit || bc.gcproc > bc.cacheConfig.TrieTimeLimit {
				// If we're exceeding limits but haven't reached a large enough memory gap,
				// warn the user that the system is becoming unstable.
				if chosen < lastWrite+triesInMemory {
					switch {
					case size >= 2*limit:
						log.Warn("State memory usage too high, committing", "size", size, "limit", limit, "optimum", float64(chosen-lastWrite)/triesInMemory)
					case bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit:
						log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
					}
				}
				// If optimum or critical limits reached, write to disk
				if chosen >= lastWrite+triesInMemory || size >= 2*limit || bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
					triedb.Commit(header.Root, true)
					lastWrite = chosen
					bc.gcproc = 0
				}
			}
			// Garbage collect anything below our required write retention
			for !bc.triegc.Empty() {
				root, number := bc.triegc.Pop()
				if uint64(-number) > chosen {
					bc.triegc.Push(root, number)
					break
				}
				triedb.Dereference(root.(common.Hash), common.Hash{})
			}
		}
	}
	if err := WriteBlockReceipts(batch, block.Hash(), block.NumberU64(), receipts); err != nil {
		return NonStatTy, err
	}
	// If the total difficulty is higher than our known total difficulty, add the
	// block to the canonical chain.
	// Second clause in the if statement reduces the vulnerability to selfish mining.
	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
	reorg := externTd.Cmp(localTd) > 0
	if !reorg && externTd.Cmp(localTd) == 0 {
		// Split same-difficulty blocks by number, then at random
		reorg = block.NumberU64() < bc.currentBlock.NumberU64() || (block.NumberU64() == bc.currentBlock.NumberU64() && mrand.Float64() < 0.5)
	}
	if reorg {
		// Reorganise the chain if the parent is not the head block
		if block.ParentHash() != bc.currentBlock.Hash() {
			if err := bc.reorg(bc.currentBlock, block); err != nil {
				return NonStatTy, err
			}
		}
		// Write the positional metadata for transaction and receipt lookups
		if err := WriteTxLookupEntries(batch, block); err != nil {
			return NonStatTy, err
		}
		// Write hash preimages
		if err := WritePreimages(bc.db, block.NumberU64(), state.Preimages()); err != nil {
			return NonStatTy, err
		}
		status = CanonStatTy
	} else {
		status = SideStatTy
	}
	if err := batch.Write(); err != nil {
		return NonStatTy, err
	}

	// Set new head.
	if status == CanonStatTy {
		bc.insert(block)
	}
	bc.futureBlocks.Remove(block.Hash())
	return status, nil
}

// InsertChain attempts to insert the given batch of blocks into the canonical
// chain or, otherwise, create a fork. If an error is returned it will return
// the index number of the failing block as well as an error describing what went
// wrong.
//
// After insertion is done, all accumulated events will be fired.
func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
	n, events, logs, err := bc.insertChain(chain)
	bc.PostChainEvents(events, logs)
	return n, err
}
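
// Illustrative usage sketch, not from the original source: importing a batch of
// blocks received from a peer or decoded from an export file:
//
//	if n, err := bc.InsertChain(blocks); err != nil {
//		log.Error("Block import failed", "index", n, "err", err)
//	}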
// insertChain will execute the actual chain insertion and event aggregation. The
// only reason this method exists as a separate one is to make locking cleaner
// with deferred statements.
func ( bc * BlockChain ) insertChain ( chain types . Blocks ) ( int , [ ] interface { } , [ ] * types . Log , error ) {
2016-12-13 17:14:33 +03:00
// Do a sanity check that the provided chain is actually ordered and linked
for i := 1 ; i < len ( chain ) ; i ++ {
if chain [ i ] . NumberU64 ( ) != chain [ i - 1 ] . NumberU64 ( ) + 1 || chain [ i ] . ParentHash ( ) != chain [ i - 1 ] . Hash ( ) {
// Chain broke ancestry, log a messge (programming error) and skip insertion
2017-02-28 14:35:17 +03:00
log . Error ( "Non contiguous block insert" , "number" , chain [ i ] . Number ( ) , "hash" , chain [ i ] . Hash ( ) ,
"parent" , chain [ i ] . ParentHash ( ) , "prevnumber" , chain [ i - 1 ] . Number ( ) , "prevhash" , chain [ i - 1 ] . Hash ( ) )
2016-12-13 17:14:33 +03:00
2017-09-11 13:13:05 +03:00
return 0 , nil , nil , fmt . Errorf ( "non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])" , i - 1 , chain [ i - 1 ] . NumberU64 ( ) ,
2017-02-28 14:35:17 +03:00
chain [ i - 1 ] . Hash ( ) . Bytes ( ) [ : 4 ] , i , chain [ i ] . NumberU64 ( ) , chain [ i ] . Hash ( ) . Bytes ( ) [ : 4 ] , chain [ i ] . ParentHash ( ) . Bytes ( ) [ : 4 ] )
2016-12-13 17:14:33 +03:00
}
}
// Pre-checks passed, start the full block imports
2017-05-11 04:55:48 +03:00
bc . wg . Add ( 1 )
defer bc . wg . Done ( )
2015-04-30 18:50:23 +03:00
2017-05-11 04:55:48 +03:00
bc . chainmu . Lock ( )
defer bc . chainmu . Unlock ( )
2015-05-17 01:55:02 +03:00
2015-05-29 19:07:23 +03:00
// A queued approach to delivering events. This is generally
// faster than direct delivery and requires much less mutex
// acquiring.
2015-04-05 00:04:19 +03:00
var (
2017-01-11 00:55:54 +03:00
stats = insertStats { startTime : mclock . Now ( ) }
2015-10-19 17:08:17 +03:00
events = make ( [ ] interface { } , 0 , len ( chain ) )
2017-09-11 13:13:05 +03:00
lastCanon * types . Block
2017-01-05 16:03:50 +03:00
coalescedLogs [ ] * types . Log
2015-04-05 00:04:19 +03:00
)
2017-04-05 01:16:29 +03:00
// Start the parallel header verifier
headers := make ( [ ] * types . Header , len ( chain ) )
seals := make ( [ ] bool , len ( chain ) )
2015-05-17 02:42:30 +03:00
2017-04-05 01:16:29 +03:00
for i , block := range chain {
headers [ i ] = block . Header ( )
seals [ i ] = true
}
2017-05-11 04:55:48 +03:00
abort , results := bc . engine . VerifyHeaders ( bc , headers , seals )
2017-04-05 01:16:29 +03:00
defer close ( abort )
2015-06-19 17:21:20 +03:00
2017-04-05 01:16:29 +03:00
// Iterate over the blocks and insert when the verifier permits
2015-03-06 16:50:44 +02:00
for i , block := range chain {
2017-04-05 01:16:29 +03:00
// If the chain is terminating, stop processing blocks
2017-05-11 04:55:48 +03:00
if atomic . LoadInt32 ( & bc . procInterrupt ) == 1 {
2017-02-28 14:35:17 +03:00
log . Debug ( "Premature abort during blocks processing" )
2015-06-12 17:45:53 +03:00
break
}
2017-04-05 01:16:29 +03:00
// If the header is a banned one, straight out abort
2015-06-12 17:45:53 +03:00
if BadHashes [ block . Hash ( ) ] {
2017-05-11 04:55:48 +03:00
bc . reportBlock ( block , nil , ErrBlacklistedHash )
2017-09-11 13:13:05 +03:00
return i , events , coalescedLogs , ErrBlacklistedHash
2015-06-12 17:45:53 +03:00
}
2017-04-05 01:16:29 +03:00
// Wait for the block's verification to complete
bstart := time . Now ( )
err := <- results
if err == nil {
2017-05-11 04:55:48 +03:00
err = bc . Validator ( ) . ValidateBody ( block )
2017-04-05 01:16:29 +03:00
}
		switch {
		case err == ErrKnownBlock:
			// Block and state are both already known. However, if the current block is
			// below this number, we did a rollback and should reimport it nonetheless.
			if bc.CurrentBlock().NumberU64() >= block.NumberU64() {
				stats.ignored++
				continue
			}

		case err == consensus.ErrFutureBlock:
			// Allow blocks up to maxTimeFutureBlocks seconds in the future. If this limit
			// is exceeded, the chain is discarded and processed at a later time if given.
			max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
			if block.Time().Cmp(max) > 0 {
				return i, events, coalescedLogs, fmt.Errorf("future block: %v > %v", block.Time(), max)
			}
			bc.futureBlocks.Add(block.Hash(), block)
			stats.queued++
			continue

		case err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(block.ParentHash()):
			bc.futureBlocks.Add(block.Hash(), block)
			stats.queued++
			continue

		case err == consensus.ErrPrunedAncestor:
			// Block competing with the canonical chain, store in the db, but don't process
			// until the competitor TD goes above the canonical TD
			localTd := bc.GetTd(bc.currentBlock.Hash(), bc.currentBlock.NumberU64())
			externTd := new(big.Int).Add(bc.GetTd(block.ParentHash(), block.NumberU64()-1), block.Difficulty())
			if localTd.Cmp(externTd) > 0 {
				if err = bc.WriteBlockWithoutState(block, externTd); err != nil {
					return i, events, coalescedLogs, err
				}
				continue
			}
			// Competitor chain beat canonical, gather all blocks from the common ancestor
			var winner []*types.Block

			parent := bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
			for !bc.HasState(parent.Root()) {
				winner = append(winner, parent)
				parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1)
			}
			for j := 0; j < len(winner)/2; j++ {
				winner[j], winner[len(winner)-1-j] = winner[len(winner)-1-j], winner[j]
			}
			// Import all the pruned blocks to make the state available
			bc.chainmu.Unlock()
			_, evs, logs, err := bc.insertChain(winner)
			bc.chainmu.Lock()
			events, coalescedLogs = evs, logs
			if err != nil {
				return i, events, coalescedLogs, err
			}

		case err != nil:
			bc.reportBlock(block, nil, err)
			return i, events, coalescedLogs, err
		}
		// Create a new statedb using the parent block and report an
		// error if it fails.
		var parent *types.Block
		if i == 0 {
			parent = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
		} else {
			parent = chain[i-1]
		}
		state, err := state.New(parent.Root(), bc.stateCache)
		if err != nil {
			return i, events, coalescedLogs, err
		}
		// Process block using the parent state as reference point.
		receipts, logs, usedGas, err := bc.processor.Process(block, state, bc.vmConfig)
		if err != nil {
			bc.reportBlock(block, receipts, err)
			return i, events, coalescedLogs, err
		}
		// Validate the state using the default validator
		err = bc.Validator().ValidateState(block, parent, state, receipts, usedGas)
		if err != nil {
			bc.reportBlock(block, receipts, err)
			return i, events, coalescedLogs, err
		}
		proctime := time.Since(bstart)

		// Write the block to the chain and get the status.
		status, err := bc.WriteBlockWithState(block, receipts, state)
		if err != nil {
			return i, events, coalescedLogs, err
		}
		switch status {
		case CanonStatTy:
			log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), "uncles", len(block.Uncles()),
				"txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart)))

			coalescedLogs = append(coalescedLogs, logs...)
			blockInsertTimer.UpdateSince(bstart)
			events = append(events, ChainEvent{block, block.Hash(), logs})
			lastCanon = block

			// Only count canonical blocks for GC processing time
			bc.gcproc += proctime

		case SideStatTy:
			log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), "diff", block.Difficulty(), "elapsed",
				common.PrettyDuration(time.Since(bstart)), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()))

			blockInsertTimer.UpdateSince(bstart)
			events = append(events, ChainSideEvent{block})
		}
		stats.processed++
		stats.usedGas += usedGas
		stats.report(chain, i, bc.stateCache.TrieDB().Size())
	}
	// Append a single chain head event if we've progressed the chain
	if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
		events = append(events, ChainHeadEvent{lastCanon})
	}
	return 0, events, coalescedLogs, nil
}
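
// Illustrative sketch (not part of the original source): the exported
// InsertChain entry point, assumed to be defined elsewhere in this file, is
// expected to wrap insertChain roughly as follows, draining the queued events
// through PostChainEvents once the import loop has released chainmu:
//
//	n, events, logs, err := bc.insertChain(chain)
//	bc.PostChainEvents(events, logs)
//	return n, err
//
// Delivering the events after the loop is what makes the queued approach above
// cheaper than posting each event while the chain mutex is still held.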

// insertStats tracks and reports on block insertion.
type insertStats struct {
	queued, processed, ignored int
	usedGas                    uint64
	lastIndex                  int
	startTime                  mclock.AbsTime
}

// statsReportLimit is the time limit during import after which we always print
// out progress. This avoids the user wondering what's going on.
const statsReportLimit = 8 * time.Second
// report prints statistics if some number of blocks have been processed
// or more than a few seconds have passed since the last message.
func (st *insertStats) report(chain []*types.Block, index int, cache common.StorageSize) {
	// Fetch the timings for the batch
	var (
		now     = mclock.Now()
		elapsed = time.Duration(now) - time.Duration(st.startTime)
	)
	// If we're at the last block of the batch or report period reached, log
	if index == len(chain)-1 || elapsed >= statsReportLimit {
		var (
			end = chain[index]
			txs = countTransactions(chain[st.lastIndex : index+1])
		)
		context := []interface{}{
			"blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000,
			"elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
			"number", end.Number(), "hash", end.Hash(), "cache", cache,
		}
		if st.queued > 0 {
			context = append(context, []interface{}{"queued", st.queued}...)
		}
		if st.ignored > 0 {
			context = append(context, []interface{}{"ignored", st.ignored}...)
		}
		log.Info("Imported new chain segment", context...)

		*st = insertStats{startTime: now, lastIndex: index + 1}
	}
}

func countTransactions(chain []*types.Block) (c int) {
	for _, b := range chain {
		c += len(b.Transactions())
	}
	return c
}
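
// Worked example for report above (figures are assumed, purely illustrative):
// a batch with usedGas = 8,000,000 and elapsed = 2s (2e9 ns) logs
// mgas = 8,000,000/1,000,000 = 8 and mgasps = 8,000,000*1000/2e9 = 4,
// i.e. the segment imported at roughly 4 million gas per second.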

// reorg takes two blocks, an old chain and a new chain, and reconstructs the
// blocks so they become part of the new canonical chain. It also accumulates
// potentially missing transactions and posts an event about them.
func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
	var (
		newChain    types.Blocks
		oldChain    types.Blocks
		commonBlock *types.Block
		deletedTxs  types.Transactions
		deletedLogs []*types.Log

		// collectLogs collects the logs that were generated during the
		// processing of the block that corresponds with the given hash.
		// These logs are later announced as deleted.
		collectLogs = func(h common.Hash) {
			// Coalesce logs and set 'Removed'.
			receipts := GetBlockReceipts(bc.db, h, bc.hc.GetBlockNumber(h))
			for _, receipt := range receipts {
				for _, log := range receipt.Logs {
					del := *log
					del.Removed = true
					deletedLogs = append(deletedLogs, &del)
				}
			}
		}
	)
	// first reduce whoever is higher bound
	if oldBlock.NumberU64() > newBlock.NumberU64() {
		// reduce old chain
		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
			oldChain = append(oldChain, oldBlock)
			deletedTxs = append(deletedTxs, oldBlock.Transactions()...)

			collectLogs(oldBlock.Hash())
		}
	} else {
		// reduce new chain and append new chain blocks for inserting later on
		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
			newChain = append(newChain, newBlock)
		}
	}
	if oldBlock == nil {
		return fmt.Errorf("Invalid old chain")
	}
	if newBlock == nil {
		return fmt.Errorf("Invalid new chain")
	}

	for {
		if oldBlock.Hash() == newBlock.Hash() {
			commonBlock = oldBlock
			break
		}
		oldChain = append(oldChain, oldBlock)
		newChain = append(newChain, newBlock)
		deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
		collectLogs(oldBlock.Hash())

		oldBlock, newBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
		if oldBlock == nil {
			return fmt.Errorf("Invalid old chain")
		}
		if newBlock == nil {
			return fmt.Errorf("Invalid new chain")
		}
	}
	// Ensure the user sees large reorgs
	if len(oldChain) > 0 && len(newChain) > 0 {
		logFn := log.Debug
		if len(oldChain) > 63 {
			logFn = log.Warn
		}
		logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(),
			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
	} else {
		log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
	}
	// Insert the new chain, taking care of the proper incremental order
	var addedTxs types.Transactions
	for i := len(newChain) - 1; i >= 0; i-- {
		// insert the block in the canonical way, re-writing history
		bc.insert(newChain[i])
		// write lookup entries for hash based transaction/receipt searches
		if err := WriteTxLookupEntries(bc.db, newChain[i]); err != nil {
			return err
		}
		addedTxs = append(addedTxs, newChain[i].Transactions()...)
	}
	// calculate the difference between deleted and added transactions
	diff := types.TxDifference(deletedTxs, addedTxs)
	// When transactions get deleted from the database that means the
	// receipts that were created in the fork must also be deleted
	for _, tx := range diff {
		DeleteTxLookupEntry(bc.db, tx.Hash())
	}
	if len(deletedLogs) > 0 {
		go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
	}
	if len(oldChain) > 0 {
		go func() {
			for _, block := range oldChain {
				bc.chainSideFeed.Send(ChainSideEvent{Block: block})
			}
		}()
	}

	return nil
}
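
// Worked example for the reorg walk above (block numbers are assumed, purely
// illustrative): with the old head at #105 and the new head at #103, the first
// loop trims the old chain down to #103, collecting #105 and #104 (plus their
// transactions and logs) into oldChain. The lock-step loop then walks both
// sides back (#103, #102, ...) until the hashes match; that block becomes
// commonBlock, newChain holds the blocks to re-insert (applied in reverse,
// oldest first), and everything gathered in oldChain is later announced as
// side blocks and removed logs.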

// PostChainEvents iterates over the events generated by a chain insertion and
// posts them into the event feed.
// TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock.
func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) {
	// post event logs for further processing
	if logs != nil {
		bc.logsFeed.Send(logs)
	}
	for _, event := range events {
		switch ev := event.(type) {
		case ChainEvent:
			bc.chainFeed.Send(ev)

		case ChainHeadEvent:
			bc.chainHeadFeed.Send(ev)

		case ChainSideEvent:
			bc.chainSideFeed.Send(ev)
		}
	}
}

func (bc *BlockChain) update() {
	futureTimer := time.NewTicker(5 * time.Second)
	defer futureTimer.Stop()
	for {
		select {
		case <-futureTimer.C:
			bc.procFutureBlocks()
		case <-bc.quit:
			return
		}
	}
}

// BadBlockArgs represents the entries in the list returned when bad blocks are queried.
type BadBlockArgs struct {
	Hash   common.Hash   `json:"hash"`
	Header *types.Header `json:"header"`
}

// BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network
func (bc *BlockChain) BadBlocks() ([]BadBlockArgs, error) {
	headers := make([]BadBlockArgs, 0, bc.badBlocks.Len())
	for _, hash := range bc.badBlocks.Keys() {
		if hdr, exist := bc.badBlocks.Peek(hash); exist {
			header := hdr.(*types.Header)
			headers = append(headers, BadBlockArgs{header.Hash(), header})
		}
	}
	return headers, nil
}

// addBadBlock adds a bad block to the bad-block LRU cache
func (bc *BlockChain) addBadBlock(block *types.Block) {
	bc.badBlocks.Add(block.Header().Hash(), block.Header())
}

// reportBlock logs a bad block error.
func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
	bc.addBadBlock(block)

	var receiptString string
	for _, receipt := range receipts {
		receiptString += fmt.Sprintf("\t%v\n", receipt)
	}
	log.Error(fmt.Sprintf(`
########## BAD BLOCK #########
Chain config: %v

Number: %v
Hash: 0x%x
%v

Error: %v
##############################
`, bc.chainConfig, block.Number(), block.Hash(), receiptString, err))
}

// InsertHeaderChain attempts to insert the given header chain into the local
// chain, possibly creating a reorg. If an error is returned, it will return
// the index number of the failing header as well as an error describing what
// went wrong.
//
// The checkFreq parameter can be used to fine tune whether nonce verification
// should be done or not. The reason behind the optional check is that some of
// the header retrieval mechanisms already need to verify nonces, and also
// because nonces can be verified sparsely, not needing to check each one.
func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
	start := time.Now()
	if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
		return i, err
	}

	// Make sure only one thread manipulates the chain at once
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	bc.wg.Add(1)
	defer bc.wg.Done()

	whFunc := func(header *types.Header) error {
		bc.mu.Lock()
		defer bc.mu.Unlock()

		_, err := bc.hc.WriteHeader(header)
		return err
	}

	return bc.hc.InsertHeaderChain(chain, whFunc, start)
}

// writeHeader writes a header into the local chain, given that its parent is
// already known. If the total difficulty of the newly inserted header becomes
// greater than the current known TD, the canonical chain is re-routed.
//
// Note: This method is not concurrent-safe with inserting blocks simultaneously
// into the chain, as side effects caused by reorganisations cannot be emulated
// without the real blocks. Hence, writing headers directly should only be done
// in two scenarios: pure-header mode of operation (light clients), or properly
// separated header/block phases (non-archive clients).
func (bc *BlockChain) writeHeader(header *types.Header) error {
	bc.wg.Add(1)
	defer bc.wg.Done()

	bc.mu.Lock()
	defer bc.mu.Unlock()

	_, err := bc.hc.WriteHeader(header)
	return err
}

// CurrentHeader retrieves the current head header of the canonical chain. The
// header is retrieved from the HeaderChain's internal cache.
func (bc *BlockChain) CurrentHeader() *types.Header {
	bc.mu.RLock()
	defer bc.mu.RUnlock()

	return bc.hc.CurrentHeader()
}

// GetTd retrieves a block's total difficulty in the canonical chain from the
// database by hash and number, caching it if found.
func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
	return bc.hc.GetTd(hash, number)
}

// GetTdByHash retrieves a block's total difficulty in the canonical chain from the
// database by hash, caching it if found.
func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int {
	return bc.hc.GetTdByHash(hash)
}

// GetHeader retrieves a block header from the database by hash and number,
// caching it if found.
func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header {
	return bc.hc.GetHeader(hash, number)
}

// GetHeaderByHash retrieves a block header from the database by hash, caching
// it if found.
func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
	return bc.hc.GetHeaderByHash(hash)
}

// HasHeader checks if a block header is present in the database or not, caching
// it if present.
func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
	return bc.hc.HasHeader(hash, number)
}

// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
// hash, fetching towards the genesis block.
func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
	return bc.hc.GetBlockHashesFromHash(hash, max)
}

// GetHeaderByNumber retrieves a block header from the database by number,
// caching it (associated with its hash) if found.
func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
	return bc.hc.GetHeaderByNumber(number)
}

// Config retrieves the blockchain's chain configuration.
func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }

// Engine retrieves the blockchain's consensus engine.
func (bc *BlockChain) Engine() consensus.Engine { return bc.engine }

// SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
	return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))
}

// SubscribeChainEvent registers a subscription of ChainEvent.
func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription {
	return bc.scope.Track(bc.chainFeed.Subscribe(ch))
}

// SubscribeChainHeadEvent registers a subscription of ChainHeadEvent.
func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
	return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
}

// SubscribeChainSideEvent registers a subscription of ChainSideEvent.
func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
	return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
}

// SubscribeLogsEvent registers a subscription of []*types.Log.
func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
	return bc.scope.Track(bc.logsFeed.Subscribe(ch))
}
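
// Usage sketch (illustrative, not part of the original source): a consumer
// tracking the canonical head could subscribe through the feeds above,
// assuming it holds a *BlockChain value named chain:
//
//	ch := make(chan ChainHeadEvent, 16)
//	sub := chain.SubscribeChainHeadEvent(ch)
//	defer sub.Unsubscribe()
//	for {
//		select {
//		case ev := <-ch:
//			log.Info("New chain head", "number", ev.Block.Number(), "hash", ev.Block.Hash())
//		case <-sub.Err():
//			return // subscription closed, e.g. on chain shutdown
//		}
//	}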