2015-12-16 05:26:23 +02:00
|
|
|
// Copyright 2015 The go-ethereum Authors
|
|
|
|
// This file is part of the go-ethereum library.
|
|
|
|
//
|
|
|
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
|
|
// it under the terms of the GNU Lesser General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
//
|
|
|
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
// GNU Lesser General Public License for more details.
|
|
|
|
//
|
|
|
|
// You should have received a copy of the GNU Lesser General Public License
|
|
|
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
2016-04-14 19:18:24 +03:00
|
|
|
|
2015-12-16 05:26:23 +02:00
|
|
|
package core
|
|
|
|
|
|
|
|
import (
|
|
|
|
crand "crypto/rand"
|
2017-04-05 01:16:29 +03:00
|
|
|
"errors"
|
2016-10-18 11:18:07 +03:00
|
|
|
"fmt"
|
2015-12-16 05:26:23 +02:00
|
|
|
"math"
|
|
|
|
"math/big"
|
|
|
|
mrand "math/rand"
|
2018-03-26 12:28:46 +03:00
|
|
|
"sync/atomic"
|
2015-12-16 05:26:23 +02:00
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/ethereum/go-ethereum/common"
|
2017-04-05 01:16:29 +03:00
|
|
|
"github.com/ethereum/go-ethereum/consensus"
|
2018-05-07 14:35:06 +03:00
|
|
|
"github.com/ethereum/go-ethereum/core/rawdb"
|
2015-12-16 05:26:23 +02:00
|
|
|
"github.com/ethereum/go-ethereum/core/types"
|
|
|
|
"github.com/ethereum/go-ethereum/ethdb"
|
2017-02-22 15:10:07 +03:00
|
|
|
"github.com/ethereum/go-ethereum/log"
|
2016-10-20 14:36:29 +03:00
|
|
|
"github.com/ethereum/go-ethereum/params"
|
2018-09-24 15:57:49 +03:00
|
|
|
lru "github.com/hashicorp/golang-lru"
|
2015-12-16 05:26:23 +02:00
|
|
|
)
|
|
|
|
|
2016-04-05 16:22:04 +03:00
|
|
|
const (
	headerCacheLimit = 512  // Maximum number of recent headers kept in headerCache
	tdCacheLimit     = 1024 // Maximum number of recent total difficulties kept in tdCache
	numberCacheLimit = 2048 // Maximum number of recent hash->number mappings kept in numberCache
)
|
|
|
|
|
2015-12-16 05:26:23 +02:00
|
|
|
// HeaderChain implements the basic block header chain logic that is shared by
// core.BlockChain and light.LightChain. It is not usable in itself, only as
// a part of either structure.
// It is not thread safe either, the encapsulating chain structures should do
// the necessary mutex locking/unlocking.
type HeaderChain struct {
	config *params.ChainConfig // Chain configuration (fork rules) this header chain obeys

	chainDb       ethdb.Database // Backing database for headers, TDs and canonical mappings
	genesisHeader *types.Header  // Header of block zero, used as rewind fallback

	currentHeader     atomic.Value // Current head of the header chain (may be above the block chain!)
	currentHeaderHash common.Hash  // Hash of the current head of the header chain (prevent recomputing all the time)

	headerCache *lru.Cache // Cache for the most recent block headers
	tdCache     *lru.Cache // Cache for the most recent block total difficulties
	numberCache *lru.Cache // Cache for the most recent block numbers

	procInterrupt func() bool // Callback reporting whether the encapsulating chain is shutting down

	rand   *mrand.Rand      // Crypto-seeded randomness (sparse seal verification picks)
	engine consensus.Engine // Consensus engine used for header verification
}
|
|
|
|
|
|
|
|
// NewHeaderChain creates a new HeaderChain structure.
//
// chainDb must already contain the genesis header (block number 0), otherwise
// ErrNoGenesis is returned. procInterrupt points to the parent's interrupt
// semaphore and is polled during long-running insertions.
func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine consensus.Engine, procInterrupt func() bool) (*HeaderChain, error) {
	headerCache, _ := lru.New(headerCacheLimit)
	tdCache, _ := lru.New(tdCacheLimit)
	numberCache, _ := lru.New(numberCacheLimit)

	// Seed a fast but crypto originating random generator
	seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
	if err != nil {
		return nil, err
	}

	hc := &HeaderChain{
		config:        config,
		chainDb:       chainDb,
		headerCache:   headerCache,
		tdCache:       tdCache,
		numberCache:   numberCache,
		procInterrupt: procInterrupt,
		rand:          mrand.New(mrand.NewSource(seed.Int64())),
		engine:        engine,
	}

	hc.genesisHeader = hc.GetHeaderByNumber(0)
	if hc.genesisHeader == nil {
		return nil, ErrNoGenesis
	}

	// Restore the last known head header, falling back to the genesis header
	// if the head block hash is unset or points at an unknown header.
	hc.currentHeader.Store(hc.genesisHeader)
	if head := rawdb.ReadHeadBlockHash(chainDb); head != (common.Hash{}) {
		if chead := hc.GetHeaderByHash(head); chead != nil {
			hc.currentHeader.Store(chead)
		}
	}
	hc.currentHeaderHash = hc.CurrentHeader().Hash()
	headHeaderGauge.Update(hc.CurrentHeader().Number.Int64())

	return hc, nil
}
|
|
|
|
|
2016-04-05 16:22:04 +03:00
|
|
|
// GetBlockNumber retrieves the block number belonging to the given hash
|
|
|
|
// from the cache or database
|
2018-05-07 14:35:06 +03:00
|
|
|
func (hc *HeaderChain) GetBlockNumber(hash common.Hash) *uint64 {
|
2016-04-05 16:22:04 +03:00
|
|
|
if cached, ok := hc.numberCache.Get(hash); ok {
|
2018-05-07 14:35:06 +03:00
|
|
|
number := cached.(uint64)
|
|
|
|
return &number
|
2016-04-05 16:22:04 +03:00
|
|
|
}
|
2018-05-07 14:35:06 +03:00
|
|
|
number := rawdb.ReadHeaderNumber(hc.chainDb, hash)
|
|
|
|
if number != nil {
|
|
|
|
hc.numberCache.Add(hash, *number)
|
2016-04-05 16:22:04 +03:00
|
|
|
}
|
|
|
|
return number
|
|
|
|
}
|
|
|
|
|
2015-12-16 05:26:23 +02:00
|
|
|
// WriteHeader writes a header into the local chain, given that its parent is
|
|
|
|
// already known. If the total difficulty of the newly inserted header becomes
|
|
|
|
// greater than the current known TD, the canonical chain is re-routed.
|
|
|
|
//
|
|
|
|
// Note: This method is not concurrent-safe with inserting blocks simultaneously
|
|
|
|
// into the chain, as side effects caused by reorganisations cannot be emulated
|
|
|
|
// without the real blocks. Hence, writing headers directly should only be done
|
|
|
|
// in two scenarios: pure-header mode of operation (light clients), or properly
|
|
|
|
// separated header/block phases (non-archive clients).
|
|
|
|
func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, err error) {
|
2016-03-10 20:19:09 +02:00
|
|
|
// Cache some values to prevent constant recalculation
|
|
|
|
var (
|
|
|
|
hash = header.Hash()
|
|
|
|
number = header.Number.Uint64()
|
|
|
|
)
|
2015-12-16 05:26:23 +02:00
|
|
|
// Calculate the total difficulty of the header
|
2016-04-05 16:22:04 +03:00
|
|
|
ptd := hc.GetTd(header.ParentHash, number-1)
|
2015-12-16 05:26:23 +02:00
|
|
|
if ptd == nil {
|
2017-04-06 14:58:03 +03:00
|
|
|
return NonStatTy, consensus.ErrUnknownAncestor
|
2015-12-16 05:26:23 +02:00
|
|
|
}
|
2018-02-26 12:53:10 +03:00
|
|
|
localTd := hc.GetTd(hc.currentHeaderHash, hc.CurrentHeader().Number.Uint64())
|
2015-12-16 05:26:23 +02:00
|
|
|
externTd := new(big.Int).Add(header.Difficulty, ptd)
|
|
|
|
|
2016-07-26 17:37:04 +03:00
|
|
|
// Irrelevant of the canonical status, write the td and header to the database
|
|
|
|
if err := hc.WriteTd(hash, number, externTd); err != nil {
|
2017-02-28 14:35:17 +03:00
|
|
|
log.Crit("Failed to write header total difficulty", "err", err)
|
2016-07-26 17:37:04 +03:00
|
|
|
}
|
2018-05-07 14:35:06 +03:00
|
|
|
rawdb.WriteHeader(hc.chainDb, header)
|
|
|
|
|
2015-12-16 05:26:23 +02:00
|
|
|
// If the total difficulty is higher than our known, add it to the canonical chain
|
|
|
|
// Second clause in the if statement reduces the vulnerability to selfish mining.
|
|
|
|
// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
|
|
|
|
if externTd.Cmp(localTd) > 0 || (externTd.Cmp(localTd) == 0 && mrand.Float64() < 0.5) {
|
|
|
|
// Delete any canonical number assignments above the new head
|
2018-07-02 11:16:30 +03:00
|
|
|
batch := hc.chainDb.NewBatch()
|
2016-04-05 16:22:04 +03:00
|
|
|
for i := number + 1; ; i++ {
|
2018-05-07 14:35:06 +03:00
|
|
|
hash := rawdb.ReadCanonicalHash(hc.chainDb, i)
|
2016-04-05 16:22:04 +03:00
|
|
|
if hash == (common.Hash{}) {
|
|
|
|
break
|
|
|
|
}
|
2018-07-02 11:16:30 +03:00
|
|
|
rawdb.DeleteCanonicalHash(batch, i)
|
2015-12-16 05:26:23 +02:00
|
|
|
}
|
2018-07-02 11:16:30 +03:00
|
|
|
batch.Write()
|
|
|
|
|
2015-12-16 05:26:23 +02:00
|
|
|
// Overwrite any stale canonical number assignments
|
2016-03-10 20:19:09 +02:00
|
|
|
var (
|
|
|
|
headHash = header.ParentHash
|
2016-04-05 16:22:04 +03:00
|
|
|
headNumber = header.Number.Uint64() - 1
|
|
|
|
headHeader = hc.GetHeader(headHash, headNumber)
|
2016-03-10 20:19:09 +02:00
|
|
|
)
|
2018-05-07 14:35:06 +03:00
|
|
|
for rawdb.ReadCanonicalHash(hc.chainDb, headNumber) != headHash {
|
|
|
|
rawdb.WriteCanonicalHash(hc.chainDb, headHash, headNumber)
|
2016-03-10 20:19:09 +02:00
|
|
|
|
|
|
|
headHash = headHeader.ParentHash
|
2016-04-05 16:22:04 +03:00
|
|
|
headNumber = headHeader.Number.Uint64() - 1
|
|
|
|
headHeader = hc.GetHeader(headHash, headNumber)
|
2015-12-16 05:26:23 +02:00
|
|
|
}
|
|
|
|
// Extend the canonical chain with the new header
|
2018-05-07 14:35:06 +03:00
|
|
|
rawdb.WriteCanonicalHash(hc.chainDb, hash, number)
|
|
|
|
rawdb.WriteHeadHeaderHash(hc.chainDb, hash)
|
|
|
|
|
2018-02-26 12:53:10 +03:00
|
|
|
hc.currentHeaderHash = hash
|
|
|
|
hc.currentHeader.Store(types.CopyHeader(header))
|
2019-06-10 14:21:02 +03:00
|
|
|
headHeaderGauge.Update(header.Number.Int64())
|
2016-03-10 20:19:09 +02:00
|
|
|
|
2015-12-16 05:26:23 +02:00
|
|
|
status = CanonStatTy
|
|
|
|
} else {
|
|
|
|
status = SideStatTy
|
|
|
|
}
|
2016-03-10 20:19:09 +02:00
|
|
|
hc.headerCache.Add(hash, header)
|
2016-04-05 16:22:04 +03:00
|
|
|
hc.numberCache.Add(hash, number)
|
2016-03-10 20:19:09 +02:00
|
|
|
|
2015-12-16 05:26:23 +02:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// WhCallback is a callback function for inserting individual headers.
// A callback is used for two reasons: first, in a LightChain, status should be
// processed and light chain events sent, while in a BlockChain this is not
// necessary since chain events are sent after inserting blocks. Second, the
// header writes should be protected by the parent chain mutex individually.
type WhCallback func(*types.Header) error
|
|
|
|
|
2017-03-22 22:44:22 +03:00
|
|
|
func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
|
2016-12-13 17:14:33 +03:00
|
|
|
// Do a sanity check that the provided chain is actually ordered and linked
|
|
|
|
for i := 1; i < len(chain); i++ {
|
|
|
|
if chain[i].Number.Uint64() != chain[i-1].Number.Uint64()+1 || chain[i].ParentHash != chain[i-1].Hash() {
|
2018-07-30 14:10:48 +03:00
|
|
|
// Chain broke ancestry, log a message (programming error) and skip insertion
|
2017-02-28 14:35:17 +03:00
|
|
|
log.Error("Non contiguous header insert", "number", chain[i].Number, "hash", chain[i].Hash(),
|
|
|
|
"parent", chain[i].ParentHash, "prevnumber", chain[i-1].Number, "prevhash", chain[i-1].Hash())
|
2016-12-13 17:14:33 +03:00
|
|
|
|
2017-02-28 14:35:17 +03:00
|
|
|
return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].Number,
|
|
|
|
chain[i-1].Hash().Bytes()[:4], i, chain[i].Number, chain[i].Hash().Bytes()[:4], chain[i].ParentHash[:4])
|
2016-12-13 17:14:33 +03:00
|
|
|
}
|
|
|
|
}
|
2015-12-16 05:26:23 +02:00
|
|
|
|
2017-04-05 01:16:29 +03:00
|
|
|
// Generate the list of seal verification requests, and start the parallel verifier
|
|
|
|
seals := make([]bool, len(chain))
|
2019-01-24 14:18:26 +03:00
|
|
|
if checkFreq != 0 {
|
|
|
|
// In case of checkFreq == 0 all seals are left false.
|
|
|
|
for i := 0; i < len(seals)/checkFreq; i++ {
|
|
|
|
index := i*checkFreq + hc.rand.Intn(checkFreq)
|
|
|
|
if index >= len(seals) {
|
|
|
|
index = len(seals) - 1
|
|
|
|
}
|
|
|
|
seals[index] = true
|
2015-12-16 05:26:23 +02:00
|
|
|
}
|
2019-01-24 14:18:26 +03:00
|
|
|
// Last should always be verified to avoid junk.
|
|
|
|
seals[len(seals)-1] = true
|
2015-12-16 05:26:23 +02:00
|
|
|
}
|
|
|
|
|
2017-04-05 01:16:29 +03:00
|
|
|
abort, results := hc.engine.VerifyHeaders(hc, chain, seals)
|
|
|
|
defer close(abort)
|
2015-12-16 05:26:23 +02:00
|
|
|
|
2017-04-05 01:16:29 +03:00
|
|
|
// Iterate over the headers and ensure they all check out
|
|
|
|
for i, header := range chain {
|
|
|
|
// If the chain is terminating, stop processing blocks
|
|
|
|
if hc.procInterrupt() {
|
|
|
|
log.Debug("Premature abort during headers verification")
|
|
|
|
return 0, errors.New("aborted")
|
2015-12-16 05:26:23 +02:00
|
|
|
}
|
2017-04-05 01:16:29 +03:00
|
|
|
// If the header is a banned one, straight out abort
|
|
|
|
if BadHashes[header.Hash()] {
|
2017-04-06 14:58:03 +03:00
|
|
|
return i, ErrBlacklistedHash
|
2017-04-05 01:16:29 +03:00
|
|
|
}
|
|
|
|
// Otherwise wait for headers checks and ensure they pass
|
|
|
|
if err := <-results; err != nil {
|
|
|
|
return i, err
|
2015-12-16 05:26:23 +02:00
|
|
|
}
|
|
|
|
}
|
2017-03-22 22:44:22 +03:00
|
|
|
|
|
|
|
return 0, nil
|
|
|
|
}
|
|
|
|
|
2017-05-11 04:55:48 +03:00
|
|
|
// InsertHeaderChain attempts to insert the given header chain in to the local
|
|
|
|
// chain, possibly creating a reorg. If an error is returned, it will return the
|
|
|
|
// index number of the failing header as well an error describing what went wrong.
|
|
|
|
//
|
|
|
|
// The verify parameter can be used to fine tune whether nonce verification
|
|
|
|
// should be done or not. The reason behind the optional check is because some
|
|
|
|
// of the header retrieval mechanisms already need to verfy nonces, as well as
|
|
|
|
// because nonces can be verified sparsely, not needing to check each.
|
2017-03-22 22:44:22 +03:00
|
|
|
func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, writeHeader WhCallback, start time.Time) (int, error) {
|
|
|
|
// Collect some import statistics to report on
|
|
|
|
stats := struct{ processed, ignored int }{}
|
2015-12-16 05:26:23 +02:00
|
|
|
// All headers passed verification, import them into the database
|
|
|
|
for i, header := range chain {
|
|
|
|
// Short circuit insertion if shutting down
|
|
|
|
if hc.procInterrupt() {
|
2017-04-05 01:16:29 +03:00
|
|
|
log.Debug("Premature abort during headers import")
|
|
|
|
return i, errors.New("aborted")
|
2015-12-16 05:26:23 +02:00
|
|
|
}
|
|
|
|
// If the header's already known, skip it, otherwise store
|
core, cmd, vendor: fixes and database inspection tool (#15)
* core, eth: some fixes for freezer
* vendor, core/rawdb, cmd/geth: add db inspector
* core, cmd/utils: check ancient store path forceily
* cmd/geth, common, core/rawdb: a few fixes
* cmd/geth: support windows file rename and fix rename error
* core: support ancient plugin
* core, cmd: streaming file copy
* cmd, consensus, core, tests: keep genesis in leveldb
* core: write txlookup during ancient init
* core: bump database version
2019-05-14 17:07:44 +03:00
|
|
|
hash := header.Hash()
|
|
|
|
if hc.HasHeader(hash, header.Number.Uint64()) {
|
|
|
|
externTd := hc.GetTd(hash, header.Number.Uint64())
|
|
|
|
localTd := hc.GetTd(hc.currentHeaderHash, hc.CurrentHeader().Number.Uint64())
|
|
|
|
if externTd == nil || externTd.Cmp(localTd) <= 0 {
|
|
|
|
stats.ignored++
|
|
|
|
continue
|
|
|
|
}
|
2015-12-16 05:26:23 +02:00
|
|
|
}
|
|
|
|
if err := writeHeader(header); err != nil {
|
|
|
|
return i, err
|
|
|
|
}
|
|
|
|
stats.processed++
|
|
|
|
}
|
|
|
|
// Report some public statistics so the user has a clue what's going on
|
2017-02-28 14:35:17 +03:00
|
|
|
last := chain[len(chain)-1]
|
2018-09-20 11:41:59 +03:00
|
|
|
|
|
|
|
context := []interface{}{
|
|
|
|
"count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
|
|
|
|
"number", last.Number, "hash", last.Hash(),
|
|
|
|
}
|
2019-04-02 23:28:48 +03:00
|
|
|
if timestamp := time.Unix(int64(last.Time), 0); time.Since(timestamp) > time.Minute {
|
2018-09-20 11:41:59 +03:00
|
|
|
context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
|
|
|
|
}
|
|
|
|
if stats.ignored > 0 {
|
|
|
|
context = append(context, []interface{}{"ignored", stats.ignored}...)
|
|
|
|
}
|
|
|
|
log.Info("Imported new block headers", context...)
|
2015-12-16 05:26:23 +02:00
|
|
|
|
|
|
|
return 0, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
// hash, fetching towards the genesis block.
//
// The returned slice holds ancestor hashes (closest parent first) and does not
// include hash itself; it may be shorter than max if the genesis block or an
// unknown header is reached first. Nil is returned when hash is unknown.
func (hc *HeaderChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
	// Get the origin header from which to fetch
	header := hc.GetHeaderByHash(hash)
	if header == nil {
		return nil
	}
	// Iterate the headers until enough is collected or the genesis reached
	chain := make([]common.Hash, 0, max)
	for i := uint64(0); i < max; i++ {
		next := header.ParentHash
		// Stop (without appending) if the parent header is missing locally.
		if header = hc.GetHeader(next, header.Number.Uint64()-1); header == nil {
			break
		}
		chain = append(chain, next)
		// Reached the genesis block, nothing older to collect.
		if header.Number.Sign() == 0 {
			break
		}
	}
	return chain
}
|
|
|
|
|
2018-06-12 16:52:54 +03:00
|
|
|
// GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
// a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
// number of blocks to be individually checked before we reach the canonical chain.
//
// Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
func (hc *HeaderChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
	// An ancestor deeper than the block's own height cannot exist.
	if ancestor > number {
		return common.Hash{}, 0
	}
	if ancestor == 1 {
		// in this case it is cheaper to just read the header
		if header := hc.GetHeader(hash, number); header != nil {
			return header.ParentHash, number - 1
		} else {
			return common.Hash{}, 0
		}
	}
	for ancestor != 0 {
		if rawdb.ReadCanonicalHash(hc.chainDb, number) == hash {
			// The block is canonical, so the ancestor can be looked up directly
			// in the canonical number index.
			ancestorHash := rawdb.ReadCanonicalHash(hc.chainDb, number-ancestor)
			// Re-read and compare the canonical hash after the lookup.
			// NOTE(review): presumably this guards against the canonical index
			// being mutated concurrently between the two reads; if the check
			// fails we fall through to the per-header walk — confirm intent.
			if rawdb.ReadCanonicalHash(hc.chainDb, number) == hash {
				number -= ancestor
				return ancestorHash, number
			}
		}
		// Off the canonical chain: step one parent at a time, bounded by the
		// caller-supplied maxNonCanonical budget.
		if *maxNonCanonical == 0 {
			return common.Hash{}, 0
		}
		*maxNonCanonical--
		ancestor--
		header := hc.GetHeader(hash, number)
		if header == nil {
			return common.Hash{}, 0
		}
		hash = header.ParentHash
		number--
	}
	return hash, number
}
|
|
|
|
|
2015-12-16 05:26:23 +02:00
|
|
|
// GetTd retrieves a block's total difficulty in the canonical chain from the
|
2016-04-05 16:22:04 +03:00
|
|
|
// database by hash and number, caching it if found.
|
|
|
|
func (hc *HeaderChain) GetTd(hash common.Hash, number uint64) *big.Int {
|
2015-12-16 05:26:23 +02:00
|
|
|
// Short circuit if the td's already in the cache, retrieve otherwise
|
|
|
|
if cached, ok := hc.tdCache.Get(hash); ok {
|
|
|
|
return cached.(*big.Int)
|
|
|
|
}
|
2018-05-07 14:35:06 +03:00
|
|
|
td := rawdb.ReadTd(hc.chainDb, hash, number)
|
2015-12-16 05:26:23 +02:00
|
|
|
if td == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
// Cache the found body for next time and return
|
|
|
|
hc.tdCache.Add(hash, td)
|
|
|
|
return td
|
|
|
|
}
|
|
|
|
|
2016-04-05 16:22:04 +03:00
|
|
|
// GetTdByHash retrieves a block's total difficulty in the canonical chain from the
|
|
|
|
// database by hash, caching it if found.
|
|
|
|
func (hc *HeaderChain) GetTdByHash(hash common.Hash) *big.Int {
|
2018-05-07 14:35:06 +03:00
|
|
|
number := hc.GetBlockNumber(hash)
|
|
|
|
if number == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return hc.GetTd(hash, *number)
|
2016-04-05 16:22:04 +03:00
|
|
|
}
|
|
|
|
|
2016-03-10 20:19:09 +02:00
|
|
|
// WriteTd stores a block's total difficulty into the database, also caching it
// along the way. It always returns nil; the error return exists to satisfy
// callers expecting a fallible write.
func (hc *HeaderChain) WriteTd(hash common.Hash, number uint64, td *big.Int) error {
	rawdb.WriteTd(hc.chainDb, hash, number, td)
	// Cache a defensive copy so a caller mutating td after this returns
	// cannot corrupt the cached value.
	hc.tdCache.Add(hash, new(big.Int).Set(td))
	return nil
}
|
|
|
|
|
2016-04-05 16:22:04 +03:00
|
|
|
// GetHeader retrieves a block header from the database by hash and number,
|
|
|
|
// caching it if found.
|
|
|
|
func (hc *HeaderChain) GetHeader(hash common.Hash, number uint64) *types.Header {
|
2015-12-16 05:26:23 +02:00
|
|
|
// Short circuit if the header's already in the cache, retrieve otherwise
|
|
|
|
if header, ok := hc.headerCache.Get(hash); ok {
|
|
|
|
return header.(*types.Header)
|
|
|
|
}
|
2018-05-07 14:35:06 +03:00
|
|
|
header := rawdb.ReadHeader(hc.chainDb, hash, number)
|
2015-12-16 05:26:23 +02:00
|
|
|
if header == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
// Cache the found header for next time and return
|
2016-03-10 20:19:09 +02:00
|
|
|
hc.headerCache.Add(hash, header)
|
2015-12-16 05:26:23 +02:00
|
|
|
return header
|
|
|
|
}
|
|
|
|
|
2016-04-05 16:22:04 +03:00
|
|
|
// GetHeaderByHash retrieves a block header from the database by hash, caching it if
|
|
|
|
// found.
|
|
|
|
func (hc *HeaderChain) GetHeaderByHash(hash common.Hash) *types.Header {
|
2018-05-07 14:35:06 +03:00
|
|
|
number := hc.GetBlockNumber(hash)
|
|
|
|
if number == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return hc.GetHeader(hash, *number)
|
2016-04-05 16:22:04 +03:00
|
|
|
}
|
|
|
|
|
2017-09-09 19:03:07 +03:00
|
|
|
// HasHeader checks if a block header is present in the database or not.
|
|
|
|
func (hc *HeaderChain) HasHeader(hash common.Hash, number uint64) bool {
|
|
|
|
if hc.numberCache.Contains(hash) || hc.headerCache.Contains(hash) {
|
|
|
|
return true
|
|
|
|
}
|
2018-05-07 14:35:06 +03:00
|
|
|
return rawdb.HasHeader(hc.chainDb, hash, number)
|
2015-12-16 05:26:23 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// GetHeaderByNumber retrieves a block header from the database by number,
|
|
|
|
// caching it (associated with its hash) if found.
|
|
|
|
func (hc *HeaderChain) GetHeaderByNumber(number uint64) *types.Header {
|
2018-05-07 14:35:06 +03:00
|
|
|
hash := rawdb.ReadCanonicalHash(hc.chainDb, number)
|
2015-12-16 05:26:23 +02:00
|
|
|
if hash == (common.Hash{}) {
|
|
|
|
return nil
|
|
|
|
}
|
2016-04-05 16:22:04 +03:00
|
|
|
return hc.GetHeader(hash, number)
|
2015-12-16 05:26:23 +02:00
|
|
|
}
|
|
|
|
|
2019-09-26 11:47:31 +03:00
|
|
|
// GetCanonicalHash returns the canonical hash for a given block number, or the
// zero hash if no canonical block exists at that height.
func (hc *HeaderChain) GetCanonicalHash(number uint64) common.Hash {
	return rawdb.ReadCanonicalHash(hc.chainDb, number)
}
|
|
|
|
|
2015-12-16 05:26:23 +02:00
|
|
|
// CurrentHeader retrieves the current head header of the canonical chain. The
// header is retrieved from the HeaderChain's internal cache.
func (hc *HeaderChain) CurrentHeader() *types.Header {
	// currentHeader is always populated in NewHeaderChain before use, so the
	// type assertion cannot fail.
	return hc.currentHeader.Load().(*types.Header)
}
|
|
|
|
|
|
|
|
// SetCurrentHeader sets the current head header of the canonical chain.
func (hc *HeaderChain) SetCurrentHeader(head *types.Header) {
	// Persist the new head hash first, then update the in-memory view and the
	// reporting gauge.
	rawdb.WriteHeadHeaderHash(hc.chainDb, head.Hash())

	hc.currentHeader.Store(head)
	hc.currentHeaderHash = head.Hash()
	headHeaderGauge.Update(head.Number.Int64())
}
|
|
|
|
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
2019-04-25 17:59:48 +03:00
|
|
|
type (
	// UpdateHeadBlocksCallback is a callback function that is called by SetHead
	// before head header is updated. It lets the encapsulating chain move its
	// own head markers (fast block, full block) in step with the header chain.
	UpdateHeadBlocksCallback func(ethdb.KeyValueWriter, *types.Header)

	// DeleteBlockContentCallback is a callback function that is called by SetHead
	// before each header is deleted. It lets the encapsulating chain remove the
	// block data (body, receipts) associated with the deleted header.
	DeleteBlockContentCallback func(ethdb.KeyValueWriter, common.Hash, uint64)
)
|
2015-12-16 05:26:23 +02:00
|
|
|
|
|
|
|
// SetHead rewinds the local chain to a new head. Everything above the new head
// will be deleted and the new one set.
//
// updateFn (may be nil) is invoked for every unwound header so the owning
// chain can move its own head markers; delFn (may be nil) is invoked so it
// can delete the associated block content.
func (hc *HeaderChain) SetHead(head uint64, updateFn UpdateHeadBlocksCallback, delFn DeleteBlockContentCallback) {
	var (
		parentHash common.Hash
		batch      = hc.chainDb.NewBatch()
	)
	// Unwind one header at a time until the head is at or below the target.
	for hdr := hc.CurrentHeader(); hdr != nil && hdr.Number.Uint64() > head; hdr = hc.CurrentHeader() {
		hash, num := hdr.Hash(), hdr.Number.Uint64()

		// Rewind block chain to new head.
		parent := hc.GetHeader(hdr.ParentHash, num-1)
		if parent == nil {
			// Parent missing locally: fall back to genesis as the new head.
			parent = hc.genesisHeader
		}
		parentHash = hdr.ParentHash
		// Notably, since geth has the possibility for setting the head to a low
		// height which is even lower than ancient head.
		// In order to ensure that the head is always no higher than the data in
		// the database(ancient store or active store), we need to update head
		// first then remove the relative data from the database.
		//
		// Update head first(head fast block, head full block) before deleting the data.
		if updateFn != nil {
			updateFn(hc.chainDb, parent)
		}
		// Update head header then.
		rawdb.WriteHeadHeaderHash(hc.chainDb, parentHash)

		// Remove the relative data from the database.
		if delFn != nil {
			delFn(batch, hash, num)
		}
		// Rewind header chain to new head.
		rawdb.DeleteHeader(batch, hash, num)
		rawdb.DeleteTd(batch, hash, num)

		rawdb.DeleteCanonicalHash(batch, num)

		// Advance the in-memory head so the next loop iteration sees it.
		hc.currentHeader.Store(parent)
		hc.currentHeaderHash = parentHash
		headHeaderGauge.Update(parent.Number.Int64())
	}
	batch.Write()

	// Clear out any stale content from the caches
	hc.headerCache.Purge()
	hc.tdCache.Purge()
	hc.numberCache.Purge()
}
|
|
|
|
|
|
|
|
// SetGenesis sets a new genesis block header for the chain. It only swaps the
// in-memory reference; the database is not modified.
func (hc *HeaderChain) SetGenesis(head *types.Header) {
	hc.genesisHeader = head
}
|
|
|
|
|
2017-04-05 01:16:29 +03:00
|
|
|
// Config retrieves the header chain's chain configuration.
func (hc *HeaderChain) Config() *params.ChainConfig { return hc.config }
2015-12-16 05:26:23 +02:00
|
|
|
|
2017-04-12 16:38:31 +03:00
|
|
|
// Engine retrieves the header chain's consensus engine.
func (hc *HeaderChain) Engine() consensus.Engine { return hc.engine }
|
2017-04-05 01:16:29 +03:00
|
|
|
// GetBlock implements consensus.ChainReader, and returns nil for every input as
// a header chain does not have blocks available for retrieval.
func (hc *HeaderChain) GetBlock(hash common.Hash, number uint64) *types.Block {
	return nil
}
|