core: speed up header import (#21967)
This PR implements the following modifications - Don't shortcut check if block is present, thus avoid disk lookup - Don't check hash ancestry in early-check (it's still done in parallel checker) - Don't check time.Now for every single header Charts and background info can be found here: https://github.com/holiman/headerimport/blob/main/README.md With these changes, writing 1M headers goes down from 80s to 62s.
This commit is contained in:
parent
14d495491d
commit
681618275c
@ -39,11 +39,11 @@ import (
|
|||||||
|
|
||||||
// Ethash proof-of-work protocol constants.
|
// Ethash proof-of-work protocol constants.
|
||||||
var (
|
var (
|
||||||
FrontierBlockReward = big.NewInt(5e+18) // Block reward in wei for successfully mining a block
|
FrontierBlockReward = big.NewInt(5e+18) // Block reward in wei for successfully mining a block
|
||||||
ByzantiumBlockReward = big.NewInt(3e+18) // Block reward in wei for successfully mining a block upward from Byzantium
|
ByzantiumBlockReward = big.NewInt(3e+18) // Block reward in wei for successfully mining a block upward from Byzantium
|
||||||
ConstantinopleBlockReward = big.NewInt(2e+18) // Block reward in wei for successfully mining a block upward from Constantinople
|
ConstantinopleBlockReward = big.NewInt(2e+18) // Block reward in wei for successfully mining a block upward from Constantinople
|
||||||
maxUncles = 2 // Maximum number of uncles allowed in a single block
|
maxUncles = 2 // Maximum number of uncles allowed in a single block
|
||||||
allowedFutureBlockTime = 15 * time.Second // Max time from current time allowed for blocks, before they're considered future blocks
|
allowedFutureBlockTimeSeconds = int64(15) // Max seconds from current time allowed for blocks, before they're considered future blocks
|
||||||
|
|
||||||
// calcDifficultyEip2384 is the difficulty adjustment algorithm as specified by EIP 2384.
|
// calcDifficultyEip2384 is the difficulty adjustment algorithm as specified by EIP 2384.
|
||||||
// It offsets the bomb 4M blocks from Constantinople, so in total 9M blocks.
|
// It offsets the bomb 4M blocks from Constantinople, so in total 9M blocks.
|
||||||
@ -102,7 +102,7 @@ func (ethash *Ethash) VerifyHeader(chain consensus.ChainHeaderReader, header *ty
|
|||||||
return consensus.ErrUnknownAncestor
|
return consensus.ErrUnknownAncestor
|
||||||
}
|
}
|
||||||
// Sanity checks passed, do a proper verification
|
// Sanity checks passed, do a proper verification
|
||||||
return ethash.verifyHeader(chain, header, parent, false, seal)
|
return ethash.verifyHeader(chain, header, parent, false, seal, time.Now().Unix())
|
||||||
}
|
}
|
||||||
|
|
||||||
// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
|
// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
|
||||||
@ -126,15 +126,16 @@ func (ethash *Ethash) VerifyHeaders(chain consensus.ChainHeaderReader, headers [
|
|||||||
|
|
||||||
// Create a task channel and spawn the verifiers
|
// Create a task channel and spawn the verifiers
|
||||||
var (
|
var (
|
||||||
inputs = make(chan int)
|
inputs = make(chan int)
|
||||||
done = make(chan int, workers)
|
done = make(chan int, workers)
|
||||||
errors = make([]error, len(headers))
|
errors = make([]error, len(headers))
|
||||||
abort = make(chan struct{})
|
abort = make(chan struct{})
|
||||||
|
unixNow = time.Now().Unix()
|
||||||
)
|
)
|
||||||
for i := 0; i < workers; i++ {
|
for i := 0; i < workers; i++ {
|
||||||
go func() {
|
go func() {
|
||||||
for index := range inputs {
|
for index := range inputs {
|
||||||
errors[index] = ethash.verifyHeaderWorker(chain, headers, seals, index)
|
errors[index] = ethash.verifyHeaderWorker(chain, headers, seals, index, unixNow)
|
||||||
done <- index
|
done <- index
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
@ -170,7 +171,7 @@ func (ethash *Ethash) VerifyHeaders(chain consensus.ChainHeaderReader, headers [
|
|||||||
return abort, errorsOut
|
return abort, errorsOut
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ethash *Ethash) verifyHeaderWorker(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool, index int) error {
|
func (ethash *Ethash) verifyHeaderWorker(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool, index int, unixNow int64) error {
|
||||||
var parent *types.Header
|
var parent *types.Header
|
||||||
if index == 0 {
|
if index == 0 {
|
||||||
parent = chain.GetHeader(headers[0].ParentHash, headers[0].Number.Uint64()-1)
|
parent = chain.GetHeader(headers[0].ParentHash, headers[0].Number.Uint64()-1)
|
||||||
@ -180,10 +181,7 @@ func (ethash *Ethash) verifyHeaderWorker(chain consensus.ChainHeaderReader, head
|
|||||||
if parent == nil {
|
if parent == nil {
|
||||||
return consensus.ErrUnknownAncestor
|
return consensus.ErrUnknownAncestor
|
||||||
}
|
}
|
||||||
if chain.GetHeader(headers[index].Hash(), headers[index].Number.Uint64()) != nil {
|
return ethash.verifyHeader(chain, headers[index], parent, false, seals[index], unixNow)
|
||||||
return nil // known block
|
|
||||||
}
|
|
||||||
return ethash.verifyHeader(chain, headers[index], parent, false, seals[index])
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// VerifyUncles verifies that the given block's uncles conform to the consensus
|
// VerifyUncles verifies that the given block's uncles conform to the consensus
|
||||||
@ -234,7 +232,7 @@ func (ethash *Ethash) VerifyUncles(chain consensus.ChainReader, block *types.Blo
|
|||||||
if ancestors[uncle.ParentHash] == nil || uncle.ParentHash == block.ParentHash() {
|
if ancestors[uncle.ParentHash] == nil || uncle.ParentHash == block.ParentHash() {
|
||||||
return errDanglingUncle
|
return errDanglingUncle
|
||||||
}
|
}
|
||||||
if err := ethash.verifyHeader(chain, uncle, ancestors[uncle.ParentHash], true, true); err != nil {
|
if err := ethash.verifyHeader(chain, uncle, ancestors[uncle.ParentHash], true, true, time.Now().Unix()); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -244,14 +242,14 @@ func (ethash *Ethash) VerifyUncles(chain consensus.ChainReader, block *types.Blo
|
|||||||
// verifyHeader checks whether a header conforms to the consensus rules of the
|
// verifyHeader checks whether a header conforms to the consensus rules of the
|
||||||
// stock Ethereum ethash engine.
|
// stock Ethereum ethash engine.
|
||||||
// See YP section 4.3.4. "Block Header Validity"
|
// See YP section 4.3.4. "Block Header Validity"
|
||||||
func (ethash *Ethash) verifyHeader(chain consensus.ChainHeaderReader, header, parent *types.Header, uncle bool, seal bool) error {
|
func (ethash *Ethash) verifyHeader(chain consensus.ChainHeaderReader, header, parent *types.Header, uncle bool, seal bool, unixNow int64) error {
|
||||||
// Ensure that the header's extra-data section is of a reasonable size
|
// Ensure that the header's extra-data section is of a reasonable size
|
||||||
if uint64(len(header.Extra)) > params.MaximumExtraDataSize {
|
if uint64(len(header.Extra)) > params.MaximumExtraDataSize {
|
||||||
return fmt.Errorf("extra-data too long: %d > %d", len(header.Extra), params.MaximumExtraDataSize)
|
return fmt.Errorf("extra-data too long: %d > %d", len(header.Extra), params.MaximumExtraDataSize)
|
||||||
}
|
}
|
||||||
// Verify the header's timestamp
|
// Verify the header's timestamp
|
||||||
if !uncle {
|
if !uncle {
|
||||||
if header.Time > uint64(time.Now().Add(allowedFutureBlockTime).Unix()) {
|
if header.Time > uint64(unixNow+allowedFutureBlockTimeSeconds) {
|
||||||
return consensus.ErrFutureBlock
|
return consensus.ErrFutureBlock
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -299,17 +299,18 @@ func (hc *HeaderChain) writeHeaders(headers []*types.Header) (result *headerWrit
|
|||||||
func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
|
func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
|
||||||
// Do a sanity check that the provided chain is actually ordered and linked
|
// Do a sanity check that the provided chain is actually ordered and linked
|
||||||
for i := 1; i < len(chain); i++ {
|
for i := 1; i < len(chain); i++ {
|
||||||
parentHash := chain[i-1].Hash()
|
if chain[i].Number.Uint64() != chain[i-1].Number.Uint64()+1 {
|
||||||
if chain[i].Number.Uint64() != chain[i-1].Number.Uint64()+1 || chain[i].ParentHash != parentHash {
|
hash := chain[i].Hash()
|
||||||
|
parentHash := chain[i-1].Hash()
|
||||||
// Chain broke ancestry, log a message (programming error) and skip insertion
|
// Chain broke ancestry, log a message (programming error) and skip insertion
|
||||||
log.Error("Non contiguous header insert", "number", chain[i].Number, "hash", chain[i].Hash(),
|
log.Error("Non contiguous header insert", "number", chain[i].Number, "hash", hash,
|
||||||
"parent", chain[i].ParentHash, "prevnumber", chain[i-1].Number, "prevhash", parentHash)
|
"parent", chain[i].ParentHash, "prevnumber", chain[i-1].Number, "prevhash", parentHash)
|
||||||
|
|
||||||
return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].Number,
|
return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].Number,
|
||||||
parentHash.Bytes()[:4], i, chain[i].Number, chain[i].Hash().Bytes()[:4], chain[i].ParentHash[:4])
|
parentHash.Bytes()[:4], i, chain[i].Number, hash.Bytes()[:4], chain[i].ParentHash[:4])
|
||||||
}
|
}
|
||||||
// If the header is a banned one, straight out abort
|
// If the header is a banned one, straight out abort
|
||||||
if BadHashes[parentHash] {
|
if BadHashes[chain[i].ParentHash] {
|
||||||
return i - 1, ErrBlacklistedHash
|
return i - 1, ErrBlacklistedHash
|
||||||
}
|
}
|
||||||
// If it's the last header in the chunk, we need to check it too
|
// If it's the last header in the chunk, we need to check it too
|
||||||
|
@ -103,6 +103,11 @@ type freezerTable struct {
|
|||||||
lock sync.RWMutex // Mutex protecting the data file descriptors
|
lock sync.RWMutex // Mutex protecting the data file descriptors
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NewFreezerTable opens the given path as a freezer table.
|
||||||
|
func NewFreezerTable(path, name string, disableSnappy bool) (*freezerTable, error) {
|
||||||
|
return newTable(path, name, metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, disableSnappy)
|
||||||
|
}
|
||||||
|
|
||||||
// newTable opens a freezer table with default settings - 2G files
|
// newTable opens a freezer table with default settings - 2G files
|
||||||
func newTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, disableSnappy bool) (*freezerTable, error) {
|
func newTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, disableSnappy bool) (*freezerTable, error) {
|
||||||
return newCustomTable(path, name, readMeter, writeMeter, sizeGauge, 2*1000*1000*1000, disableSnappy)
|
return newCustomTable(path, name, readMeter, writeMeter, sizeGauge, 2*1000*1000*1000, disableSnappy)
|
||||||
|
Loading…
Reference in New Issue
Block a user