// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package rawdb

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
	"golang.org/x/exp/slices"
)

// ReadCanonicalHash retrieves the hash assigned to a canonical block number.
func ReadCanonicalHash(db ethdb.Reader, number uint64) common.Hash {
	var data []byte
	db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
		data, _ = reader.Ancient(ChainFreezerHashTable, number)
		if len(data) == 0 {
			// Get it by hash from leveldb
			data, _ = db.Get(headerHashKey(number))
		}
		return nil
	})
	return common.BytesToHash(data)
}

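// readCanonicalHeaderExample is an illustrative sketch, not part of the original
// accessors: it shows how ReadCanonicalHash and ReadHeader (defined further down
// in this file) are typically chained to resolve a block height into its decoded
// canonical header. Any ethdb.Reader-compatible handle works here.
func readCanonicalHeaderExample(db ethdb.Reader, number uint64) *types.Header {
	hash := ReadCanonicalHash(db, number) // number -> canonical hash (freezer first, then key-value store)
	if hash == (common.Hash{}) {
		return nil // no canonical block known at this height
	}
	return ReadHeader(db, hash, number) // nil if the header itself is missing
}
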
// WriteCanonicalHash stores the hash assigned to a canonical block number.
func WriteCanonicalHash(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	if err := db.Put(headerHashKey(number), hash.Bytes()); err != nil {
		log.Crit("Failed to store number to hash mapping", "err", err)
	}
}

// DeleteCanonicalHash removes the number to hash canonical mapping.
func DeleteCanonicalHash(db ethdb.KeyValueWriter, number uint64) {
	if err := db.Delete(headerHashKey(number)); err != nil {
		log.Crit("Failed to delete number to hash mapping", "err", err)
	}
}

// ReadAllHashes retrieves all the hashes assigned to blocks at a certain height,
// both canonical and reorged forks included.
func ReadAllHashes(db ethdb.Iteratee, number uint64) []common.Hash {
	prefix := headerKeyPrefix(number)

	hashes := make([]common.Hash, 0, 1)
	it := db.NewIterator(prefix, nil)
	defer it.Release()

	for it.Next() {
		if key := it.Key(); len(key) == len(prefix)+32 {
			hashes = append(hashes, common.BytesToHash(key[len(key)-32:]))
		}
	}
	return hashes
}

type NumberHash struct {
	Number uint64
	Hash   common.Hash
}

// ReadAllHashesInRange retrieves all the hashes assigned to blocks at certain
// heights, both canonical and reorged forks included.
// This method considers both limits to be _inclusive_.
func ReadAllHashesInRange(db ethdb.Iteratee, first, last uint64) []*NumberHash {
	var (
		start     = encodeBlockNumber(first)
		keyLength = len(headerPrefix) + 8 + 32
		hashes    = make([]*NumberHash, 0, 1+last-first)
		it        = db.NewIterator(headerPrefix, start)
	)
	defer it.Release()
	for it.Next() {
		key := it.Key()
		if len(key) != keyLength {
			continue
		}
		num := binary.BigEndian.Uint64(key[len(headerPrefix) : len(headerPrefix)+8])
		if num > last {
			break
		}
		hash := common.BytesToHash(key[len(key)-32:])
		hashes = append(hashes, &NumberHash{num, hash})
	}
	return hashes
}

// ReadAllCanonicalHashes retrieves all canonical number and hash mappings in the
// given chain range. If the number of accumulated entries reaches the given
// threshold, the iteration is aborted and the partial result is returned.
func ReadAllCanonicalHashes(db ethdb.Iteratee, from uint64, to uint64, limit int) ([]uint64, []common.Hash) {
	// Short circuit if the limit is 0.
	if limit == 0 {
		return nil, nil
	}
	var (
		numbers []uint64
		hashes  []common.Hash
	)
	// Construct the key prefix of start point.
	start, end := headerHashKey(from), headerHashKey(to)
	it := db.NewIterator(nil, start)
	defer it.Release()

	for it.Next() {
		if bytes.Compare(it.Key(), end) >= 0 {
			break
		}
		if key := it.Key(); len(key) == len(headerPrefix)+8+1 && bytes.Equal(key[len(key)-1:], headerHashSuffix) {
			numbers = append(numbers, binary.BigEndian.Uint64(key[len(headerPrefix):len(headerPrefix)+8]))
			hashes = append(hashes, common.BytesToHash(it.Value()))
			// If the accumulated entries reach the limit threshold, return.
			if len(numbers) >= limit {
				break
			}
		}
	}
	return numbers, hashes
}

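// logCanonicalChunkExample is an illustrative sketch, not part of the original
// accessors: it reads at most 'limit' canonical number->hash mappings starting at
// 'from' and logs them. Because the loop above stops as soon as it reaches the
// key for 'to', the upper bound effectively behaves as exclusive.
func logCanonicalChunkExample(db ethdb.Iteratee, from, to uint64, limit int) {
	numbers, hashes := ReadAllCanonicalHashes(db, from, to, limit)
	for i := range numbers {
		log.Info("Canonical mapping", "number", numbers[i], "hash", hashes[i])
	}
}
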
// ReadHeaderNumber returns the header number assigned to a hash.
func ReadHeaderNumber(db ethdb.KeyValueReader, hash common.Hash) *uint64 {
	data, _ := db.Get(headerNumberKey(hash))
	if len(data) != 8 {
		return nil
	}
	number := binary.BigEndian.Uint64(data)
	return &number
}

// WriteHeaderNumber stores the hash->number mapping.
func WriteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	key := headerNumberKey(hash)
	enc := encodeBlockNumber(number)
	if err := db.Put(key, enc); err != nil {
		log.Crit("Failed to store hash to number mapping", "err", err)
	}
}

// DeleteHeaderNumber removes hash->number mapping.
func DeleteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash) {
	if err := db.Delete(headerNumberKey(hash)); err != nil {
		log.Crit("Failed to delete hash to number mapping", "err", err)
	}
}

// ReadHeadHeaderHash retrieves the hash of the current canonical head header.
func ReadHeadHeaderHash(db ethdb.KeyValueReader) common.Hash {
	data, _ := db.Get(headHeaderKey)
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}

// WriteHeadHeaderHash stores the hash of the current canonical head header.
func WriteHeadHeaderHash(db ethdb.KeyValueWriter, hash common.Hash) {
	if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
		log.Crit("Failed to store last header's hash", "err", err)
	}
}

// ReadHeadBlockHash retrieves the hash of the current canonical head block.
func ReadHeadBlockHash(db ethdb.KeyValueReader) common.Hash {
	data, _ := db.Get(headBlockKey)
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}

// WriteHeadBlockHash stores the head block's hash.
func WriteHeadBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
	if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
		log.Crit("Failed to store last block's hash", "err", err)
	}
}

// ReadHeadFastBlockHash retrieves the hash of the current fast-sync head block.
func ReadHeadFastBlockHash(db ethdb.KeyValueReader) common.Hash {
	data, _ := db.Get(headFastBlockKey)
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}

// WriteHeadFastBlockHash stores the hash of the current fast-sync head block.
func WriteHeadFastBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
	if err := db.Put(headFastBlockKey, hash.Bytes()); err != nil {
		log.Crit("Failed to store last fast block's hash", "err", err)
	}
}

// ReadFinalizedBlockHash retrieves the hash of the finalized block.
func ReadFinalizedBlockHash(db ethdb.KeyValueReader) common.Hash {
	data, _ := db.Get(headFinalizedBlockKey)
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}

// WriteFinalizedBlockHash stores the hash of the finalized block.
func WriteFinalizedBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
	if err := db.Put(headFinalizedBlockKey, hash.Bytes()); err != nil {
		log.Crit("Failed to store last finalized block's hash", "err", err)
	}
}

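// updateHeadMarkersExample is an illustrative sketch, not part of the original
// accessors: after importing a new chain head, callers typically advance several
// of the pointers above together. Which markers are touched (and whether the
// finalized hash is updated) depends entirely on the caller; this merely shows
// the call pattern.
func updateHeadMarkersExample(db ethdb.KeyValueWriter, hash common.Hash) {
	WriteHeadHeaderHash(db, hash)    // head of the header chain
	WriteHeadBlockHash(db, hash)     // head of the full block chain
	WriteHeadFastBlockHash(db, hash) // head of the fast/snap-synced chain
}
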
// ReadLastPivotNumber retrieves the number of the last pivot block. If the node
// is fully synced, the last pivot will always be nil.
func ReadLastPivotNumber(db ethdb.KeyValueReader) *uint64 {
	data, _ := db.Get(lastPivotKey)
	if len(data) == 0 {
		return nil
	}
	var pivot uint64
	if err := rlp.DecodeBytes(data, &pivot); err != nil {
		log.Error("Invalid pivot block number in database", "err", err)
		return nil
	}
	return &pivot
}

// WriteLastPivotNumber stores the number of the last pivot block.
func WriteLastPivotNumber(db ethdb.KeyValueWriter, pivot uint64) {
	enc, err := rlp.EncodeToBytes(pivot)
	if err != nil {
		log.Crit("Failed to encode pivot block number", "err", err)
	}
	if err := db.Put(lastPivotKey, enc); err != nil {
		log.Crit("Failed to store pivot block number", "err", err)
	}
}

// ReadTxIndexTail retrieves the number of the oldest block whose transaction
// indices have been indexed.
func ReadTxIndexTail(db ethdb.KeyValueReader) *uint64 {
	data, _ := db.Get(txIndexTailKey)
	if len(data) != 8 {
		return nil
	}
	number := binary.BigEndian.Uint64(data)
	return &number
}

// WriteTxIndexTail stores the number of the oldest indexed block into the
// database.
func WriteTxIndexTail(db ethdb.KeyValueWriter, number uint64) {
	if err := db.Put(txIndexTailKey, encodeBlockNumber(number)); err != nil {
		log.Crit("Failed to store the transaction index tail", "err", err)
	}
}

// ReadFastTxLookupLimit retrieves the tx lookup limit used in fast sync.
func ReadFastTxLookupLimit(db ethdb.KeyValueReader) *uint64 {
	data, _ := db.Get(fastTxLookupLimitKey)
	if len(data) != 8 {
		return nil
	}
	number := binary.BigEndian.Uint64(data)
	return &number
}

// WriteFastTxLookupLimit stores the tx lookup limit used in fast sync into the database.
func WriteFastTxLookupLimit(db ethdb.KeyValueWriter, number uint64) {
	if err := db.Put(fastTxLookupLimitKey, encodeBlockNumber(number)); err != nil {
		log.Crit("Failed to store transaction lookup limit for fast sync", "err", err)
	}
}

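// txIndexedExample is an illustrative sketch, not part of the original accessors:
// under the assumption that blocks from the index tail onwards are covered by the
// transaction lookup index, callers can compare a block number against
// ReadTxIndexTail before attempting a lookup.
func txIndexedExample(db ethdb.KeyValueReader, number uint64) bool {
	tail := ReadTxIndexTail(db)
	return tail != nil && number >= *tail
}
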
// ReadHeaderRange returns the rlp-encoded headers, starting at 'number', and going
// backwards towards genesis. This method assumes that the caller already has
// placed a cap on count, to prevent DoS issues.
// Since this method operates in head-towards-genesis mode, it will return an empty
// slice in case the head ('number') is missing. Hence, the caller must ensure that
// the head ('number') argument is actually an existing header.
//
// N.B: Since the input is a number, as opposed to a hash, it's implicit that
// this method only operates on canon headers.
func ReadHeaderRange(db ethdb.Reader, number uint64, count uint64) []rlp.RawValue {
	var rlpHeaders []rlp.RawValue
	if count == 0 {
		return rlpHeaders
	}
	i := number
	if count-1 > number {
		// It's ok to request block 0, 1 item
		count = number + 1
	}
	limit, _ := db.Ancients()
	// First read live blocks
	if i >= limit {
		// If we need to read live blocks, we need to figure out the hash first
		hash := ReadCanonicalHash(db, number)
		for ; i >= limit && count > 0; i-- {
			if data, _ := db.Get(headerKey(i, hash)); len(data) > 0 {
				rlpHeaders = append(rlpHeaders, data)
				// Get the parent hash for next query
				hash = types.HeaderParentHashFromRLP(data)
			} else {
				break // Maybe got moved to ancients
			}
			count--
		}
	}
	if count == 0 {
		return rlpHeaders
	}
	// read remaining from ancients
	data, err := db.AncientRange(ChainFreezerHeaderTable, i+1-count, count, 0)
	if err != nil {
		log.Error("Failed to read headers from freezer", "err", err)
		return rlpHeaders
	}
	if uint64(len(data)) != count {
		log.Warn("Incomplete read of headers from freezer", "wanted", count, "read", len(data))
		return rlpHeaders
	}
	// The data is on the order [h, h+1, .., n] -- reordering needed
	for i := range data {
		rlpHeaders = append(rlpHeaders, data[len(data)-1-i])
	}
	return rlpHeaders
}

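// headerBatchExample is an illustrative sketch, not part of the original
// accessors: it shows the intended call pattern for ReadHeaderRange when serving
// a request for 'count' canonical headers ending at 'head', newest first. As the
// documentation above notes, the caller is responsible for capping 'count' and
// for making sure 'head' actually exists.
func headerBatchExample(db ethdb.Reader, head uint64, count uint64) []rlp.RawValue {
	if !HasHeader(db, ReadCanonicalHash(db, head), head) {
		return nil // unknown head, ReadHeaderRange would return an empty slice anyway
	}
	return ReadHeaderRange(db, head, count) // RLP headers: head, head-1, ... towards genesis
}
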
// ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
	var data []byte
	db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
		// First try to look up the data in ancient database. Extra hash
		// comparison is necessary since ancient database only maintains
		// the canonical data.
		data, _ = reader.Ancient(ChainFreezerHeaderTable, number)
		if len(data) > 0 && crypto.Keccak256Hash(data) == hash {
			return nil
		}
		// If not, try reading from leveldb
		data, _ = db.Get(headerKey(number, hash))
		return nil
	})
	return data
}

// HasHeader verifies the existence of a block header corresponding to the hash.
func HasHeader(db ethdb.Reader, hash common.Hash, number uint64) bool {
	if isCanon(db, number, hash) {
		return true
	}
	if has, err := db.Has(headerKey(number, hash)); !has || err != nil {
		return false
	}
	return true
}

// ReadHeader retrieves the block header corresponding to the hash.
func ReadHeader(db ethdb.Reader, hash common.Hash, number uint64) *types.Header {
	data := ReadHeaderRLP(db, hash, number)
	if len(data) == 0 {
		return nil
	}
	header := new(types.Header)
	if err := rlp.DecodeBytes(data, header); err != nil {
		log.Error("Invalid block header RLP", "hash", hash, "err", err)
		return nil
	}
	return header
}

// WriteHeader stores a block header into the database and also stores the hash-
// to-number mapping.
func WriteHeader(db ethdb.KeyValueWriter, header *types.Header) {
	var (
		hash   = header.Hash()
		number = header.Number.Uint64()
	)
	// Write the hash -> number mapping
	WriteHeaderNumber(db, hash, number)

	// Write the encoded header
	data, err := rlp.EncodeToBytes(header)
	if err != nil {
		log.Crit("Failed to RLP encode header", "err", err)
	}
	key := headerKey(number, hash)
	if err := db.Put(key, data); err != nil {
		log.Crit("Failed to store header", "err", err)
	}
}

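// headerRoundTripExample is an illustrative sketch, not part of the original
// accessors: WriteHeader persists both the hash->number mapping and the RLP
// encoded header, after which the header can be read back by hash and number.
// The handle is assumed to be a full ethdb.Database so it satisfies both the
// reader and writer interfaces used by the two calls.
func headerRoundTripExample(db ethdb.Database, header *types.Header) *types.Header {
	WriteHeader(db, header)
	return ReadHeader(db, header.Hash(), header.Number.Uint64())
}
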
// DeleteHeader removes all block header data associated with a hash.
func DeleteHeader(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	deleteHeaderWithoutNumber(db, hash, number)
	if err := db.Delete(headerNumberKey(hash)); err != nil {
		log.Crit("Failed to delete hash to number mapping", "err", err)
	}
}

// deleteHeaderWithoutNumber removes only the block header but does not remove
// the hash to number mapping.
func deleteHeaderWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	if err := db.Delete(headerKey(number, hash)); err != nil {
		log.Crit("Failed to delete header", "err", err)
	}
}

// isCanon is an internal utility method, to check whether the given number/hash
// is part of the ancient (canon) set.
func isCanon(reader ethdb.AncientReaderOp, number uint64, hash common.Hash) bool {
	h, err := reader.Ancient(ChainFreezerHashTable, number)
	if err != nil {
		return false
	}
	return bytes.Equal(h, hash[:])
}

// ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
	// First try to look up the data in ancient database. Extra hash
	// comparison is necessary since ancient database only maintains
	// the canonical data.
	var data []byte
	db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
		// Check if the data is in ancients
		if isCanon(reader, number, hash) {
			data, _ = reader.Ancient(ChainFreezerBodiesTable, number)
			return nil
		}
		// If not, try reading from leveldb
		data, _ = db.Get(blockBodyKey(number, hash))
		return nil
	})
	return data
}

// ReadCanonicalBodyRLP retrieves the block body (transactions and uncles) for the canonical
// block at number, in RLP encoding.
func ReadCanonicalBodyRLP(db ethdb.Reader, number uint64) rlp.RawValue {
	var data []byte
	db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
		data, _ = reader.Ancient(ChainFreezerBodiesTable, number)
		if len(data) > 0 {
			return nil
		}
		// Block is not in ancients, read from leveldb by hash and number.
		// Note: ReadCanonicalHash cannot be used here because it also
		// calls ReadAncients internally.
		hash, _ := db.Get(headerHashKey(number))
		data, _ = db.Get(blockBodyKey(number, common.BytesToHash(hash)))
		return nil
	})
	return data
}

// WriteBodyRLP stores an RLP encoded block body into the database.
func WriteBodyRLP(db ethdb.KeyValueWriter, hash common.Hash, number uint64, rlp rlp.RawValue) {
	if err := db.Put(blockBodyKey(number, hash), rlp); err != nil {
		log.Crit("Failed to store block body", "err", err)
	}
}

// HasBody verifies the existence of a block body corresponding to the hash.
func HasBody(db ethdb.Reader, hash common.Hash, number uint64) bool {
	if isCanon(db, number, hash) {
		return true
	}
	if has, err := db.Has(blockBodyKey(number, hash)); !has || err != nil {
		return false
	}
	return true
}

// ReadBody retrieves the block body corresponding to the hash.
func ReadBody(db ethdb.Reader, hash common.Hash, number uint64) *types.Body {
	data := ReadBodyRLP(db, hash, number)
	if len(data) == 0 {
		return nil
	}
	body := new(types.Body)
	if err := rlp.DecodeBytes(data, body); err != nil {
		log.Error("Invalid block body RLP", "hash", hash, "err", err)
		return nil
	}
	return body
}

// WriteBody stores a block body into the database.
func WriteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64, body *types.Body) {
	data, err := rlp.EncodeToBytes(body)
	if err != nil {
		log.Crit("Failed to RLP encode body", "err", err)
	}
	WriteBodyRLP(db, hash, number, data)
}

// DeleteBody removes all block body data associated with a hash.
func DeleteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	if err := db.Delete(blockBodyKey(number, hash)); err != nil {
		log.Crit("Failed to delete block body", "err", err)
	}
}

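// readCanonicalBlockPartsExample is an illustrative sketch, not part of the
// original accessors: a block is stored as separate header and body entries, so
// reconstructing the canonical block at a height means resolving the canonical
// hash first and then fetching both parts with the helpers in this file.
func readCanonicalBlockPartsExample(db ethdb.Reader, number uint64) (*types.Header, *types.Body) {
	hash := ReadCanonicalHash(db, number)
	if hash == (common.Hash{}) {
		return nil, nil // unknown height
	}
	return ReadHeader(db, hash, number), ReadBody(db, hash, number)
}
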
// ReadTdRLP retrieves a block's total difficulty corresponding to the hash in RLP encoding.
func ReadTdRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
	var data []byte
	db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
		// Check if the data is in ancients
		if isCanon(reader, number, hash) {
			data, _ = reader.Ancient(ChainFreezerDifficultyTable, number)
			return nil
		}
		// If not, try reading from leveldb
		data, _ = db.Get(headerTDKey(number, hash))
		return nil
	})
	return data
}

// ReadTd retrieves a block's total difficulty corresponding to the hash.
func ReadTd(db ethdb.Reader, hash common.Hash, number uint64) *big.Int {
	data := ReadTdRLP(db, hash, number)
	if len(data) == 0 {
		return nil
	}
	td := new(big.Int)
	if err := rlp.DecodeBytes(data, td); err != nil {
		log.Error("Invalid block total difficulty RLP", "hash", hash, "err", err)
		return nil
	}
	return td
}

// WriteTd stores the total difficulty of a block into the database.
func WriteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64, td *big.Int) {
	data, err := rlp.EncodeToBytes(td)
	if err != nil {
		log.Crit("Failed to RLP encode block total difficulty", "err", err)
	}
	if err := db.Put(headerTDKey(number, hash), data); err != nil {
		log.Crit("Failed to store block total difficulty", "err", err)
	}
}

// DeleteTd removes all block total difficulty data associated with a hash.
func DeleteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	if err := db.Delete(headerTDKey(number, hash)); err != nil {
		log.Crit("Failed to delete block total difficulty", "err", err)
	}
}

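// writeChildTdExample is an illustrative sketch, not part of the original
// accessors: the total difficulty stored for a block is the parent's stored total
// difficulty plus the block's own difficulty, which is how insertion code usually
// derives the value passed to WriteTd. The handle is assumed to be a full
// ethdb.Database so it can both read and write.
func writeChildTdExample(db ethdb.Database, header *types.Header) {
	number := header.Number.Uint64()
	if number == 0 {
		return // the genesis total difficulty is seeded elsewhere
	}
	parentTd := ReadTd(db, header.ParentHash, number-1)
	if parentTd == nil {
		return // parent total difficulty missing, nothing sensible to store
	}
	WriteTd(db, header.Hash(), number, new(big.Int).Add(parentTd, header.Difficulty))
}
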
// HasReceipts verifies the existence of all the transaction receipts belonging
// to a block.
func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool {
	if isCanon(db, number, hash) {
		return true
	}
	if has, err := db.Has(blockReceiptsKey(number, hash)); !has || err != nil {
		return false
	}
	return true
}

2018-09-24 15:57:49 +03:00
|
|
|
// ReadReceiptsRLP retrieves all the transaction receipts belonging to a block in RLP encoding.
func ReadReceiptsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
	var data []byte
	db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
		// Check if the data is in ancients
		if isCanon(reader, number, hash) {
			data, _ = reader.Ancient(ChainFreezerReceiptTable, number)
			return nil
		}
		// If not, try reading from leveldb
		data, _ = db.Get(blockReceiptsKey(number, hash))
		return nil
	})
	return data
}

// ReadRawReceipts retrieves all the transaction receipts belonging to a block.
// The receipt metadata fields are not guaranteed to be populated, so they
// should not be used. Use ReadReceipts instead if the metadata is needed.
func ReadRawReceipts(db ethdb.Reader, hash common.Hash, number uint64) types.Receipts {
	// Retrieve the flattened receipt slice
	data := ReadReceiptsRLP(db, hash, number)
	if len(data) == 0 {
		return nil
	}
	// Convert the receipts from their storage form to their internal representation
	storageReceipts := []*types.ReceiptForStorage{}
	if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
		log.Error("Invalid receipt array RLP", "hash", hash, "err", err)
		return nil
	}
	receipts := make(types.Receipts, len(storageReceipts))
	for i, storageReceipt := range storageReceipts {
		receipts[i] = (*types.Receipt)(storageReceipt)
	}
	return receipts
}

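// Illustrative sketch, not part of the upstream rawdb accessors: raw receipts are
// sufficient whenever only consensus fields are needed, e.g. counting the logs
// emitted in a block. The function name is hypothetical.
func exampleCountBlockLogs(db ethdb.Reader, hash common.Hash, number uint64) int {
	count := 0
	for _, receipt := range ReadRawReceipts(db, hash, number) {
		count += len(receipt.Logs)
	}
	return count
}
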
// ReadReceipts retrieves all the transaction receipts belonging to a block, including
// its corresponding metadata fields. If it is unable to populate these metadata
// fields then nil is returned.
//
// The current implementation populates these metadata fields by reading the receipts'
// corresponding block body, so if the block body is not found it will return nil even
// if the receipt itself is stored.
func ReadReceipts(db ethdb.Reader, hash common.Hash, number uint64, time uint64, config *params.ChainConfig) types.Receipts {
	// We're deriving many fields from the block body, so retrieve it alongside the receipts
	receipts := ReadRawReceipts(db, hash, number)
	if receipts == nil {
		return nil
	}
	body := ReadBody(db, hash, number)
	if body == nil {
		log.Error("Missing body but have receipt", "hash", hash, "number", number)
		return nil
	}
	header := ReadHeader(db, hash, number)

	var baseFee *big.Int
	if header == nil {
		baseFee = big.NewInt(0)
	} else {
		baseFee = header.BaseFee
	}
	// Compute effective blob gas price.
	var blobGasPrice *big.Int
	if header != nil && header.ExcessBlobGas != nil {
		blobGasPrice = eip4844.CalcBlobFee(*header.ExcessBlobGas)
	}
	if err := receipts.DeriveFields(config, hash, number, time, baseFee, blobGasPrice, body.Transactions); err != nil {
		log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", err)
		return nil
	}
	return receipts
}

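// Illustrative sketch, not part of the upstream rawdb accessors: derive the full
// receipts for the canonical block at a given height. The function name is
// hypothetical and the chain config is assumed to be supplied by the caller.
func exampleCanonicalReceipts(db ethdb.Reader, number uint64, config *params.ChainConfig) types.Receipts {
	hash := ReadCanonicalHash(db, number)
	if hash == (common.Hash{}) {
		return nil
	}
	header := ReadHeader(db, hash, number)
	if header == nil {
		return nil
	}
	// ReadReceipts needs the block time to derive fork-dependent fields.
	return ReadReceipts(db, hash, number, header.Time, config)
}
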
// WriteReceipts stores all the transaction receipts belonging to a block.
func WriteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64, receipts types.Receipts) {
	// Convert the receipts into their storage form and serialize them
	storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
	for i, receipt := range receipts {
		storageReceipts[i] = (*types.ReceiptForStorage)(receipt)
	}
	bytes, err := rlp.EncodeToBytes(storageReceipts)
	if err != nil {
		log.Crit("Failed to encode block receipts", "err", err)
	}
	// Store the flattened receipt slice
	if err := db.Put(blockReceiptsKey(number, hash), bytes); err != nil {
		log.Crit("Failed to store block receipts", "err", err)
	}
}

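// Illustrative sketch, not part of the upstream rawdb accessors: store the
// receipts of a block, verify their presence and remove them again. The function
// name is hypothetical.
func exampleReceiptLifecycle(db ethdb.Database, block *types.Block, receipts types.Receipts) {
	WriteReceipts(db, block.Hash(), block.NumberU64(), receipts)
	if !HasReceipts(db, block.Hash(), block.NumberU64()) {
		log.Error("Receipts missing right after write", "hash", block.Hash())
	}
	DeleteReceipts(db, block.Hash(), block.NumberU64())
}
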
// DeleteReceipts removes all receipt data associated with a block hash.
func DeleteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	if err := db.Delete(blockReceiptsKey(number, hash)); err != nil {
		log.Crit("Failed to delete block receipts", "err", err)
	}
}

// storedReceiptRLP is the storage encoding of a receipt.
// Re-definition in core/types/receipt.go.
// TODO: Re-use the existing definition.
type storedReceiptRLP struct {
	PostStateOrStatus []byte
	CumulativeGasUsed uint64
	Logs              []*types.Log
}

// receiptLogs is a barebone version of ReceiptForStorage which only keeps
// the list of logs. When decoding a stored receipt into this object we
// avoid creating the bloom filter.
type receiptLogs struct {
	Logs []*types.Log
}

// DecodeRLP implements rlp.Decoder.
func (r *receiptLogs) DecodeRLP(s *rlp.Stream) error {
	var stored storedReceiptRLP
	if err := s.Decode(&stored); err != nil {
		return err
	}
	r.Logs = stored.Logs
	return nil
}

// deriveLogFields fills the logs in receiptLogs with information such as block number, txhash, etc.
func deriveLogFields(receipts []*receiptLogs, hash common.Hash, number uint64, txs types.Transactions) error {
	logIndex := uint(0)
	if len(txs) != len(receipts) {
		return errors.New("transaction and receipt count mismatch")
	}
	for i := 0; i < len(receipts); i++ {
		txHash := txs[i].Hash()
		// The derived log fields can simply be set from the block and transaction
		for j := 0; j < len(receipts[i].Logs); j++ {
			receipts[i].Logs[j].BlockNumber = number
			receipts[i].Logs[j].BlockHash = hash
			receipts[i].Logs[j].TxHash = txHash
			receipts[i].Logs[j].TxIndex = uint(i)
			receipts[i].Logs[j].Index = logIndex
			logIndex++
		}
	}
	return nil
}

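// Illustrative sketch, not part of the upstream rawdb accessors: decode the stored
// receipt RLP into the lightweight receiptLogs form and fill in the derived log
// fields from the block body, combining ReadReceiptsRLP with deriveLogFields.
// The function name is hypothetical.
func exampleLogsWithDerivedFields(db ethdb.Reader, hash common.Hash, number uint64) [][]*types.Log {
	data := ReadReceiptsRLP(db, hash, number)
	if len(data) == 0 {
		return nil
	}
	var stored []*receiptLogs
	if err := rlp.DecodeBytes(data, &stored); err != nil {
		return nil
	}
	body := ReadBody(db, hash, number)
	if body == nil {
		return nil
	}
	if err := deriveLogFields(stored, hash, number, body.Transactions); err != nil {
		return nil
	}
	logs := make([][]*types.Log, len(stored))
	for i, r := range stored {
		logs[i] = r.Logs
	}
	return logs
}
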
// ReadLogs retrieves the logs for all transactions in a block. If the receipts
// are not found, nil is returned.
// Note: ReadLogs does not derive unstored log fields.
func ReadLogs(db ethdb.Reader, hash common.Hash, number uint64) [][]*types.Log {
	// Retrieve the flattened receipt slice
	data := ReadReceiptsRLP(db, hash, number)
	if len(data) == 0 {
		return nil
	}
	receipts := []*receiptLogs{}
	if err := rlp.DecodeBytes(data, &receipts); err != nil {
		log.Error("Invalid receipt array RLP", "hash", hash, "err", err)
		return nil
	}

	logs := make([][]*types.Log, len(receipts))
	for i, receipt := range receipts {
		logs[i] = receipt.Logs
	}
	return logs
}

// ReadBlock retrieves an entire block corresponding to the hash, assembling it
// back from the stored header and body. If either the header or body could not
// be retrieved nil is returned.
//
// Note, due to concurrent download of header and block body the header and thus
// canonical hash can be stored in the database but the body data not (yet).
func ReadBlock(db ethdb.Reader, hash common.Hash, number uint64) *types.Block {
	header := ReadHeader(db, hash, number)
	if header == nil {
		return nil
	}
	body := ReadBody(db, hash, number)
	if body == nil {
		return nil
	}
	return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles).WithWithdrawals(body.Withdrawals)
}

// WriteBlock serializes a block into the database, header and body separately.
func WriteBlock(db ethdb.KeyValueWriter, block *types.Block) {
	WriteBody(db, block.Hash(), block.NumberU64(), block.Body())
	WriteHeader(db, block.Header())
}

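// Illustrative sketch, not part of the upstream rawdb accessors: a block written
// with WriteBlock can be reassembled with ReadBlock as long as both the header and
// the body are present. The function name is hypothetical.
func exampleBlockRoundTrip(db ethdb.Database, block *types.Block) *types.Block {
	WriteBlock(db, block)
	return ReadBlock(db, block.Hash(), block.NumberU64())
}
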
// WriteAncientBlocks writes entire block data into ancient store and returns the total written size.
func WriteAncientBlocks(db ethdb.AncientWriter, blocks []*types.Block, receipts []types.Receipts, td *big.Int) (int64, error) {
	var (
		tdSum      = new(big.Int).Set(td)
		stReceipts []*types.ReceiptForStorage
	)
	return db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
		for i, block := range blocks {
			// Convert receipts to storage format and sum up total difficulty.
			stReceipts = stReceipts[:0]
			for _, receipt := range receipts[i] {
				stReceipts = append(stReceipts, (*types.ReceiptForStorage)(receipt))
			}
			header := block.Header()
			if i > 0 {
				tdSum.Add(tdSum, header.Difficulty)
			}
			if err := writeAncientBlock(op, block, header, stReceipts, tdSum); err != nil {
				return err
			}
		}
		return nil
	})
}

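// Illustrative sketch, not part of the upstream rawdb accessors: freeze a
// contiguous batch of canonical blocks and their receipts into the ancient store.
// The function name is hypothetical; td is the total difficulty of the first block
// in the batch, as WriteAncientBlocks expects.
func exampleFreezeBatch(db ethdb.Database, blocks []*types.Block, receipts []types.Receipts, td *big.Int) error {
	size, err := WriteAncientBlocks(db, blocks, receipts, td)
	if err != nil {
		return err
	}
	log.Info("Froze block batch", "count", len(blocks), "size", size)
	return nil
}
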
func writeAncientBlock(op ethdb.AncientWriteOp, block *types.Block, header *types.Header, receipts []*types.ReceiptForStorage, td *big.Int) error {
	num := block.NumberU64()
	if err := op.AppendRaw(ChainFreezerHashTable, num, block.Hash().Bytes()); err != nil {
		return fmt.Errorf("can't add block %d hash: %v", num, err)
	}
	if err := op.Append(ChainFreezerHeaderTable, num, header); err != nil {
		return fmt.Errorf("can't append block header %d: %v", num, err)
	}
	if err := op.Append(ChainFreezerBodiesTable, num, block.Body()); err != nil {
		return fmt.Errorf("can't append block body %d: %v", num, err)
	}
	if err := op.Append(ChainFreezerReceiptTable, num, receipts); err != nil {
		return fmt.Errorf("can't append block %d receipts: %v", num, err)
	}
	if err := op.Append(ChainFreezerDifficultyTable, num, td); err != nil {
		return fmt.Errorf("can't append block %d total difficulty: %v", num, err)
	}
	return nil
}

// DeleteBlock removes all block data associated with a hash.
func DeleteBlock(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	DeleteReceipts(db, hash, number)
	DeleteHeader(db, hash, number)
	DeleteBody(db, hash, number)
	DeleteTd(db, hash, number)
}

// DeleteBlockWithoutNumber removes all block data associated with a hash, except
// the hash to number mapping.
func DeleteBlockWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	DeleteReceipts(db, hash, number)
	deleteHeaderWithoutNumber(db, hash, number)
	DeleteBody(db, hash, number)
	DeleteTd(db, hash, number)
}

const badBlockToKeep = 10

type badBlock struct {
	Header *types.Header
	Body   *types.Body
}

// ReadBadBlock retrieves the bad block with the corresponding block hash.
func ReadBadBlock(db ethdb.Reader, hash common.Hash) *types.Block {
	blob, err := db.Get(badBlockKey)
	if err != nil {
		return nil
	}
	var badBlocks []*badBlock
	if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
		return nil
	}
	for _, bad := range badBlocks {
		if bad.Header.Hash() == hash {
			return types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles).WithWithdrawals(bad.Body.Withdrawals)
		}
	}
	return nil
}

// ReadAllBadBlocks retrieves all the bad blocks in the database.
// All returned blocks are sorted in reverse order by number.
func ReadAllBadBlocks(db ethdb.Reader) []*types.Block {
	blob, err := db.Get(badBlockKey)
	if err != nil {
		return nil
	}
	var badBlocks []*badBlock
	if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
		return nil
	}
	var blocks []*types.Block
	for _, bad := range badBlocks {
		blocks = append(blocks, types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles).WithWithdrawals(bad.Body.Withdrawals))
	}
	return blocks
}

// WriteBadBlock serializes the bad block into the database. If the number of
// accumulated bad blocks exceeds the limit, the oldest one will be dropped.
func WriteBadBlock(db ethdb.KeyValueStore, block *types.Block) {
	blob, err := db.Get(badBlockKey)
	if err != nil {
		log.Warn("Failed to load old bad blocks", "error", err)
	}
	var badBlocks []*badBlock
	if len(blob) > 0 {
		if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
			log.Crit("Failed to decode old bad blocks", "error", err)
		}
	}
	for _, b := range badBlocks {
		if b.Header.Number.Uint64() == block.NumberU64() && b.Header.Hash() == block.Hash() {
			log.Info("Skip duplicated bad block", "number", block.NumberU64(), "hash", block.Hash())
			return
		}
	}
	badBlocks = append(badBlocks, &badBlock{
		Header: block.Header(),
		Body:   block.Body(),
	})
	slices.SortFunc(badBlocks, func(a, b *badBlock) int {
		// Note: sorting in descending number order.
		return -a.Header.Number.Cmp(b.Header.Number)
	})
	if len(badBlocks) > badBlockToKeep {
		badBlocks = badBlocks[:badBlockToKeep]
	}
	data, err := rlp.EncodeToBytes(badBlocks)
	if err != nil {
		log.Crit("Failed to encode bad blocks", "err", err)
	}
	if err := db.Put(badBlockKey, data); err != nil {
		log.Crit("Failed to write bad blocks", "err", err)
	}
}

// DeleteBadBlocks deletes all the bad blocks from the database.
func DeleteBadBlocks(db ethdb.KeyValueWriter) {
	if err := db.Delete(badBlockKey); err != nil {
		log.Crit("Failed to delete bad blocks", "err", err)
	}
}

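// Illustrative sketch, not part of the upstream rawdb accessors: record a rejected
// block and report how many bad blocks are currently tracked (at most
// badBlockToKeep). The function name is hypothetical.
func exampleTrackBadBlock(db ethdb.Database, block *types.Block) {
	WriteBadBlock(db, block)
	log.Info("Tracked bad blocks", "count", len(ReadAllBadBlocks(db)))
}
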
// FindCommonAncestor returns the last common ancestor of two block headers.
func FindCommonAncestor(db ethdb.Reader, a, b *types.Header) *types.Header {
	for bn := b.Number.Uint64(); a.Number.Uint64() > bn; {
		a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1)
		if a == nil {
			return nil
		}
	}
	for an := a.Number.Uint64(); an < b.Number.Uint64(); {
		b = ReadHeader(db, b.ParentHash, b.Number.Uint64()-1)
		if b == nil {
			return nil
		}
	}
	for a.Hash() != b.Hash() {
		a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1)
		if a == nil {
			return nil
		}
		b = ReadHeader(db, b.ParentHash, b.Number.Uint64()-1)
		if b == nil {
			return nil
		}
	}
	return a
}

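// Illustrative sketch, not part of the upstream rawdb accessors: locate the fork
// point between the current head header and some side chain's head, e.g. when
// resolving a reorg. The function name is hypothetical.
func exampleForkPoint(db ethdb.Reader, sideHead *types.Header) *types.Header {
	head := ReadHeadHeader(db)
	if head == nil || sideHead == nil {
		return nil
	}
	return FindCommonAncestor(db, head, sideHead)
}
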
// ReadHeadHeader returns the current canonical head header.
func ReadHeadHeader(db ethdb.Reader) *types.Header {
	headHeaderHash := ReadHeadHeaderHash(db)
	if headHeaderHash == (common.Hash{}) {
		return nil
	}
	headHeaderNumber := ReadHeaderNumber(db, headHeaderHash)
	if headHeaderNumber == nil {
		return nil
	}
	return ReadHeader(db, headHeaderHash, *headHeaderNumber)
}

// ReadHeadBlock returns the current canonical head block.
func ReadHeadBlock(db ethdb.Reader) *types.Block {
	headBlockHash := ReadHeadBlockHash(db)
	if headBlockHash == (common.Hash{}) {
		return nil
	}
	headBlockNumber := ReadHeaderNumber(db, headBlockHash)
	if headBlockNumber == nil {
		return nil
	}
	return ReadBlock(db, headBlockHash, *headBlockNumber)
}

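// Illustrative sketch, not part of the upstream rawdb accessors: fetch the
// canonical head block together with its derived receipts. The function name is
// hypothetical and the chain config is assumed to be supplied by the caller.
func exampleHeadBlockWithReceipts(db ethdb.Reader, config *params.ChainConfig) (*types.Block, types.Receipts) {
	block := ReadHeadBlock(db)
	if block == nil {
		return nil, nil
	}
	return block, ReadReceipts(db, block.Hash(), block.NumberU64(), block.Time(), config)
}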