bsc/trie/stacktrie.go

// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package trie

import (
	"bufio"
	"bytes"
	"encoding/gob"
	"errors"
	"fmt"
	"io"
	"sync"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
)

var ErrCommitDisabled = errors.New("no database for committing")

var stPool = sync.Pool{
	New: func() interface{} {
		return NewStackTrie(nil)
	},
}
func stackTrieFromPool(db ethdb.KeyValueWriter) *StackTrie {
	st := stPool.Get().(*StackTrie)
	st.db = db
	return st
}

func returnToPool(st *StackTrie) {
	st.Reset()
	stPool.Put(st)
}
// StackTrie is a trie implementation that expects keys to be inserted
// in order. Once it determines that a subtree will no longer be inserted
// into, it will hash it and free up the memory it uses.
type StackTrie struct {
	nodeType uint8                // node type (as in branch, ext, leaf)
	val      []byte               // value contained by this node if it's a leaf
	key      []byte               // key chunk covered by this (leaf|ext) node
	children [16]*StackTrie       // list of children (for branch and exts)
	db       ethdb.KeyValueWriter // Pointer to the commit db, can be nil
}
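
// A minimal usage sketch (illustrative, not part of the original file):
// keys must be inserted in strictly increasing byte order, after which
// the root hash can be derived without keeping the whole trie in memory:
//
//	st := NewStackTrie(nil)
//	st.Update([]byte{0x01}, []byte("a"))
//	st.Update([]byte{0x02}, []byte("b"))
//	root := st.Hash()
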
// NewStackTrie allocates and initializes an empty trie.
func NewStackTrie(db ethdb.KeyValueWriter) *StackTrie {
	return &StackTrie{
		nodeType: emptyNode,
		db:       db,
	}
}
// NewFromBinary initialises a serialized stacktrie with the given db.
func NewFromBinary(data []byte, db ethdb.KeyValueWriter) (*StackTrie, error) {
	var st StackTrie
	if err := st.UnmarshalBinary(data); err != nil {
		return nil, err
	}
	// If a database is used, we need to recursively add it to every child
	if db != nil {
		st.setDb(db)
	}
	return &st, nil
}
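
// A serialization round-trip sketch (illustrative): a partially built
// StackTrie can be snapshotted and resumed later, optionally re-attaching
// a database via NewFromBinary:
//
//	blob, err := st.MarshalBinary()
//	// handle err ...
//	resumed, err := NewFromBinary(blob, nil)
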
// MarshalBinary implements encoding.BinaryMarshaler. The encoding consists
// of a gob-encoded header (node type, value, key) followed, for each of the
// 16 child slots, by a presence byte (0 or 1) and, if present, the child's
// own binary encoding.
func (st *StackTrie) MarshalBinary() (data []byte, err error) {
	var (
		b bytes.Buffer
		w = bufio.NewWriter(&b)
	)
	if err := gob.NewEncoder(w).Encode(struct {
		Nodetype uint8
		Val      []byte
		Key      []byte
	}{
		st.nodeType,
		st.val,
		st.key,
	}); err != nil {
		return nil, err
	}
	for _, child := range st.children {
		if child == nil {
			w.WriteByte(0)
			continue
		}
		w.WriteByte(1)
		if childData, err := child.MarshalBinary(); err != nil {
			return nil, err
		} else {
			w.Write(childData)
		}
	}
	if err := w.Flush(); err != nil {
		return nil, err
	}
	return b.Bytes(), nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler
func (st *StackTrie) UnmarshalBinary(data []byte) error {
	r := bytes.NewReader(data)
	return st.unmarshalBinary(r)
}

func (st *StackTrie) unmarshalBinary(r io.Reader) error {
	var dec struct {
		Nodetype uint8
		Val      []byte
		Key      []byte
	}
	if err := gob.NewDecoder(r).Decode(&dec); err != nil {
		return err
	}
	st.nodeType = dec.Nodetype
	st.val = dec.Val
	st.key = dec.Key

	var hasChild = make([]byte, 1)
	for i := range st.children {
		if _, err := r.Read(hasChild); err != nil {
			return err
		} else if hasChild[0] == 0 {
			continue
		}
		var child StackTrie
		if err := child.unmarshalBinary(r); err != nil {
			return err
		}
		st.children[i] = &child
	}
	return nil
}
func (st *StackTrie) setDb(db ethdb.KeyValueWriter) {
	st.db = db
	for _, child := range st.children {
		if child != nil {
			child.setDb(db)
		}
	}
}
func newLeaf(key, val []byte, db ethdb.KeyValueWriter) *StackTrie {
	st := stackTrieFromPool(db)
	st.nodeType = leafNode
	st.key = append(st.key, key...)
	st.val = val
	return st
}

func newExt(key []byte, child *StackTrie, db ethdb.KeyValueWriter) *StackTrie {
	st := stackTrieFromPool(db)
	st.nodeType = extNode
	st.key = append(st.key, key...)
	st.children[0] = child
	return st
}
// List all values that StackTrie#nodeType can hold
const (
	emptyNode = iota
	branchNode
	extNode
	leafNode
	hashedNode
)
// TryUpdate inserts a (key, value) pair into the stack trie
func (st *StackTrie) TryUpdate(key, value []byte) error {
	k := keybytesToHex(key)
	if len(value) == 0 {
		panic("deletion not supported")
	}
	st.insert(k[:len(k)-1], value)
	return nil
}

func (st *StackTrie) Update(key, value []byte) {
	if err := st.TryUpdate(key, value); err != nil {
		log.Error(fmt.Sprintf("Unhandled trie error: %v", err))
	}
}
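
// For context (illustrative): keybytesToHex expands each key byte into two
// nibbles and appends a 0x10 terminator, so key 0x12 becomes [0x1, 0x2, 0x10].
// TryUpdate strips the terminator (k[:len(k)-1]) before insertion; hashRec
// re-appends it when encoding a leaf.
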
func (st *StackTrie) Reset() {
	st.db = nil
	st.key = st.key[:0]
	st.val = nil
	for i := range st.children {
		st.children[i] = nil
	}
	st.nodeType = emptyNode
}
// Helper function that, given a full key, determines the index at which
// this node's key chunk (st.key) first differs from the corresponding
// segment of the full key.
func (st *StackTrie) getDiffIndex(key []byte) int {
	for idx, nibble := range st.key {
		if nibble != key[idx] {
			return idx
		}
	}
	return len(st.key)
}
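
// For example (illustrative): with st.key = [1, 2, 3] and key = [1, 2, 4, 5],
// getDiffIndex returns 2; with key = [1, 2, 3, 5] it returns len(st.key) == 3,
// i.e. st.key is a full prefix of the inserted key.
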
// Helper function that inserts a (key, value) pair into the trie.
func (st *StackTrie) insert(key, value []byte) {
	switch st.nodeType {
	case branchNode: /* Branch */
		idx := int(key[0])
		// Unresolve (hash) elder siblings: since keys arrive in order,
		// no further insertion can touch them.
		for i := idx - 1; i >= 0; i-- {
			if st.children[i] != nil {
				if st.children[i].nodeType != hashedNode {
					st.children[i].hash()
				}
				break
			}
		}
		// Add new child
		if st.children[idx] == nil {
			st.children[idx] = newLeaf(key[1:], value, st.db)
		} else {
			st.children[idx].insert(key[1:], value)
		}
	case extNode: /* Ext */
		// Compare both key chunks and see where they differ
		diffidx := st.getDiffIndex(key)

		// Check if chunks are identical. If so, recurse into
		// the child node. Otherwise, the key has to be split
		// into 1) an optional common prefix, 2) the fullnode
		// representing the two differing paths, and 3) a leaf
		// for each of the differentiated subtrees.
		if diffidx == len(st.key) {
			// Ext key and key segment are identical, recurse into
			// the child node.
			st.children[0].insert(key[diffidx:], value)
			return
		}
		// Save the original part. Depending on whether the break is
		// at the extension's last byte or not, create an
		// intermediate extension or use the extension's child
		// node directly.
		var n *StackTrie
		if diffidx < len(st.key)-1 {
			n = newExt(st.key[diffidx+1:], st.children[0], st.db)
		} else {
			// Break on the last byte, no need to insert
			// an extension node: reuse the current node
			n = st.children[0]
		}
		// Convert to hash
		n.hash()
		var p *StackTrie
		if diffidx == 0 {
			// the break is on the first byte, so
			// the current node is converted into
			// a branch node.
			st.children[0] = nil
			p = st
			st.nodeType = branchNode
		} else {
			// the common prefix is at least one byte
			// long, insert a new intermediate branch
			// node.
			st.children[0] = stackTrieFromPool(st.db)
			st.children[0].nodeType = branchNode
			p = st.children[0]
		}
		// Create a leaf for the inserted part
		o := newLeaf(key[diffidx+1:], value, st.db)

		// Insert both child leaves where they belong:
		origIdx := st.key[diffidx]
		newIdx := key[diffidx]
		p.children[origIdx] = n
		p.children[newIdx] = o
		st.key = st.key[:diffidx]
	case leafNode: /* Leaf */
		// Compare both key chunks and see where they differ
		diffidx := st.getDiffIndex(key)

		// Overwriting a key isn't supported, which means that
		// the current leaf is expected to be split into 1) an
		// optional extension for the common prefix of these 2
		// keys, 2) a fullnode selecting the path on which the
		// keys differ, and 3) one leaf for the differentiated
		// component of each key.
		if diffidx >= len(st.key) {
			panic("Trying to insert into existing key")
		}

		// Check if the split occurs at the first nibble of the
		// chunk. In that case, no prefix extnode is necessary.
		// Otherwise, create one.
		var p *StackTrie
		if diffidx == 0 {
			// Convert current leaf into a branch
			st.nodeType = branchNode
			p = st
			st.children[0] = nil
		} else {
			// Convert current node into an ext,
			// and insert a child branch node.
			st.nodeType = extNode
			st.children[0] = NewStackTrie(st.db)
			st.children[0].nodeType = branchNode
			p = st.children[0]
		}

		// Create the two child leaves: one containing the original
		// value and another containing the new value. The child leaf
		// is hashed directly in order to free up some memory.
		origIdx := st.key[diffidx]
		p.children[origIdx] = newLeaf(st.key[diffidx+1:], st.val, st.db)
		p.children[origIdx].hash()

		newIdx := key[diffidx]
		p.children[newIdx] = newLeaf(key[diffidx+1:], value, st.db)

		// Finally, cut off the key part that has been passed
		// over to the children.
		st.key = st.key[:diffidx]
		st.val = nil
	case emptyNode: /* Empty */
		st.nodeType = leafNode
		st.key = key
		st.val = value
	case hashedNode:
		panic("trying to insert into hash")
	default:
		panic("invalid type")
	}
}
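
// A worked example (illustrative): after inserting a first key with nibbles
// [1, 2, 3], st is a leaf. Inserting [1, 2, 5] hits the leafNode case with
// diffidx == 2: st becomes an extension with key [1, 2] pointing to a new
// branch, whose slots 3 and 5 hold the two value leaves; the elder leaf is
// hashed immediately so its memory can be released.
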
// hash converts st into a 'hashedNode', if possible. Possible outcomes:
//
// 1. The rlp-encoded value was >= 32 bytes:
//   - Then the 32-byte `hash` will be accessible in `st.val`.
//   - And 'st.nodeType' will be 'hashedNode'
//
// 2. The rlp-encoded value was < 32 bytes:
//   - Then the <32 byte rlp-encoded value will be accessible in 'st.val'.
//   - And 'st.nodeType' will be 'hashedNode' AGAIN
//
// This method also sets 'st.nodeType' to hashedNode, and clears 'st.key'.
func (st *StackTrie) hash() {
	h := newHasher(false)
	defer returnHasherToPool(h)

	st.hashRec(h)
}
func (st *StackTrie) hashRec(hasher *hasher) {
	// The switch below sets this to the RLP-encoding of this node.
	var encodedNode []byte

	switch st.nodeType {
	case hashedNode:
		return

	case emptyNode:
		st.val = emptyRoot.Bytes()
		st.key = st.key[:0]
		st.nodeType = hashedNode
		return

	case branchNode:
		var nodes rawFullNode
		for i, child := range st.children {
			if child == nil {
				nodes[i] = nilValueNode
				continue
			}
			child.hashRec(hasher)
			if len(child.val) < 32 {
				nodes[i] = rawNode(child.val)
			} else {
				nodes[i] = hashNode(child.val)
			}
			// Release child back to pool.
			st.children[i] = nil
			returnToPool(child)
		}
		nodes.encode(hasher.encbuf)
		encodedNode = hasher.encodedBytes()

	case extNode:
		st.children[0].hashRec(hasher)

		sz := hexToCompactInPlace(st.key)
		n := rawShortNode{Key: st.key[:sz]}
		if len(st.children[0].val) < 32 {
			n.Val = rawNode(st.children[0].val)
		} else {
			n.Val = hashNode(st.children[0].val)
		}
		n.encode(hasher.encbuf)
		encodedNode = hasher.encodedBytes()

		// Release child back to pool.
		returnToPool(st.children[0])
		st.children[0] = nil

	case leafNode:
		st.key = append(st.key, byte(16))
		sz := hexToCompactInPlace(st.key)
		n := rawShortNode{Key: st.key[:sz], Val: valueNode(st.val)}
		n.encode(hasher.encbuf)
		encodedNode = hasher.encodedBytes()

	default:
		panic("invalid node type")
	}

	st.nodeType = hashedNode
	st.key = st.key[:0]
	if len(encodedNode) < 32 {
		st.val = common.CopyBytes(encodedNode)
		return
	}
	// Write the hash to the 'val'. We allocate a new val here to not mutate
	// the input values.
	st.val = hasher.hashData(encodedNode)
	if st.db != nil {
		// TODO! Is it safe to Put the slice here?
		// Do all db implementations copy the value provided?
		st.db.Put(st.val, encodedNode)
	}
}
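
// For example (illustrative): a leaf whose RLP encoding is 30 bytes is kept
// inline, so st.val holds the 30-byte encoding and nothing is written to the
// database; an encoding of 33 bytes is hashed, st.val holds the 32-byte hash,
// and the (hash, encoding) pair is written to st.db if one is attached.
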
// Hash returns the hash of the current node.
func (st *StackTrie) Hash() (h common.Hash) {
	hasher := newHasher(false)
	defer returnHasherToPool(hasher)

	st.hashRec(hasher)
	if len(st.val) == 32 {
		copy(h[:], st.val)
		return h
	}
	// If the node's RLP isn't 32 bytes long, the node will not
	// be hashed, and instead contain the rlp-encoding of the
	// node. For the top level node, we need to force the hashing.
	hasher.sha.Reset()
	hasher.sha.Write(st.val)
	hasher.sha.Read(h[:])
	return h
}
// Commit will first hash the entire trie if it hasn't been hashed yet, and
// then commit all nodes to the associated database. Most of the trie nodes
// have likely been committed already during hashing; the main purpose here
// is to commit the root node.
//
// An associated database is required; without one, the whole commit
// functionality is disabled and ErrCommitDisabled is returned.
func (st *StackTrie) Commit() (h common.Hash, err error) {
	if st.db == nil {
		return common.Hash{}, ErrCommitDisabled
	}

	hasher := newHasher(false)
	defer returnHasherToPool(hasher)

	st.hashRec(hasher)
	if len(st.val) == 32 {
		copy(h[:], st.val)
		return h, nil
	}
	// If the node's RLP isn't 32 bytes long, the node will not
	// be hashed (and committed), and instead contain the rlp-encoding of the
	// node. For the top level node, we need to force the hashing+commit.
	hasher.sha.Reset()
	hasher.sha.Write(st.val)
	hasher.sha.Read(h[:])
	st.db.Put(h[:], st.val)
	return h, nil
}
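
// A commit sketch (illustrative; memorydb from
// github.com/ethereum/go-ethereum/ethdb/memorydb is assumed as the
// ethdb.KeyValueWriter implementation):
//
//	db := memorydb.New()
//	st := NewStackTrie(db)
//	st.Update([]byte{0x01}, []byte("a"))
//	st.Update([]byte{0x02}, []byte("b"))
//	root, err := st.Commit() // trie nodes are persisted to db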