// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package snapshot implements a journalled, dynamic state dump.
package snapshot

import (
	"bytes"
	"errors"
	"fmt"
	"sync"
	"sync/atomic"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
)

var (
	snapshotCleanAccountHitMeter   = metrics.NewRegisteredMeter("state/snapshot/clean/account/hit", nil)
	snapshotCleanAccountMissMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/account/miss", nil)
	snapshotCleanAccountInexMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/account/inex", nil)
	snapshotCleanAccountReadMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/account/read", nil)
	snapshotCleanAccountWriteMeter = metrics.NewRegisteredMeter("state/snapshot/clean/account/write", nil)

	snapshotCleanStorageHitMeter   = metrics.NewRegisteredMeter("state/snapshot/clean/storage/hit", nil)
	snapshotCleanStorageMissMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/storage/miss", nil)
	snapshotCleanStorageInexMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/storage/inex", nil)
	snapshotCleanStorageReadMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/storage/read", nil)
	snapshotCleanStorageWriteMeter = metrics.NewRegisteredMeter("state/snapshot/clean/storage/write", nil)

	snapshotDirtyAccountHitMeter   = metrics.NewRegisteredMeter("state/snapshot/dirty/account/hit", nil)
	snapshotDirtyAccountMissMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/account/miss", nil)
	snapshotDirtyAccountInexMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/account/inex", nil)
	snapshotDirtyAccountReadMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/account/read", nil)
	snapshotDirtyAccountWriteMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/account/write", nil)

	snapshotDirtyStorageHitMeter   = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/hit", nil)
	snapshotDirtyStorageMissMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/miss", nil)
	snapshotDirtyStorageInexMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/inex", nil)
	snapshotDirtyStorageReadMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/read", nil)
	snapshotDirtyStorageWriteMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/write", nil)

	snapshotDirtyAccountHitDepthHist = metrics.NewRegisteredHistogram("state/snapshot/dirty/account/hit/depth", nil, metrics.NewExpDecaySample(1028, 0.015))
	snapshotDirtyStorageHitDepthHist = metrics.NewRegisteredHistogram("state/snapshot/dirty/storage/hit/depth", nil, metrics.NewExpDecaySample(1028, 0.015))

	snapshotFlushAccountItemMeter = metrics.NewRegisteredMeter("state/snapshot/flush/account/item", nil)
	snapshotFlushAccountSizeMeter = metrics.NewRegisteredMeter("state/snapshot/flush/account/size", nil)
	snapshotFlushStorageItemMeter = metrics.NewRegisteredMeter("state/snapshot/flush/storage/item", nil)
	snapshotFlushStorageSizeMeter = metrics.NewRegisteredMeter("state/snapshot/flush/storage/size", nil)

	snapshotBloomIndexTimer = metrics.NewRegisteredResettingTimer("state/snapshot/bloom/index", nil)
	snapshotBloomErrorGauge = metrics.NewRegisteredGaugeFloat64("state/snapshot/bloom/error", nil)

	snapshotBloomAccountTrueHitMeter  = metrics.NewRegisteredMeter("state/snapshot/bloom/account/truehit", nil)
	snapshotBloomAccountFalseHitMeter = metrics.NewRegisteredMeter("state/snapshot/bloom/account/falsehit", nil)
	snapshotBloomAccountMissMeter     = metrics.NewRegisteredMeter("state/snapshot/bloom/account/miss", nil)

	snapshotBloomStorageTrueHitMeter  = metrics.NewRegisteredMeter("state/snapshot/bloom/storage/truehit", nil)
	snapshotBloomStorageFalseHitMeter = metrics.NewRegisteredMeter("state/snapshot/bloom/storage/falsehit", nil)
	snapshotBloomStorageMissMeter     = metrics.NewRegisteredMeter("state/snapshot/bloom/storage/miss", nil)

	// ErrSnapshotStale is returned from data accessors if the underlying snapshot
	// layer had been invalidated due to the chain progressing forward far enough
	// to not maintain the layer's original state.
	ErrSnapshotStale = errors.New("snapshot stale")

	// ErrNotCoveredYet is returned from data accessors if the underlying snapshot
	// is being generated currently and the requested data item is not yet in the
	// range of accounts covered.
	ErrNotCoveredYet = errors.New("not covered yet")

	// ErrNotConstructed is returned if a caller wants to iterate the snapshot
	// while generation is not yet finished.
	ErrNotConstructed = errors.New("snapshot is not constructed")

	// errSnapshotCycle is returned if an attempt is made to insert a snapshot
	// that would form a cycle in the snapshot tree.
	errSnapshotCycle = errors.New("snapshot cycle")
)

// Snapshot represents the functionality supported by a snapshot storage layer.
type Snapshot interface {
	// Root returns the root hash for which this snapshot was made.
	Root() common.Hash

	// Account directly retrieves the account associated with a particular hash in
	// the snapshot slim data format.
	Account(hash common.Hash) (*Account, error)

	// AccountRLP directly retrieves the account RLP associated with a particular
	// hash in the snapshot slim data format.
	AccountRLP(hash common.Hash) ([]byte, error)

	// Storage directly retrieves the storage data associated with a particular hash,
	// within a particular account.
	Storage(accountHash, storageHash common.Hash) ([]byte, error)
}
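
// The interface is read-only and keyed by hashes rather than by plain addresses
// or slot keys. Below is an illustrative sketch of how a caller might use it; the
// `tree`, `stateRoot`, `addr` and `key` values are hypothetical caller-side data,
// and the crypto package import is assumed:
//
//	snap := tree.Snapshot(stateRoot)
//	if snap == nil {
//		return // no snapshot maintained for this root
//	}
//	acc, err := snap.Account(crypto.Keccak256Hash(addr.Bytes()))
//	if err != nil {
//		return // e.g. ErrSnapshotStale or ErrNotCoveredYet
//	}
//	slot, err := snap.Storage(crypto.Keccak256Hash(addr.Bytes()), crypto.Keccak256Hash(key.Bytes()))
//	_, _, _ = acc, slot, err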

// snapshot is the internal version of the snapshot data layer that supports some
// additional methods compared to the public API.
type snapshot interface {
	Snapshot

	// Parent returns the subsequent layer of a snapshot, or nil if the base was
	// reached.
	//
	// Note, the method is an internal helper to avoid type switching between the
	// disk and diff layers. There is no locking involved.
	Parent() snapshot

	// Update creates a new layer on top of the existing snapshot diff tree with
	// the specified data items.
	//
	// Note, the maps are retained by the method to avoid copying everything.
	Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer

	// Journal commits an entire diff hierarchy to disk into a single journal entry.
	// This is meant to be used during shutdown to persist the snapshot without
	// flattening everything down (bad for reorgs).
	Journal(buffer *bytes.Buffer) (common.Hash, error)

	// Stale returns whether this layer has become stale (was flattened across) or
	// if it's still live.
	Stale() bool

	// AccountIterator creates an account iterator over an arbitrary layer.
	AccountIterator(seek common.Hash) AccountIterator

	// StorageIterator creates a storage iterator over an arbitrary layer.
	StorageIterator(account common.Hash, seek common.Hash) (StorageIterator, bool)
}

// Config includes the configurations for snapshots.
type Config struct {
	CacheSize  int  // Megabytes permitted to use for read caches
	Recovery   bool // Indicator that the snapshot is in recovery mode
	NoBuild    bool // Indicator that snapshot generation is disallowed
	AsyncBuild bool // Indicator that snapshot generation is allowed to run asynchronously
}
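
// A minimal sketch of how the knobs combine in practice; the concrete values and
// the `diskdb`, `triedb` and `headRoot` variables below are illustrative
// assumptions supplied by the caller, not defaults defined in this file:
//
//	cfg := Config{
//		CacheSize:  256,   // 256 MB of clean cache for snapshot reads
//		Recovery:   false, // not resuming from an interrupted state
//		NoBuild:    false, // allow regeneration if the snapshot is missing or broken
//		AsyncBuild: true,  // don't block startup while the generator runs
//	}
//	tree, err := New(cfg, diskdb, triedb, headRoot)
//	_, _ = tree, err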

// Tree is an Ethereum state snapshot tree. It consists of one persistent base
// layer backed by a key-value store, on top of which arbitrarily many in-memory
// diff layers are topped. The memory diffs can form a tree with branching, but
// the disk layer is singleton and common to all. If a reorg goes deeper than the
// disk layer, everything needs to be deleted.
//
// The goal of a state snapshot is twofold: to allow direct access to account and
// storage data to avoid expensive multi-level trie lookups; and to allow sorted,
// cheap iteration of the account/storage tries for sync aid.
type Tree struct {
	config Config                   // Snapshot configuration
	diskdb ethdb.KeyValueStore      // Persistent database to store the snapshot
	triedb *trie.Database           // In-memory cache to access the trie through
	layers map[common.Hash]snapshot // Collection of all known layers
	lock   sync.RWMutex

	// Test hooks
	onFlatten func() // Hook invoked when the bottom-most diff layers are flattened
}

// New attempts to load an already existing snapshot from a persistent key-value
// store (with a number of memory layers from a journal), ensuring that the head
// of the snapshot matches the expected one.
//
// If the snapshot is missing or the disk layer is broken, the snapshot will be
// reconstructed using both the existing data and the state trie.
// The repair happens on a background thread.
//
// If the memory layers in the journal do not match the disk layer (e.g. there is
// a gap) or the journal is missing, there are two repair cases:
//
// - if the 'recovery' parameter is true, all memory diff-layers will be discarded.
//   This case happens when the snapshot is 'ahead' of the state trie.
// - otherwise, the entire snapshot is considered invalid and will be recreated on
//   a background thread.
func New(config Config, diskdb ethdb.KeyValueStore, triedb *trie.Database, root common.Hash) (*Tree, error) {
	// Create a new, empty snapshot tree
	snap := &Tree{
		config: config,
		diskdb: diskdb,
		triedb: triedb,
		layers: make(map[common.Hash]snapshot),
	}
	// Create the building waiter iff the background generation is allowed
	if !config.NoBuild && !config.AsyncBuild {
		defer snap.waitBuild()
	}
	// Attempt to load a previously persisted snapshot and rebuild one if failed
	head, disabled, err := loadSnapshot(diskdb, triedb, root, config.CacheSize, config.Recovery, config.NoBuild)
	if disabled {
		log.Warn("Snapshot maintenance disabled (syncing)")
		return snap, nil
	}
	if err != nil {
		log.Warn("Failed to load snapshot", "err", err)
		if !config.NoBuild {
			snap.Rebuild(root)
			return snap, nil
		}
		return nil, err // Bail out the error, don't rebuild automatically.
	}
	// Existing snapshot loaded, seed all the layers
	for head != nil {
		snap.layers[head.Root()] = head
		head = head.Parent()
	}
	return snap, nil
}
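
// A hedged sketch of wiring the tree up at startup. The in-memory database is
// used purely for illustration; `headRoot` is assumed to come from the caller's
// chain setup and is not defined in this file:
//
//	diskdb := rawdb.NewMemoryDatabase()
//	triedb := trie.NewDatabase(diskdb)
//	tree, err := New(Config{CacheSize: 256, AsyncBuild: true}, diskdb, triedb, headRoot)
//	if err != nil {
//		log.Error("Snapshot tree unavailable", "err", err)
//	}
//	_ = tree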

// waitBuild blocks until the snapshot finishes rebuilding. This method is meant
// to be used by tests to ensure we're testing what we believe we are.
func (t *Tree) waitBuild() {
	// Find the rebuild termination channel
	var done chan struct{}

	t.lock.RLock()
	for _, layer := range t.layers {
		if layer, ok := layer.(*diskLayer); ok {
			done = layer.genPending
			break
		}
	}
	t.lock.RUnlock()

	// Wait until the snapshot is generated
	if done != nil {
		<-done
	}
}

// Disable interrupts any pending snapshot generator, deletes all the snapshot
// layers in memory and marks snapshots disabled globally. In order to resume
// the snapshot functionality, the caller must invoke Rebuild.
func (t *Tree) Disable() {
	// Interrupt any live snapshot layers
	t.lock.Lock()
	defer t.lock.Unlock()

	for _, layer := range t.layers {
		switch layer := layer.(type) {
		case *diskLayer:
			// If the base layer is generating, abort it
			if layer.genAbort != nil {
				abort := make(chan *generatorStats)
				layer.genAbort <- abort
				<-abort
			}
			// Layer should be inactive now, mark it as stale
			layer.lock.Lock()
			layer.stale = true
			layer.lock.Unlock()

		case *diffLayer:
			// If the layer is a simple diff, simply mark as stale
			layer.lock.Lock()
			atomic.StoreUint32(&layer.stale, 1)
			layer.lock.Unlock()

		default:
			panic(fmt.Sprintf("unknown layer type: %T", layer))
		}
	}
	t.layers = map[common.Hash]snapshot{}

	// Delete all snapshot liveness information from the database
	batch := t.diskdb.NewBatch()

	rawdb.WriteSnapshotDisabled(batch)
	rawdb.DeleteSnapshotRoot(batch)
	rawdb.DeleteSnapshotJournal(batch)
	rawdb.DeleteSnapshotGenerator(batch)
	rawdb.DeleteSnapshotRecoveryNumber(batch)
	// Note, we don't delete the sync progress

	if err := batch.Write(); err != nil {
		log.Crit("Failed to disable snapshots", "err", err)
	}
}

// Snapshot retrieves a snapshot belonging to the given block root, or nil if no
// snapshot is maintained for that block.
func (t *Tree) Snapshot(blockRoot common.Hash) Snapshot {
	t.lock.RLock()
	defer t.lock.RUnlock()

	return t.layers[blockRoot]
}

// Snapshots returns all visited layers from the topmost layer with the specific
// root, traversing downward. The number of returned layers is limited by the
// given number. If nodisk is set, the disk layer is excluded.
func (t *Tree) Snapshots(root common.Hash, limits int, nodisk bool) []Snapshot {
	t.lock.RLock()
	defer t.lock.RUnlock()

	if limits == 0 {
		return nil
	}
	layer := t.layers[root]
	if layer == nil {
		return nil
	}
	var ret []Snapshot
	for {
		if _, isdisk := layer.(*diskLayer); isdisk && nodisk {
			break
		}
		ret = append(ret, layer)
		limits -= 1
		if limits == 0 {
			break
		}
		parent := layer.Parent()
		if parent == nil {
			break
		}
		layer = parent
	}
	return ret
}
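
// An illustrative sketch of collecting the in-memory diff layers above the head;
// the `tree` and `headRoot` values are assumed to be supplied by the caller:
//
//	diffs := tree.Snapshots(headRoot, 128, true) // at most 128 layers, skip the disk layer
//	for _, layer := range diffs {
//		log.Debug("Tracked diff layer", "root", layer.Root())
//	}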

// Update adds a new snapshot into the tree, if that can be linked to an existing
// old parent. It is disallowed to insert a disk layer (the origin of all).
func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
	// Reject noop updates to avoid self-loops in the snapshot tree. This is a
	// special case that can only happen for Clique networks where empty blocks
	// don't modify the state (0 block subsidy).
	//
	// Although we could silently ignore this internally, it should be the caller's
	// responsibility to avoid even attempting to insert such a snapshot.
	if blockRoot == parentRoot {
		return errSnapshotCycle
	}
	// Generate a new snapshot on top of the parent
	parent := t.Snapshot(parentRoot)
	if parent == nil {
		return fmt.Errorf("parent [%#x] snapshot missing", parentRoot)
	}
	snap := parent.(snapshot).Update(blockRoot, destructs, accounts, storage)

	// Save the new snapshot for later
	t.lock.Lock()
	defer t.lock.Unlock()

	t.layers[snap.root] = snap
	return nil
}
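
// A hedged sketch of how a block-processing caller might feed state changes into
// the tree after executing a block; `destructed`, `accounts` and `storage` are
// assumed to be produced by the caller's state diff, keyed by hashes:
//
//	if err := tree.Update(newRoot, parentRoot, destructed, accounts, storage); err != nil {
//		log.Error("Failed to record snapshot diff", "err", err)
//	}
//	// Periodically cap the tree so the in-memory diffs stay bounded, e.g.:
//	// tree.Cap(newRoot, 128)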

// Cap traverses downwards the snapshot tree from a head block hash until the
// number of allowed layers is crossed. All layers beyond the permitted number
// are flattened downwards.
//
// Note, the final diff layer count in general will be one more than the amount
// requested. This happens because the bottom-most diff layer is the accumulator
// which may or may not overflow and cascade to disk. Since this last layer's
// survival is only known *after* capping, we need to omit it from the count if
// we want to ensure that *at least* the requested number of diff layers remain.
func (t *Tree) Cap(root common.Hash, layers int) error {
	// Retrieve the head snapshot to cap from
	snap := t.Snapshot(root)
	if snap == nil {
		return fmt.Errorf("snapshot [%#x] missing", root)
	}
	diff, ok := snap.(*diffLayer)
	if !ok {
		return fmt.Errorf("snapshot [%#x] is disk layer", root)
	}
	// If the generator is still running, use a more aggressive cap
	diff.origin.lock.RLock()
	if diff.origin.genMarker != nil && layers > 8 {
		layers = 8
	}
	diff.origin.lock.RUnlock()

	// Run the internal capping and discard all stale layers
	t.lock.Lock()
	defer t.lock.Unlock()

	// Flattening the bottom-most diff layer requires special casing since there's
	// no child to rewire to the grandparent. In that case we can fake a temporary
	// child for the capping and then remove it.
	if layers == 0 {
		// If full commit was requested, flatten the diffs and merge onto disk
		diff.lock.RLock()
		base := diffToDisk(diff.flatten().(*diffLayer))
		diff.lock.RUnlock()

		// Replace the entire snapshot tree with the flat base
		t.layers = map[common.Hash]snapshot{base.root: base}
		return nil
	}
	persisted := t.cap(diff, layers)

	// Remove any layer that is stale or links into a stale layer
	children := make(map[common.Hash][]common.Hash)
	for root, snap := range t.layers {
		if diff, ok := snap.(*diffLayer); ok {
			parent := diff.parent.Root()
			children[parent] = append(children[parent], root)
		}
	}
	var remove func(root common.Hash)
	remove = func(root common.Hash) {
		delete(t.layers, root)
		for _, child := range children[root] {
			remove(child)
		}
		delete(children, root)
	}
	for root, snap := range t.layers {
		if snap.Stale() {
			remove(root)
		}
	}
	// If the disk layer was modified, regenerate all the cumulative blooms
	if persisted != nil {
		var rebloom func(root common.Hash)
		rebloom = func(root common.Hash) {
			if diff, ok := t.layers[root].(*diffLayer); ok {
				diff.rebloom(persisted)
			}
			for _, child := range children[root] {
				rebloom(child)
			}
		}
		rebloom(persisted.root)
	}
	return nil
}
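
// A minimal sketch of the typical post-block maintenance call; `tree` and
// `headRoot` are assumed caller-side values and 128 is simply an illustrative
// layer budget, not a constant defined in this file:
//
//	if err := tree.Cap(headRoot, 128); err != nil {
//		log.Error("Failed to cap snapshot tree", "root", headRoot, "err", err)
//	}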

// cap traverses downwards the diff tree until the number of allowed layers is
// crossed. All diffs beyond the permitted number are flattened downwards. If the
// layer limit is reached, memory cap is also enforced (but not before).
//
// The method returns the new disk layer if diffs were persisted into it.
//
// Note, the final diff layer count in general will be one more than the amount
// requested. This happens because the bottom-most diff layer is the accumulator
// which may or may not overflow and cascade to disk. Since this last layer's
// survival is only known *after* capping, we need to omit it from the count if
// we want to ensure that *at least* the requested number of diff layers remain.
func (t *Tree) cap(diff *diffLayer, layers int) *diskLayer {
	// Dive until we run out of layers or reach the persistent database
	for i := 0; i < layers-1; i++ {
		// If we still have diff layers below, continue down
		if parent, ok := diff.parent.(*diffLayer); ok {
			diff = parent
		} else {
			// Diff stack too shallow, return without modifications
			return nil
		}
	}
	// We're out of layers, flatten anything below, stopping if it's the disk or if
	// the memory limit is not yet exceeded.
	switch parent := diff.parent.(type) {
	case *diskLayer:
		return nil

	case *diffLayer:
		// Hold the write lock until the flattened parent is linked correctly.
		// Otherwise, the stale layer may be accessed by external reads in the
		// meantime.
		diff.lock.Lock()
		defer diff.lock.Unlock()

		// Flatten the parent into the grandparent. The flattening internally obtains a
		// write lock on grandparent.
		flattened := parent.flatten().(*diffLayer)
		t.layers[flattened.root] = flattened

		// Invoke the hook if it's registered. Ugly hack.
		if t.onFlatten != nil {
			t.onFlatten()
		}
		diff.parent = flattened
		if flattened.memory < aggregatorMemoryLimit {
			// Accumulator layer is smaller than the limit, so we can abort, unless
			// there's a snapshot being generated currently. In that case, the trie
			// will move from underneath the generator so we **must** merge all the
			// partial data down into the snapshot and restart the generation.
			if flattened.parent.(*diskLayer).genAbort == nil {
				return nil
			}
		}
	default:
		panic(fmt.Sprintf("unknown data layer: %T", parent))
	}
	// If the bottom-most layer is larger than our memory cap, persist to disk
	bottom := diff.parent.(*diffLayer)

	bottom.lock.RLock()
	base := diffToDisk(bottom)
	bottom.lock.RUnlock()

	t.layers[base.root] = base
	diff.parent = base
	return base
}

// diffToDisk merges a bottom-most diff into the persistent disk layer underneath
// it. The method will panic if called onto a non-bottom-most diff layer.
//
// The disk layer persistence should be operated in an atomic way. All updates should
// be discarded if the whole transition is not finished.
func diffToDisk(bottom *diffLayer) *diskLayer {
	var (
		base  = bottom.parent.(*diskLayer)
		batch = base.diskdb.NewBatch()
		stats *generatorStats
	)
	// If the disk layer is running a snapshot generator, abort it
	if base.genAbort != nil {
		abort := make(chan *generatorStats)
		base.genAbort <- abort
		stats = <-abort
	}
	// Put the deletion in the batch writer, flush all updates in the final step.
	rawdb.DeleteSnapshotRoot(batch)

	// Mark the original base as stale as we're going to create a new wrapper
	base.lock.Lock()
	if base.stale {
		panic("parent disk layer is stale") // we've committed into the same base from two children, boo
	}
	base.stale = true
	base.lock.Unlock()

	// Destroy all the destructed accounts from the database
	for hash := range bottom.destructSet {
		// Skip any account not covered yet by the snapshot
		if base.genMarker != nil && bytes.Compare(hash[:], base.genMarker) > 0 {
			continue
		}
		// Remove all storage slots
		rawdb.DeleteAccountSnapshot(batch, hash)
		base.cache.Set(hash[:], nil)

		it := rawdb.IterateStorageSnapshots(base.diskdb, hash)
		for it.Next() {
			key := it.Key()
			batch.Delete(key)
			base.cache.Del(key[1:])
			snapshotFlushStorageItemMeter.Mark(1)

			// Ensure we don't delete too much data blindly (contract can be
			// huge). It's ok to flush, the root will go missing in case of a
			// crash and we'll detect and regenerate the snapshot.
			if batch.ValueSize() > ethdb.IdealBatchSize {
				if err := batch.Write(); err != nil {
					log.Crit("Failed to write storage deletions", "err", err)
				}
				batch.Reset()
			}
		}
		it.Release()
	}
	// Push all updated accounts into the database
	for hash, data := range bottom.accountData {
		// Skip any account not covered yet by the snapshot
		if base.genMarker != nil && bytes.Compare(hash[:], base.genMarker) > 0 {
			continue
		}
		// Push the account to disk
		rawdb.WriteAccountSnapshot(batch, hash, data)
		base.cache.Set(hash[:], data)
		snapshotCleanAccountWriteMeter.Mark(int64(len(data)))

		snapshotFlushAccountItemMeter.Mark(1)
		snapshotFlushAccountSizeMeter.Mark(int64(len(data)))

		// Ensure we don't write too much data blindly. It's ok to flush, the
		// root will go missing in case of a crash and we'll detect and regen
		// the snapshot.
		if batch.ValueSize() > ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				log.Crit("Failed to write account data", "err", err)
			}
			batch.Reset()
		}
	}
	// Push all the storage slots into the database
	for accountHash, storage := range bottom.storageData {
		// Skip any account not covered yet by the snapshot
		if base.genMarker != nil && bytes.Compare(accountHash[:], base.genMarker) > 0 {
			continue
		}
		// Generation might be mid-account, track that case too
		midAccount := base.genMarker != nil && bytes.Equal(accountHash[:], base.genMarker[:common.HashLength])

		for storageHash, data := range storage {
			// Skip any slot not covered yet by the snapshot
			if midAccount && bytes.Compare(storageHash[:], base.genMarker[common.HashLength:]) > 0 {
				continue
			}
			if len(data) > 0 {
				rawdb.WriteStorageSnapshot(batch, accountHash, storageHash, data)
				base.cache.Set(append(accountHash[:], storageHash[:]...), data)
				snapshotCleanStorageWriteMeter.Mark(int64(len(data)))
			} else {
				rawdb.DeleteStorageSnapshot(batch, accountHash, storageHash)
				base.cache.Set(append(accountHash[:], storageHash[:]...), nil)
			}
			snapshotFlushStorageItemMeter.Mark(1)
			snapshotFlushStorageSizeMeter.Mark(int64(len(data)))
		}
	}
	// Update the snapshot block marker and write any remainder data
	rawdb.WriteSnapshotRoot(batch, bottom.root)

	// Write out the generator progress marker and report
	journalProgress(batch, base.genMarker, stats)

	// Flush all the updates in the single db operation. Ensure the
	// disk layer transition is atomic.
	if err := batch.Write(); err != nil {
		log.Crit("Failed to write leftover snapshot", "err", err)
	}
	log.Debug("Journalled disk layer", "root", bottom.root, "complete", base.genMarker == nil)
	res := &diskLayer{
		root:       bottom.root,
		cache:      base.cache,
		diskdb:     base.diskdb,
		triedb:     base.triedb,
		genMarker:  base.genMarker,
		genPending: base.genPending,
	}
	// If snapshot generation hasn't finished yet, port over all the starts and
	// continue where the previous round left off.
	//
	// Note, the `base.genAbort` comparison is not used normally, it's checked
	// to allow the tests to play with the marker without triggering this path.
	if base.genMarker != nil && base.genAbort != nil {
		res.genMarker = base.genMarker
		res.genAbort = make(chan chan *generatorStats)
		go res.generate(stats)
	}
	return res
}

// Journal commits an entire diff hierarchy to disk into a single journal entry.
// This is meant to be used during shutdown to persist the snapshot without
// flattening everything down (bad for reorgs).
//
// The method returns the root hash of the base layer that needs to be persisted
// to disk as a trie too to allow continuing any pending generation op.
func (t *Tree) Journal(root common.Hash) (common.Hash, error) {
	// Retrieve the head snapshot to journal from
	snap := t.Snapshot(root)
	if snap == nil {
		return common.Hash{}, fmt.Errorf("snapshot [%#x] missing", root)
	}
	// Run the journaling
	t.lock.Lock()
	defer t.lock.Unlock()

	// Firstly write out the metadata of the journal
	journal := new(bytes.Buffer)
	if err := rlp.Encode(journal, journalVersion); err != nil {
		return common.Hash{}, err
	}
	diskroot := t.diskRoot()
	if diskroot == (common.Hash{}) {
		return common.Hash{}, errors.New("invalid disk root")
	}
	// Secondly write out the disk layer root, ensure the
	// diff journal is continuous with disk.
	if err := rlp.Encode(journal, diskroot); err != nil {
		return common.Hash{}, err
	}
	// Finally write out the journal of each layer in reverse order.
	base, err := snap.(snapshot).Journal(journal)
	if err != nil {
		return common.Hash{}, err
	}
	// Store the journal into the database and return
	rawdb.WriteSnapshotJournal(t.diskdb, journal.Bytes())
	return base, nil
}
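
// A hedged sketch of the shutdown path; `tree` and `headRoot` are assumed to be
// the caller's snapshot tree and the chain-head state root at the time the node
// stops:
//
//	if base, err := tree.Journal(headRoot); err != nil {
//		log.Error("Failed to journal state snapshot", "err", err)
//	} else {
//		log.Info("Persisted snapshot journal", "disk layer root", base)
//	}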

// Rebuild wipes all available snapshot data from the persistent database and
// discards all caches and diff layers. Afterwards, it starts a new snapshot
// generator with the given root hash.
func (t *Tree) Rebuild(root common.Hash) {
	t.lock.Lock()
	defer t.lock.Unlock()

	// Firstly delete any recovery flag in the database, since we are now building
	// a brand new snapshot. Also reenable the snapshot feature.
	rawdb.DeleteSnapshotRecoveryNumber(t.diskdb)
	rawdb.DeleteSnapshotDisabled(t.diskdb)

	// Iterate over and mark all layers stale
	for _, layer := range t.layers {
		switch layer := layer.(type) {
		case *diskLayer:
			// If the base layer is generating, abort it and save
			if layer.genAbort != nil {
				abort := make(chan *generatorStats)
				layer.genAbort <- abort
				<-abort
			}
			// Layer should be inactive now, mark it as stale
			layer.lock.Lock()
			layer.stale = true
			layer.lock.Unlock()

		case *diffLayer:
			// If the layer is a simple diff, simply mark as stale
			layer.lock.Lock()
			atomic.StoreUint32(&layer.stale, 1)
			layer.lock.Unlock()

		default:
			panic(fmt.Sprintf("unknown layer type: %T", layer))
		}
	}
	// Start generating a new snapshot from scratch on a background thread. The
	// generator will run a wiper first if there's not one running right now.
	log.Info("Rebuilding state snapshot")
	t.layers = map[common.Hash]snapshot{
		root: generateSnapshot(t.diskdb, t.triedb, t.config.CacheSize, root),
	}
}

// AccountIterator creates a new account iterator for the specified root hash and
// seeks to a starting account hash.
func (t *Tree) AccountIterator(root common.Hash, seek common.Hash) (AccountIterator, error) {
	ok, err := t.generating()
	if err != nil {
		return nil, err
	}
	if ok {
		return nil, ErrNotConstructed
	}
	return newFastAccountIterator(t, root, seek)
}
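
// An illustrative sketch of walking every account in the snapshot; `tree` and
// `headRoot` are assumed caller-supplied values and the loop body is purely for
// demonstration:
//
//	it, err := tree.AccountIterator(headRoot, common.Hash{})
//	if err != nil {
//		return // e.g. ErrNotConstructed while generation is still running
//	}
//	defer it.Release()
//	for it.Next() {
//		hash, blob := it.Hash(), it.Account() // slim-RLP encoded account
//		_, _ = hash, blob
//	}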

// StorageIterator creates a new storage iterator for the specified root hash and
// account. The iterator will be moved to the specific start position.
func (t *Tree) StorageIterator(root common.Hash, account common.Hash, seek common.Hash) (StorageIterator, error) {
	ok, err := t.generating()
	if err != nil {
		return nil, err
	}
	if ok {
		return nil, ErrNotConstructed
	}
	return newFastStorageIterator(t, root, account, seek)
}

// Verify iterates the whole state (all the accounts as well as the corresponding storages)
// with the specific root and compares the re-computed hash with the original one.
func (t *Tree) Verify(root common.Hash) error {
	acctIt, err := t.AccountIterator(root, common.Hash{})
	if err != nil {
		return err
	}
	defer acctIt.Release()

	got, err := generateTrieRoot(nil, acctIt, common.Hash{}, stackTrieGenerate, func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) {
		storageIt, err := t.StorageIterator(root, accountHash, common.Hash{})
		if err != nil {
			return common.Hash{}, err
		}
		defer storageIt.Release()

		hash, err := generateTrieRoot(nil, storageIt, accountHash, stackTrieGenerate, nil, stat, false)
		if err != nil {
			return common.Hash{}, err
		}
		return hash, nil
	}, newGenerateStats(), true)

	if err != nil {
		return err
	}
	if got != root {
		return fmt.Errorf("state root hash mismatch: got %x, want %x", got, root)
	}
	return nil
}
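
// A minimal sketch of a consistency check, e.g. from an operator tool; `tree` and
// `headRoot` are assumed caller-side values:
//
//	if err := tree.Verify(headRoot); err != nil {
//		log.Error("State snapshot failed verification", "root", headRoot, "err", err)
//	}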

// disklayer is an internal helper function to return the disk layer.
// The lock of snapTree is assumed to be held already.
func (t *Tree) disklayer() *diskLayer {
	var snap snapshot
	for _, s := range t.layers {
		snap = s
		break
	}
	if snap == nil {
		return nil
	}
	switch layer := snap.(type) {
	case *diskLayer:
		return layer
	case *diffLayer:
		return layer.origin
	default:
		panic(fmt.Sprintf("%T: undefined layer", snap))
	}
}

// diskRoot is an internal helper function to return the disk layer root.
// The lock of snapTree is assumed to be held already.
func (t *Tree) diskRoot() common.Hash {
	disklayer := t.disklayer()
	if disklayer == nil {
		return common.Hash{}
	}
	return disklayer.Root()
}

// generating is an internal helper function which reports whether the snapshot
// is still under construction.
func (t *Tree) generating() (bool, error) {
	t.lock.Lock()
	defer t.lock.Unlock()

	layer := t.disklayer()
	if layer == nil {
		return false, errors.New("disk layer is missing")
	}
	layer.lock.RLock()
	defer layer.lock.RUnlock()
	return layer.genMarker != nil, nil
}

// DiskRoot is an external helper function to return the disk layer root.
func (t *Tree) DiskRoot() common.Hash {
	t.lock.Lock()
	defer t.lock.Unlock()

	return t.diskRoot()
}