// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snapshot

import (
	"encoding/binary"
	"fmt"
	"math"
	"math/rand"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/steakknife/bloomfilter"
)

var (
	// aggregatorMemoryLimit is the maximum size of the bottom-most diff layer
	// that aggregates the writes from above until it's flushed into the disk
	// layer.
	//
	// Note, bumping this up might drastically increase the size of the bloom
	// filters that are stored in every diff layer. Don't do that without fully
	// understanding all the implications.
	aggregatorMemoryLimit = uint64(4 * 1024 * 1024)

	// aggregatorItemLimit is an approximate number of items that will end up
	// in the aggregator layer before it's flushed out to disk. A plain account
	// weighs around 14B (+hash), a storage slot 32B (+hash), a deleted slot
	// 0B (+hash). Slots are mostly set/unset in lockstep, so they average at
	// 16B (+hash). All in all, the average entry seems to be 15+32=47B. Use a
	// smaller number to be on the safe side.
	aggregatorItemLimit = aggregatorMemoryLimit / 42

	// bloomTargetError is the target false positive rate when the aggregator
	// layer is at its fullest. The actual value will probably move around up
	// and down from this number, it's mostly a ballpark figure.
	//
	// Note, dropping this down might drastically increase the size of the bloom
	// filters that are stored in every diff layer. Don't do that without fully
	// understanding all the implications.
	bloomTargetError = 0.02

	// bloomSize is the ideal bloom filter size given the maximum number of items
	// it's expected to hold and the target false positive error rate.
	bloomSize = math.Ceil(float64(aggregatorItemLimit) * math.Log(bloomTargetError) / math.Log(1/math.Pow(2, math.Log(2))))

	// bloomFuncs is the ideal number of bits a single entry should set in the
	// bloom filter to keep its size to a minimum (given its size and maximum
	// entry count).
	bloomFuncs = math.Round((bloomSize / float64(aggregatorItemLimit)) * math.Log(2))
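
	// For reference (derivation added here, not upstream documentation): since
	// math.Log(1/math.Pow(2, math.Log(2))) == -(ln 2)^2, the two expressions
	// above are the textbook optimal bloom filter parameters
	//
	//	m = -n * ln(p) / (ln 2)^2   // bloomSize, in bits
	//	k = (m / n) * ln(2)         // bloomFuncs
	//
	// With the values above, n = 4MiB/42 ≈ 99,864 items and p = 0.02, which
	// works out to m ≈ 813,000 bits (~99KiB per layer) and k = 6 hash functions.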

	// The bloom offsets are runtime constants which determine which part of the
	// account/storage hash the hasher functions look at, to determine the
	// bloom key for an account/slot. This is randomized at init(), so that the
	// global population of nodes do not all display the exact same behaviour
	// with regards to bloom content.
	bloomDestructHasherOffset = 0
	bloomAccountHasherOffset  = 0
	bloomStorageHasherOffset  = 0
)

func init() {
	// Init the bloom offsets in the range [0:24] (requires 8 bytes)
	bloomDestructHasherOffset = rand.Intn(25)
	bloomAccountHasherOffset = rand.Intn(25)
	bloomStorageHasherOffset = rand.Intn(25)

	// The destruct and account blooms must be different, as the storage slots
	// will check for destruction too for every bloom miss. It should not collide
	// with modified accounts.
	for bloomAccountHasherOffset == bloomDestructHasherOffset {
		bloomAccountHasherOffset = rand.Intn(25)
	}
}

// diffLayer represents a collection of modifications made to a state snapshot
// after running a block on top. It contains one sorted list for the account trie
// and one list for each storage trie.
//
// The goal of a diff layer is to act as a journal, tracking recent modifications
// made to the state, that have not yet graduated into a semi-immutable state.
type diffLayer struct {
	origin *diskLayer // Base disk layer to directly use on bloom misses
	parent snapshot   // Parent snapshot modified by this one, never nil
	memory uint64     // Approximate guess as to how much memory we use

	root  common.Hash // Root hash to which this snapshot diff belongs
	stale uint32      // Signals that the layer became stale (state progressed)

	// destructSet is a very special helper marker. If an account is marked as
	// deleted, then it's recorded in this set. However, it's allowed that an
	// account is included here but still available in other sets (e.g. storageData).
	// The reason is that the diff layer includes all the changes in a *block*. It
	// can happen that in tx_1 account A is self-destructed, while in tx_2 it's
	// recreated. But we still need this marker to indicate that the "old" A is
	// deleted; all data in the other sets belongs to the "new" A.
	destructSet map[common.Hash]struct{}               // Keyed markers for deleted (and potentially recreated) accounts
	accountList []common.Hash                          // List of accounts for iteration. If it exists, it's sorted, otherwise it's nil
	accountData map[common.Hash][]byte                 // Keyed accounts for direct retrieval (nil means deleted)
	storageList map[common.Hash][]common.Hash          // List of storage slots for iterated retrievals, one per account. Any existing lists are sorted if non-nil
	storageData map[common.Hash]map[common.Hash][]byte // Keyed storage slots for direct retrieval, one per account (nil means deleted)

	diffed *bloomfilter.Filter // Bloom filter tracking all the diffed items up to the disk layer

	lock sync.RWMutex
}

// destructBloomHasher is a wrapper around a common.Hash to satisfy the interface
// API requirements of the bloom library used. It's used to convert a destruct
// event into a 64 bit mini hash.
type destructBloomHasher common.Hash

func (h destructBloomHasher) Write(p []byte) (n int, err error) { panic("not implemented") }
func (h destructBloomHasher) Sum(b []byte) []byte               { panic("not implemented") }
func (h destructBloomHasher) Reset()                            { panic("not implemented") }
func (h destructBloomHasher) BlockSize() int                    { panic("not implemented") }
func (h destructBloomHasher) Size() int                         { return 8 }
func (h destructBloomHasher) Sum64() uint64 {
	return binary.BigEndian.Uint64(h[bloomDestructHasherOffset : bloomDestructHasherOffset+8])
}
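
// Note (explanatory, not upstream documentation): the inputs to these hashers
// are already Keccak-256 hashes, so any 8 contiguous bytes of them are
// uniformly distributed. Slicing at a randomized offset is therefore a valid,
// essentially free 64 bit hash for the bloom filter. For illustration, with a
// hypothetical offset of 11, the bloom key of a hash h would be
//
//	binary.BigEndian.Uint64(h[11:19])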

// accountBloomHasher is a wrapper around a common.Hash to satisfy the interface
// API requirements of the bloom library used. It's used to convert an account
// hash into a 64 bit mini hash.
type accountBloomHasher common.Hash

func (h accountBloomHasher) Write(p []byte) (n int, err error) { panic("not implemented") }
func (h accountBloomHasher) Sum(b []byte) []byte               { panic("not implemented") }
func (h accountBloomHasher) Reset()                            { panic("not implemented") }
func (h accountBloomHasher) BlockSize() int                    { panic("not implemented") }
func (h accountBloomHasher) Size() int                         { return 8 }
func (h accountBloomHasher) Sum64() uint64 {
	return binary.BigEndian.Uint64(h[bloomAccountHasherOffset : bloomAccountHasherOffset+8])
}

// storageBloomHasher is a wrapper around a [2]common.Hash to satisfy the interface
// API requirements of the bloom library used. It's used to convert an account
// hash and a storage slot hash into a 64 bit mini hash.
type storageBloomHasher [2]common.Hash

func (h storageBloomHasher) Write(p []byte) (n int, err error) { panic("not implemented") }
func (h storageBloomHasher) Sum(b []byte) []byte               { panic("not implemented") }
func (h storageBloomHasher) Reset()                            { panic("not implemented") }
func (h storageBloomHasher) BlockSize() int                    { panic("not implemented") }
func (h storageBloomHasher) Size() int                         { return 8 }
func (h storageBloomHasher) Sum64() uint64 {
	return binary.BigEndian.Uint64(h[0][bloomStorageHasherOffset:bloomStorageHasherOffset+8]) ^
		binary.BigEndian.Uint64(h[1][bloomStorageHasherOffset:bloomStorageHasherOffset+8])
}
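
// The XOR above (a note added here, not upstream documentation) folds the
// account and slot mini hashes into a single 64 bit key, so an (account, slot)
// pair maps to one bloom entry without any additional hashing work.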

// newDiffLayer creates a new diff on top of an existing snapshot, whether that's a low
// level persistent database or a hierarchical diff already.
func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
	// Create the new layer with some pre-allocated data segments
	dl := &diffLayer{
		parent:      parent,
		root:        root,
		destructSet: destructs,
		accountData: accounts,
		storageData: storage,
		storageList: make(map[common.Hash][]common.Hash),
	}
	switch parent := parent.(type) {
	case *diskLayer:
		dl.rebloom(parent)
	case *diffLayer:
		dl.rebloom(parent.origin)
	default:
		panic("unknown parent type")
	}
	// Sanity check that accounts or storage slots are never nil
	for accountHash, blob := range accounts {
		if blob == nil {
			panic(fmt.Sprintf("account %#x nil", accountHash))
		}
	}
	for accountHash, slots := range storage {
		if slots == nil {
			panic(fmt.Sprintf("storage %#x nil", accountHash))
		}
	}
	// Determine memory size and track the dirty writes
	for _, data := range accounts {
		dl.memory += uint64(common.HashLength + len(data))
		snapshotDirtyAccountWriteMeter.Mark(int64(len(data)))
	}
	// Determine memory size and track the dirty writes
	for _, slots := range storage {
		for _, data := range slots {
			dl.memory += uint64(common.HashLength + len(data))
			snapshotDirtyStorageWriteMeter.Mark(int64(len(data)))
		}
	}
	dl.memory += uint64(len(destructs) * common.HashLength)
	return dl
}

// rebloom discards the layer's current bloom and rebuilds it from scratch based
// on the parent's and the local diffs.
func (dl *diffLayer) rebloom(origin *diskLayer) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	defer func(start time.Time) {
		snapshotBloomIndexTimer.Update(time.Since(start))
	}(time.Now())

	// Inject the new origin that triggered the rebloom
	dl.origin = origin

	// Retrieve the parent bloom or create a fresh empty one
	if parent, ok := dl.parent.(*diffLayer); ok {
		parent.lock.RLock()
		dl.diffed, _ = parent.diffed.Copy()
		parent.lock.RUnlock()
	} else {
		dl.diffed, _ = bloomfilter.New(uint64(bloomSize), uint64(bloomFuncs))
	}
	// Iterate over all the accounts and storage slots and index them
	for hash := range dl.destructSet {
		dl.diffed.Add(destructBloomHasher(hash))
	}
	for hash := range dl.accountData {
		dl.diffed.Add(accountBloomHasher(hash))
	}
	for accountHash, slots := range dl.storageData {
		for storageHash := range slots {
			dl.diffed.Add(storageBloomHasher{accountHash, storageHash})
		}
	}
	// Calculate the current false positive rate and update the error rate meter.
	// This is a bit cheating because subsequent layers will overwrite it, but it
	// should be fine, we're only interested in ballpark figures.
	k := float64(dl.diffed.K())
	n := float64(dl.diffed.N())
	m := float64(dl.diffed.M())
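	// As a side note (explanation added here, not upstream documentation), the
	// expression below is the textbook bloom false positive estimate
	// p ≈ (1 - e^(-k*n/m))^k, using the slightly finer (n+0.5)/(m-1) form.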
	snapshotBloomErrorGauge.Update(math.Pow(1.0-math.Exp((-k)*(n+0.5)/(m-1)), k))
}

// Root returns the root hash for which this snapshot was made.
func (dl *diffLayer) Root() common.Hash {
	return dl.root
}

// Parent returns the parent snapshot that this diff layer was created on top of.
func (dl *diffLayer) Parent() snapshot {
	return dl.parent
}

// Stale returns whether this layer has become stale (was flattened across) or
// if it's still live.
func (dl *diffLayer) Stale() bool {
	return atomic.LoadUint32(&dl.stale) != 0
}

// Account directly retrieves the account associated with a particular hash in
// the snapshot slim data format.
func (dl *diffLayer) Account(hash common.Hash) (*Account, error) {
	data, err := dl.AccountRLP(hash)
	if err != nil {
		return nil, err
	}
	if len(data) == 0 { // can be both nil and []byte{}
		return nil, nil
	}
	account := new(Account)
	if err := rlp.DecodeBytes(data, account); err != nil {
		panic(err)
	}
	return account, nil
}

// AccountRLP directly retrieves the account RLP associated with a particular
// hash in the snapshot slim data format.
//
// Note the returned account is not a copy, please don't modify it.
func (dl *diffLayer) AccountRLP(hash common.Hash) ([]byte, error) {
	// Check the bloom filter first to see whether there's even a point in
	// reaching into all the maps in all the layers below
	dl.lock.RLock()
	hit := dl.diffed.Contains(accountBloomHasher(hash))
	if !hit {
		hit = dl.diffed.Contains(destructBloomHasher(hash))
	}
	dl.lock.RUnlock()

	// If the bloom filter misses, don't even bother with traversing the memory
	// diff layers, reach straight into the bottom persistent disk layer
	if !hit {
		snapshotBloomAccountMissMeter.Mark(1)
		return dl.origin.AccountRLP(hash)
	}
	// The bloom filter hit, start poking in the internal maps
	return dl.accountRLP(hash, 0)
}

// accountRLP is an internal version of AccountRLP that skips the bloom filter
// checks and uses the internal maps to try and retrieve the data. It's meant
// to be used if a higher layer's bloom filter hit already.
func (dl *diffLayer) accountRLP(hash common.Hash, depth int) ([]byte, error) {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	// If the layer was flattened into, consider it invalid (any live reference to
	// the original should be marked as unusable).
	if dl.Stale() {
		return nil, ErrSnapshotStale
	}
	// If the account is known locally, return it
	if data, ok := dl.accountData[hash]; ok {
		snapshotDirtyAccountHitMeter.Mark(1)
		snapshotDirtyAccountHitDepthHist.Update(int64(depth))
		snapshotDirtyAccountReadMeter.Mark(int64(len(data)))
		snapshotBloomAccountTrueHitMeter.Mark(1)
		return data, nil
	}
	// If the account is known locally, but deleted, return nil
	if _, ok := dl.destructSet[hash]; ok {
		snapshotDirtyAccountHitMeter.Mark(1)
		snapshotDirtyAccountHitDepthHist.Update(int64(depth))
		snapshotDirtyAccountInexMeter.Mark(1)
		snapshotBloomAccountTrueHitMeter.Mark(1)
		return nil, nil
	}
	// Account unknown to this diff, resolve from parent
	if diff, ok := dl.parent.(*diffLayer); ok {
		return diff.accountRLP(hash, depth+1)
	}
	// Failed to resolve through diff layers, mark a bloom error and use the disk
	snapshotBloomAccountFalseHitMeter.Mark(1)
	return dl.parent.AccountRLP(hash)
}

// Storage directly retrieves the storage data associated with a particular hash,
// within a particular account. If the slot is unknown to this diff, its parent
// is consulted.
//
// Note the returned slot is not a copy, please don't modify it.
func (dl *diffLayer) Storage(accountHash, storageHash common.Hash) ([]byte, error) {
	// Check the bloom filter first to see whether there's even a point in
	// reaching into all the maps in all the layers below
	dl.lock.RLock()
	hit := dl.diffed.Contains(storageBloomHasher{accountHash, storageHash})
	if !hit {
		hit = dl.diffed.Contains(destructBloomHasher(accountHash))
	}
	dl.lock.RUnlock()

	// If the bloom filter misses, don't even bother with traversing the memory
	// diff layers, reach straight into the bottom persistent disk layer
	if !hit {
		snapshotBloomStorageMissMeter.Mark(1)
		return dl.origin.Storage(accountHash, storageHash)
	}
	// The bloom filter hit, start poking in the internal maps
	return dl.storage(accountHash, storageHash, 0)
}

// storage is an internal version of Storage that skips the bloom filter checks
// and uses the internal maps to try and retrieve the data. It's meant to be
// used if a higher layer's bloom filter hit already.
func (dl *diffLayer) storage(accountHash, storageHash common.Hash, depth int) ([]byte, error) {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	// If the layer was flattened into, consider it invalid (any live reference to
	// the original should be marked as unusable).
	if dl.Stale() {
		return nil, ErrSnapshotStale
	}
	// If the account is known locally, try to resolve the slot locally
	if storage, ok := dl.storageData[accountHash]; ok {
		if data, ok := storage[storageHash]; ok {
			snapshotDirtyStorageHitMeter.Mark(1)
			snapshotDirtyStorageHitDepthHist.Update(int64(depth))
			if n := len(data); n > 0 {
				snapshotDirtyStorageReadMeter.Mark(int64(n))
			} else {
				snapshotDirtyStorageInexMeter.Mark(1)
			}
			snapshotBloomStorageTrueHitMeter.Mark(1)
			return data, nil
		}
	}
	// If the account is known locally, but deleted, return an empty slot
	if _, ok := dl.destructSet[accountHash]; ok {
		snapshotDirtyStorageHitMeter.Mark(1)
		snapshotDirtyStorageHitDepthHist.Update(int64(depth))
		snapshotDirtyStorageInexMeter.Mark(1)
		snapshotBloomStorageTrueHitMeter.Mark(1)
		return nil, nil
	}
	// Storage slot unknown to this diff, resolve from parent
	if diff, ok := dl.parent.(*diffLayer); ok {
		return diff.storage(accountHash, storageHash, depth+1)
	}
	// Failed to resolve through diff layers, mark a bloom error and use the disk
	snapshotBloomStorageFalseHitMeter.Mark(1)
	return dl.parent.Storage(accountHash, storageHash)
}

// Update creates a new layer on top of the existing snapshot diff tree with
// the specified data items.
func (dl *diffLayer) Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
	return newDiffLayer(dl, blockRoot, destructs, accounts, storage)
}
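
// Usage sketch (illustrative only, not part of the upstream file): after
// executing a block, the per-block aggregated changes are stacked on top of
// the current head layer, e.g.
//
//	child := head.Update(blockRoot, destructs, accounts, storage)
//
// Each child copies the parent's bloom filter and adds its own entries, so a
// single bloom check covers the entire diff stack down to the disk layer.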

// flatten pushes all data from this point downwards, flattening everything into
// a single diff at the bottom. Since usually the lowermost diff is the largest,
// the flattening builds up from there in reverse.
func (dl *diffLayer) flatten() snapshot {
	// If the parent is not diff, we're the first in line, return unmodified
	parent, ok := dl.parent.(*diffLayer)
	if !ok {
		return dl
	}
	// Parent is a diff, flatten it first (note, apart from weird corner cases,
	// flatten will realistically only ever merge 1 layer, so there's no need to
	// be smarter about grouping flattens together).
	parent = parent.flatten().(*diffLayer)

	parent.lock.Lock()
	defer parent.lock.Unlock()

	// Before actually writing all our data to the parent, first ensure that the
	// parent hasn't been 'corrupted' by someone else already flattening into it
	if atomic.SwapUint32(&parent.stale, 1) != 0 {
		panic("parent diff layer is stale") // we've flattened into the same parent from two children, boo
	}
	// Overwrite all the updated accounts blindly, merge the sorted list
	for hash := range dl.destructSet {
		parent.destructSet[hash] = struct{}{}
		delete(parent.accountData, hash)
		delete(parent.storageData, hash)
	}
	for hash, data := range dl.accountData {
		parent.accountData[hash] = data
	}
	// Overwrite all the updated storage slots (individually)
	for accountHash, storage := range dl.storageData {
		// If storage didn't exist (or was deleted) in the parent, overwrite blindly
		if _, ok := parent.storageData[accountHash]; !ok {
			parent.storageData[accountHash] = storage
			continue
		}
		// Storage exists in both parent and child, merge the slots
		comboData := parent.storageData[accountHash]
		for storageHash, data := range storage {
			comboData[storageHash] = data
		}
		parent.storageData[accountHash] = comboData
	}
	// Return the combo parent
	return &diffLayer{
		parent:      parent.parent,
		origin:      parent.origin,
		root:        dl.root,
		destructSet: parent.destructSet,
		accountData: parent.accountData,
		storageData: parent.storageData,
		storageList: make(map[common.Hash][]common.Hash),
		diffed:      dl.diffed,
		memory:      parent.memory + dl.memory,
	}
}
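
// Worked example of the merge semantics above (illustrative, not upstream
// documentation): if the child destructs account A, the destructSet loop drops
// the parent's stale A data before the child's own maps are applied; if the
// child then recreates A with slot S, accountData[A] and storageData[A][S]
// overwrite the parent's entries, so the flattened layer reads exactly as the
// child did.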

// AccountList returns a sorted list of all accounts in this diffLayer, including
// the deleted ones.
//
// Note, the returned slice is not a copy, so do not modify it.
func (dl *diffLayer) AccountList() []common.Hash {
	// If an old list already exists, return it
	dl.lock.RLock()
	list := dl.accountList
	dl.lock.RUnlock()

	if list != nil {
		return list
	}
	// No old sorted account list exists, generate a new one
	dl.lock.Lock()
	defer dl.lock.Unlock()

	dl.accountList = make([]common.Hash, 0, len(dl.destructSet)+len(dl.accountData))
	for hash := range dl.accountData {
		dl.accountList = append(dl.accountList, hash)
	}
	for hash := range dl.destructSet {
		if _, ok := dl.accountData[hash]; !ok {
			dl.accountList = append(dl.accountList, hash)
		}
	}
	sort.Sort(hashes(dl.accountList))
	dl.memory += uint64(len(dl.accountList) * common.HashLength)
	return dl.accountList
}

// StorageList returns a sorted list of all storage slot hashes in this diffLayer
// for the given account. If the whole storage is destructed in this layer, then
// an additional flag *destructed = true* will be returned, otherwise the flag is
// false. Besides, the returned list will include the hashes of deleted storage
// slots. Note, a special case is when an account is deleted in a prior tx but
// recreated in a following tx with some storage slots set: in this case the
// returned list is not empty but the flag is true.
//
// Note, the returned slice is not a copy, so do not modify it.
func (dl *diffLayer) StorageList(accountHash common.Hash) ([]common.Hash, bool) {
	dl.lock.RLock()
	_, destructed := dl.destructSet[accountHash]
	if _, ok := dl.storageData[accountHash]; !ok {
		// Account not tracked by this layer
		dl.lock.RUnlock()
		return nil, destructed
	}
	// If an old list already exists, return it
	if list, exist := dl.storageList[accountHash]; exist {
		dl.lock.RUnlock()
		return list, destructed // the cached list can't be nil
	}
	dl.lock.RUnlock()

	// No old sorted account list exists, generate a new one
	dl.lock.Lock()
	defer dl.lock.Unlock()

	storageMap := dl.storageData[accountHash]
	storageList := make([]common.Hash, 0, len(storageMap))
	for k := range storageMap {
		storageList = append(storageList, k)
	}
	sort.Sort(hashes(storageList))
	dl.storageList[accountHash] = storageList
	// Account for the freshly cached list (its slots plus the map key)
	dl.memory += uint64(len(storageList)*common.HashLength + common.HashLength)
	return storageList, destructed
}
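
// Usage sketch (illustrative assumption about callers, not part of this file):
// consumers such as the snapshot iterators typically combine both return values,
//
//	list, destructed := dl.StorageList(accountHash)
//
// treating destructed == true as "ignore everything in the layers below, then
// apply list on top".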
|