// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package state provides a caching layer atop the Ethereum state trie.
package state
2014-07-22 12:54:48 +03:00
import (
2024-03-11 14:51:58 +03:00
"errors"
2015-12-11 02:29:41 +02:00
"fmt"
2022-07-05 06:14:21 +03:00
"runtime"
2016-10-04 13:36:02 +03:00
"sort"
2022-07-05 06:14:21 +03:00
"sync"
2019-03-25 11:01:18 +03:00
"time"
2014-07-30 01:31:15 +03:00
2015-03-16 12:27:38 +02:00
"github.com/ethereum/go-ethereum/common"
2022-07-05 06:14:21 +03:00
"github.com/ethereum/go-ethereum/common/gopool"
2020-08-21 15:10:40 +03:00
"github.com/ethereum/go-ethereum/core/rawdb"
2019-08-06 13:40:28 +03:00
"github.com/ethereum/go-ethereum/core/state/snapshot"
2017-01-05 16:03:50 +03:00
"github.com/ethereum/go-ethereum/core/types"
2016-10-01 15:44:53 +03:00
"github.com/ethereum/go-ethereum/crypto"
2022-07-05 06:14:21 +03:00
"github.com/ethereum/go-ethereum/ethdb"
2017-02-22 15:10:07 +03:00
"github.com/ethereum/go-ethereum/log"
2019-03-25 11:01:18 +03:00
"github.com/ethereum/go-ethereum/metrics"
2022-11-16 12:18:52 +03:00
"github.com/ethereum/go-ethereum/params"
2015-12-11 02:29:41 +02:00
"github.com/ethereum/go-ethereum/rlp"
2015-01-08 12:47:04 +02:00
"github.com/ethereum/go-ethereum/trie"
2023-05-09 10:11:04 +03:00
"github.com/ethereum/go-ethereum/trie/trienode"
2023-07-11 16:43:23 +03:00
"github.com/ethereum/go-ethereum/trie/triestate"
2024-01-23 16:51:58 +03:00
"github.com/holiman/uint256"
2016-09-25 21:49:02 +03:00
)
2023-08-26 11:13:22 +03:00
const (
	// defaultNumOfSlots is the initial capacity hint used when allocating
	// the per-block state-object maps in New.
	defaultNumOfSlots = 100

	// storageDeleteLimit denotes the highest permissible memory allocation
	// employed for contract storage deletion.
	storageDeleteLimit = 512 * 1024 * 1024
)
2022-07-05 06:14:21 +03:00
2016-10-04 13:36:02 +03:00
// revision is a checkpoint in the state journal, used to implement
// Snapshot and RevertToSnapshot.
type revision struct {
	id           int // caller-visible snapshot identifier
	journalIndex int // journal length at the time the snapshot was taken
}
2020-08-19 09:54:21 +03:00
// StateDB structs within the ethereum protocol are used to store anything
// within the merkle trie. StateDBs take care of caching and storing
// nested states. It's the general query interface to retrieve:
//
// * Contracts
// * Accounts
//
// Once the state is committed, tries cached in stateDB (including account
// trie, storage tries) will no longer be functional. A new state instance
// must be created with new root and updated database for accessing post-
// commit states.
type StateDB struct {
	db             Database
	prefetcherLock sync.Mutex      // guards prefetcher, which may be swapped/cleared concurrently
	prefetcher     *triePrefetcher // warms trie nodes in the background; nil when disabled
	trie           Trie            // may be nil until first use; see Trie()
	noTrie         bool            // set when the opened trie is a trie.EmptyTrie
	hasher         crypto.KeccakState

	snaps *snapshot.Tree    // Nil if snapshot is not available
	snap  snapshot.Snapshot // Nil if snapshot is not available

	// originalRoot is the pre-state root, before any changes were made.
	// It will be updated when the Commit is called.
	originalRoot common.Hash

	expectedRoot common.Hash // The state root in the block header
	stateRoot    common.Hash // The calculation result of IntermediateRoot

	fullProcessed bool // set via MarkFullProcessed
	pipeCommit    bool // pipelined-commit flag; see EnablePipeCommit/IsPipeCommit

	// These maps hold the state changes (including the corresponding
	// original value) that occurred in this **block**.
	AccountMux sync.Mutex // Mutex for accounts access
	StorageMux sync.Mutex // Mutex for storages access

	accounts       map[common.Hash][]byte                    // The mutated accounts in 'slim RLP' encoding
	storages       map[common.Hash]map[common.Hash][]byte    // The mutated slots in prefix-zero trimmed rlp format
	accountsOrigin map[common.Address][]byte                 // The original value of mutated accounts in 'slim RLP' encoding
	storagesOrigin map[common.Address]map[common.Hash][]byte // The original value of mutated slots in prefix-zero trimmed rlp format

	// This map holds 'live' objects, which will get modified while processing
	// a state transition.
	stateObjects         map[common.Address]*stateObject
	stateObjectsPending  map[common.Address]struct{}            // State objects finalized but not yet written to the trie
	stateObjectsDirty    map[common.Address]struct{}            // State objects modified in the current execution
	stateObjectsDestruct map[common.Address]*types.StateAccount // State objects destructed in the block along with its previous value

	storagePool          *StoragePool // sharedPool to store L1 originStorage of stateObjects
	writeOnSharedStorage bool         // Write to the shared origin storage of a stateObject while reading from the underlying storage layer.

	// DB error.
	// State objects are used by the consensus core and VM which are
	// unable to deal with database-level errors. Any error that occurs
	// during a database read is memoized here and will eventually be
	// returned by StateDB.Commit. Notably, this error is also shared
	// by all cached state objects in case the database failure occurs
	// when accessing state of accounts.
	dbErr error

	// The refund counter, also used by state transitioning.
	refund uint64

	// The tx context and all occurred logs in the scope of transaction.
	thash   common.Hash
	txIndex int
	logs    map[common.Hash][]*types.Log
	logSize uint

	// Preimages occurred seen by VM in the scope of block.
	preimages map[common.Hash][]byte

	// Per-transaction access list
	accessList *accessList

	// Transient storage
	transientStorage transientStorage

	// Journal of state modifications. This is the backbone of
	// Snapshot and RevertToSnapshot.
	journal        *journal
	validRevisions []revision
	nextRevisionId int

	// Measurements gathered during execution for debugging purposes.
	// MetricsMux should be used in more places, but that would affect
	// performance, so the following metering is not accurate.
	MetricsMux           sync.Mutex
	AccountReads         time.Duration
	AccountHashes        time.Duration
	AccountUpdates       time.Duration
	AccountCommits       time.Duration
	StorageReads         time.Duration
	StorageHashes        time.Duration
	StorageUpdates       time.Duration
	StorageCommits       time.Duration
	SnapshotAccountReads time.Duration
	SnapshotStorageReads time.Duration
	SnapshotCommits      time.Duration
	TrieDBCommits        time.Duration

	AccountUpdated int
	StorageUpdated int
	AccountDeleted int
	StorageDeleted int

	// Testing hooks
	onCommit func(states *triestate.Set) // Hook invoked when commit is performed
}
2022-07-05 06:14:21 +03:00
// NewWithSharedPool creates a new state with sharedStorge on layer 1.5
func NewWithSharedPool ( root common . Hash , db Database , snaps * snapshot . Tree ) ( * StateDB , error ) {
2023-09-07 11:39:29 +03:00
statedb , err := New ( root , db , snaps )
2015-07-06 02:19:48 +03:00
if err != nil {
2015-10-06 17:35:55 +03:00
return nil , err
2015-07-06 02:19:48 +03:00
}
2022-07-05 06:14:21 +03:00
statedb . storagePool = NewStoragePool ( )
return statedb , nil
}
2023-09-07 11:39:29 +03:00
// New creates a new state from a given trie.
func New ( root common . Hash , db Database , snaps * snapshot . Tree ) ( * StateDB , error ) {
2019-08-06 13:40:28 +03:00
sdb := & StateDB {
2022-12-28 16:53:43 +03:00
db : db ,
originalRoot : root ,
snaps : snaps ,
2023-07-11 16:43:23 +03:00
accounts : make ( map [ common . Hash ] [ ] byte ) ,
storages : make ( map [ common . Hash ] map [ common . Hash ] [ ] byte ) ,
2023-07-31 15:07:51 +03:00
accountsOrigin : make ( map [ common . Address ] [ ] byte ) ,
storagesOrigin : make ( map [ common . Address ] map [ common . Hash ] [ ] byte ) ,
2023-08-23 12:46:08 +03:00
stateObjects : make ( map [ common . Address ] * stateObject , defaultNumOfSlots ) ,
stateObjectsPending : make ( map [ common . Address ] struct { } , defaultNumOfSlots ) ,
stateObjectsDirty : make ( map [ common . Address ] struct { } , defaultNumOfSlots ) ,
stateObjectsDestruct : make ( map [ common . Address ] * types . StateAccount , defaultNumOfSlots ) ,
2022-12-28 16:53:43 +03:00
logs : make ( map [ common . Hash ] [ ] * types . Log ) ,
preimages : make ( map [ common . Hash ] [ ] byte ) ,
journal : newJournal ( ) ,
accessList : newAccessList ( ) ,
transientStorage : newTransientStorage ( ) ,
hasher : crypto . NewKeccakState ( ) ,
2019-08-06 13:40:28 +03:00
}
2022-07-05 06:14:21 +03:00
2019-08-06 13:40:28 +03:00
if sdb . snaps != nil {
2023-07-11 16:43:23 +03:00
sdb . snap = sdb . snaps . Snapshot ( root )
2019-08-06 13:40:28 +03:00
}
2022-07-05 06:14:21 +03:00
2023-09-15 08:49:07 +03:00
tr , err := db . OpenTrie ( root )
2022-07-05 06:14:21 +03:00
// return error when 1. failed to open trie and 2. the snap is nil or the snap is not nil and done verification
2023-09-07 11:39:29 +03:00
if err != nil && ( sdb . snap == nil || sdb . snap . Verified ( ) ) {
2022-07-05 06:14:21 +03:00
return nil , err
}
_ , sdb . noTrie = tr . ( * trie . EmptyTrie )
2023-09-15 08:49:07 +03:00
sdb . trie = tr
2019-08-06 13:40:28 +03:00
return sdb , nil
2015-03-19 11:57:02 +02:00
}
2022-07-05 06:14:21 +03:00
// EnableWriteOnSharedStorage allows state objects to write their origin
// storage back into the shared storage pool while reading from the
// underlying layer.
func (s *StateDB) EnableWriteOnSharedStorage() {
	s.writeOnSharedStorage = true
}
2022-11-25 09:05:19 +03:00
// TransferPrefetcher moves the trie prefetcher from prev to s.
//
// In mining mode, multiple fillTransactions runs are attempted for the same
// block height, each with its own StateDB; sharing one triePrefetcher avoids
// spawning redundant prefetch routines.
func (s *StateDB) TransferPrefetcher(prev *StateDB) {
	if prev == nil {
		return
	}
	// Detach the fetcher from the previous state under its lock...
	prev.prefetcherLock.Lock()
	fetcher := prev.prefetcher
	prev.prefetcher = nil
	prev.prefetcherLock.Unlock()

	// ...and adopt it under ours.
	s.prefetcherLock.Lock()
	s.prefetcher = fetcher
	s.prefetcherLock.Unlock()
}
2021-01-08 16:01:49 +03:00
// StartPrefetcher initializes a new trie prefetcher to pull in nodes from the
// state trie concurrently while the state is mutated so that when we reach the
// commit phase, most of the needed data is already hot.
func ( s * StateDB ) StartPrefetcher ( namespace string ) {
2022-07-05 06:14:21 +03:00
if s . noTrie {
return
}
s . prefetcherLock . Lock ( )
defer s . prefetcherLock . Unlock ( )
2021-01-08 16:01:49 +03:00
if s . prefetcher != nil {
s . prefetcher . close ( )
s . prefetcher = nil
}
if s . snap != nil {
2022-07-29 09:21:37 +03:00
parent := s . snap . Parent ( )
if parent != nil {
s . prefetcher = newTriePrefetcher ( s . db , s . originalRoot , parent . Root ( ) , namespace )
} else {
s . prefetcher = newTriePrefetcher ( s . db , s . originalRoot , common . Hash { } , namespace )
}
2021-01-08 16:01:49 +03:00
}
}
// StopPrefetcher terminates a running prefetcher and reports any leftover stats
// from the gathered metrics.
func ( s * StateDB ) StopPrefetcher ( ) {
2022-07-05 06:14:21 +03:00
if s . noTrie {
return
}
s . prefetcherLock . Lock ( )
2021-01-08 16:01:49 +03:00
if s . prefetcher != nil {
s . prefetcher . close ( )
2023-04-03 17:11:09 +03:00
s . prefetcher = nil
2020-02-05 15:12:09 +03:00
}
2022-08-31 08:30:25 +03:00
s . prefetcherLock . Unlock ( )
2020-02-05 15:12:09 +03:00
}
[Feature]: Improve trie prefetch (#952)
* trie prefetcher for From/To address in advance
We found that trie prefetch could be not fast enough, especially trie prefetch of
the outer big state trie tree.
Instead of do trie prefetch until a transaction is finalized, we could do trie prefetch
in advance. Try to prefetch the trie node of the From/To accounts, since their root hash
are most likely to be changed.
* Parallel TriePrefetch for large trie update.
Currently, we create a subfetch for each account address to do trie prefetch. If the address
has very large state change, trie prefetch could be not fast enough, e.g. a contract modified
lots of KV pair or a large number of account's root hash is changed in a block.
With this commit, there will be children subfetcher created to do trie prefetch in parallell if
the parent subfetch's workload exceed the threshold.
* some improvemnts of parallel trie prefetch implementation
1.childrenLock is removed, since it is not necessary
APIs of triePrefetcher is not thread safe, they should be used sequentially.
A prefetch will be interrupted by trie() or clos(), so we only need mark it as
interrupted and check before call scheduleParallel to avoid the concurrent access to paraChildren
2.rename subfetcher.children to subfetcher.paraChildren
3.use subfetcher.pendingSize to replace totalSize & processedIndex
4.randomly select the start child to avoid always feed the first one
5.increase threshold and capacity to avoid create too many child routine
* fix review comments
** nil check refine
** create a separate routine for From/To prefetch, avoid blocking the cirtical path
* remove the interrupt member
* not create a signer for each transaction
* some changes to triePrefetcher
** remove the abortLoop, move the subfetcher abort operation into mainLoop
since we want to make subfetcher's create & schedule & abort within a loop to
avoid concurrent access locks.
** no wait subfetcher's term signal in abort()
it could speed up the close by closing subfetcher concurrently.
we send stop signnal to all subfetchers in burst and wait their term signal later.
* some coding improve for subfetcher.scheduleParallel
* fix a UT crash of s.prefetcher == nil
* update parallel trie prefetcher configuration
tested with different combination of parallelTriePrefetchThreshold & parallelTriePrefetchCapacity,
found the most efficient configure could be:
parallelTriePrefetchThreshold = 10
parallelTriePrefetchCapacity = 20
* fix review comments: code refine
2022-07-15 14:17:08 +03:00
func ( s * StateDB ) TriePrefetchInAdvance ( block * types . Block , signer types . Signer ) {
2022-07-26 17:23:55 +03:00
// s is a temporary throw away StateDB, s.prefetcher won't be resetted to nil
// so no need to add lock for s.prefetcher
prefetcher := s . prefetcher
[Feature]: Improve trie prefetch (#952)
* trie prefetcher for From/To address in advance
We found that trie prefetch could be not fast enough, especially trie prefetch of
the outer big state trie tree.
Instead of do trie prefetch until a transaction is finalized, we could do trie prefetch
in advance. Try to prefetch the trie node of the From/To accounts, since their root hash
are most likely to be changed.
* Parallel TriePrefetch for large trie update.
Currently, we create a subfetch for each account address to do trie prefetch. If the address
has very large state change, trie prefetch could be not fast enough, e.g. a contract modified
lots of KV pair or a large number of account's root hash is changed in a block.
With this commit, there will be children subfetcher created to do trie prefetch in parallell if
the parent subfetch's workload exceed the threshold.
* some improvemnts of parallel trie prefetch implementation
1.childrenLock is removed, since it is not necessary
APIs of triePrefetcher is not thread safe, they should be used sequentially.
A prefetch will be interrupted by trie() or clos(), so we only need mark it as
interrupted and check before call scheduleParallel to avoid the concurrent access to paraChildren
2.rename subfetcher.children to subfetcher.paraChildren
3.use subfetcher.pendingSize to replace totalSize & processedIndex
4.randomly select the start child to avoid always feed the first one
5.increase threshold and capacity to avoid create too many child routine
* fix review comments
** nil check refine
** create a separate routine for From/To prefetch, avoid blocking the cirtical path
* remove the interrupt member
* not create a signer for each transaction
* some changes to triePrefetcher
** remove the abortLoop, move the subfetcher abort operation into mainLoop
since we want to make subfetcher's create & schedule & abort within a loop to
avoid concurrent access locks.
** no wait subfetcher's term signal in abort()
it could speed up the close by closing subfetcher concurrently.
we send stop signnal to all subfetchers in burst and wait their term signal later.
* some coding improve for subfetcher.scheduleParallel
* fix a UT crash of s.prefetcher == nil
* update parallel trie prefetcher configuration
tested with different combination of parallelTriePrefetchThreshold & parallelTriePrefetchCapacity,
found the most efficient configure could be:
parallelTriePrefetchThreshold = 10
parallelTriePrefetchCapacity = 20
* fix review comments: code refine
2022-07-15 14:17:08 +03:00
if prefetcher == nil {
return
}
accounts := make ( map [ common . Address ] struct { } , block . Transactions ( ) . Len ( ) << 1 )
for _ , tx := range block . Transactions ( ) {
from , err := types . Sender ( signer , tx )
if err != nil {
// invalid block, skip prefetch
return
}
accounts [ from ] = struct { } { }
if tx . To ( ) != nil {
accounts [ * tx . To ( ) ] = struct { } { }
}
}
addressesToPrefetch := make ( [ ] [ ] byte , 0 , len ( accounts ) )
for addr := range accounts {
addressesToPrefetch = append ( addressesToPrefetch , common . CopyBytes ( addr [ : ] ) ) // Copy needed for closure
}
if len ( addressesToPrefetch ) > 0 {
2023-08-23 12:46:08 +03:00
prefetcher . prefetch ( common . Hash { } , s . originalRoot , common . Address { } , addressesToPrefetch )
[Feature]: Improve trie prefetch (#952)
* trie prefetcher for From/To address in advance
We found that trie prefetch could be not fast enough, especially trie prefetch of
the outer big state trie tree.
Instead of do trie prefetch until a transaction is finalized, we could do trie prefetch
in advance. Try to prefetch the trie node of the From/To accounts, since their root hash
are most likely to be changed.
* Parallel TriePrefetch for large trie update.
Currently, we create a subfetch for each account address to do trie prefetch. If the address
has very large state change, trie prefetch could be not fast enough, e.g. a contract modified
lots of KV pair or a large number of account's root hash is changed in a block.
With this commit, there will be children subfetcher created to do trie prefetch in parallell if
the parent subfetch's workload exceed the threshold.
* some improvemnts of parallel trie prefetch implementation
1.childrenLock is removed, since it is not necessary
APIs of triePrefetcher is not thread safe, they should be used sequentially.
A prefetch will be interrupted by trie() or clos(), so we only need mark it as
interrupted and check before call scheduleParallel to avoid the concurrent access to paraChildren
2.rename subfetcher.children to subfetcher.paraChildren
3.use subfetcher.pendingSize to replace totalSize & processedIndex
4.randomly select the start child to avoid always feed the first one
5.increase threshold and capacity to avoid create too many child routine
* fix review comments
** nil check refine
** create a separate routine for From/To prefetch, avoid blocking the cirtical path
* remove the interrupt member
* not create a signer for each transaction
* some changes to triePrefetcher
** remove the abortLoop, move the subfetcher abort operation into mainLoop
since we want to make subfetcher's create & schedule & abort within a loop to
avoid concurrent access locks.
** no wait subfetcher's term signal in abort()
it could speed up the close by closing subfetcher concurrently.
we send stop signnal to all subfetchers in burst and wait their term signal later.
* some coding improve for subfetcher.scheduleParallel
* fix a UT crash of s.prefetcher == nil
* update parallel trie prefetcher configuration
tested with different combination of parallelTriePrefetchThreshold & parallelTriePrefetchCapacity,
found the most efficient configure could be:
parallelTriePrefetchThreshold = 10
parallelTriePrefetchCapacity = 20
* fix review comments: code refine
2022-07-15 14:17:08 +03:00
}
}
2022-07-05 06:14:21 +03:00
// SetExpectedStateRoot records the state root carried in the block header,
// marking the block as processed by the diff layer.
func (s *StateDB) SetExpectedStateRoot(root common.Hash) {
	s.expectedRoot = root
}
// Enable the pipeline commit function of statedb
func ( s * StateDB ) EnablePipeCommit ( ) {
if s . snap != nil && s . snaps . Layers ( ) > 1 {
2023-09-07 11:39:29 +03:00
// after big merge, disable pipeCommit for now,
// because `s.db.TrieDB().Update` should be called after `s.trie.Commit(true)`
s . pipeCommit = false
2022-07-05 06:14:21 +03:00
}
}
// IsPipeCommit checks whether pipecommit is enabled on the statedb or not.
func (s *StateDB) IsPipeCommit() bool {
	return s.pipeCommit
}
// MarkFullProcessed flags the block as fully processed.
func (s *StateDB) MarkFullProcessed() {
	s.fullProcessed = true
}
2017-06-27 16:57:06 +03:00
// setError remembers the first non-nil error it is called with.
2019-11-22 17:56:05 +03:00
func ( s * StateDB ) setError ( err error ) {
if s . dbErr == nil {
s . dbErr = err
2016-09-22 22:04:58 +03:00
}
2017-06-27 16:57:06 +03:00
}
2022-07-05 06:14:21 +03:00
// NoTrie reports whether this state was opened without a usable trie
// (i.e. the underlying trie is a trie.EmptyTrie).
func (s *StateDB) NoTrie() bool {
	return s.noTrie
}
2023-03-16 10:12:34 +03:00
// Error returns the memorized database failure occurred earlier.
2019-11-22 17:56:05 +03:00
func ( s * StateDB ) Error ( ) error {
return s . dbErr
2016-09-27 13:13:13 +03:00
}
2022-07-05 06:14:21 +03:00
// Trie returns the account trie, lazily opening it on first use.
// Not thread safe.
func (s *StateDB) Trie() (Trie, error) {
	if s.trie != nil {
		return s.trie, nil
	}
	// Lazily open the trie; wait for any in-flight pipe verification first.
	if err := s.WaitPipeVerification(); err != nil {
		return nil, err
	}
	tr, err := s.db.OpenTrie(s.originalRoot)
	if err != nil {
		return nil, err
	}
	s.trie = tr
	return s.trie, nil
}
2019-11-22 17:56:05 +03:00
func ( s * StateDB ) AddLog ( log * types . Log ) {
s . journal . append ( addLogChange { txhash : s . thash } )
2016-10-04 13:36:02 +03:00
2019-11-22 17:56:05 +03:00
log . TxHash = s . thash
log . TxIndex = uint ( s . txIndex )
log . Index = s . logSize
s . logs [ s . thash ] = append ( s . logs [ s . thash ] , log )
s . logSize ++
2015-04-08 18:14:58 +03:00
}
2022-12-13 15:54:16 +03:00
// GetLogs returns the logs matching the specified transaction hash, and annotates
// them with the given blockNumber and blockHash.
func ( s * StateDB ) GetLogs ( hash common . Hash , blockNumber uint64 , blockHash common . Hash ) [ ] * types . Log {
2021-06-30 16:17:01 +03:00
logs := s . logs [ hash ]
for _ , l := range logs {
2022-12-13 15:54:16 +03:00
l . BlockNumber = blockNumber
2021-06-30 16:17:01 +03:00
l . BlockHash = blockHash
}
return logs
2014-10-30 14:32:50 +02:00
}
2019-11-22 17:56:05 +03:00
func ( s * StateDB ) Logs ( ) [ ] * types . Log {
2017-01-05 16:03:50 +03:00
var logs [ ] * types . Log
2019-11-22 17:56:05 +03:00
for _ , lgs := range s . logs {
2015-04-08 18:14:58 +03:00
logs = append ( logs , lgs ... )
}
return logs
}
2017-01-17 14:19:50 +03:00
// AddPreimage records a SHA3 preimage seen by the VM.
2019-11-22 17:56:05 +03:00
func ( s * StateDB ) AddPreimage ( hash common . Hash , preimage [ ] byte ) {
if _ , ok := s . preimages [ hash ] ; ! ok {
s . journal . append ( addPreimageChange { hash : hash } )
2017-01-17 14:19:50 +03:00
pi := make ( [ ] byte , len ( preimage ) )
copy ( pi , preimage )
2019-11-22 17:56:05 +03:00
s . preimages [ hash ] = pi
2017-01-17 14:19:50 +03:00
}
}
// Preimages returns a list of SHA3 preimages that have been submitted.
2019-11-22 17:56:05 +03:00
func ( s * StateDB ) Preimages ( ) map [ common . Hash ] [ ] byte {
return s . preimages
2017-01-17 14:19:50 +03:00
}
2018-08-12 00:03:54 +03:00
// AddRefund adds gas to the refund counter
2019-11-22 17:56:05 +03:00
func ( s * StateDB ) AddRefund ( gas uint64 ) {
s . journal . append ( refundChange { prev : s . refund } )
s . refund += gas
2015-02-12 00:46:45 +02:00
}
2018-08-12 00:03:54 +03:00
// SubRefund removes gas from the refund counter.
// This method will panic if the refund counter goes below zero
2019-11-22 17:56:05 +03:00
func ( s * StateDB ) SubRefund ( gas uint64 ) {
s . journal . append ( refundChange { prev : s . refund } )
if gas > s . refund {
2020-01-10 12:12:32 +03:00
panic ( fmt . Sprintf ( "Refund counter below zero (gas: %d > refund: %d)" , gas , s . refund ) )
2018-08-12 00:03:54 +03:00
}
2019-11-22 17:56:05 +03:00
s . refund -= gas
2018-08-12 00:03:54 +03:00
}
2016-10-04 13:36:02 +03:00
// Exist reports whether the given account address exists in the state.
2023-07-15 17:35:30 +03:00
// Notably this also returns true for self-destructed accounts.
2019-11-22 17:56:05 +03:00
func ( s * StateDB ) Exist ( addr common . Address ) bool {
return s . getStateObject ( addr ) != nil
2015-08-30 11:19:10 +03:00
}
2017-01-06 20:44:35 +03:00
// Empty returns whether the state object is either non-existent
2016-10-20 14:36:29 +03:00
// or empty according to the EIP161 specification (balance = nonce = code = 0)
2019-11-22 17:56:05 +03:00
func ( s * StateDB ) Empty ( addr common . Address ) bool {
so := s . getStateObject ( addr )
2016-10-20 14:36:29 +03:00
return so == nil || so . empty ( )
}
2020-08-19 09:54:21 +03:00
// GetBalance retrieves the balance from the given address or 0 if object not found
2024-01-23 16:51:58 +03:00
func ( s * StateDB ) GetBalance ( addr common . Address ) * uint256 . Int {
2019-11-22 17:56:05 +03:00
stateObject := s . getStateObject ( addr )
2014-07-22 12:54:48 +03:00
if stateObject != nil {
2016-09-22 22:04:58 +03:00
return stateObject . Balance ( )
2014-07-22 12:54:48 +03:00
}
2024-01-23 16:51:58 +03:00
return common . U2560
2014-07-22 12:54:48 +03:00
}
2023-08-31 21:33:18 +03:00
// GetNonce retrieves the nonce from the given address or 0 if object not found
2019-11-22 17:56:05 +03:00
func ( s * StateDB ) GetNonce ( addr common . Address ) uint64 {
stateObject := s . getStateObject ( addr )
2014-07-22 12:54:48 +03:00
if stateObject != nil {
2016-09-22 22:04:58 +03:00
return stateObject . Nonce ( )
2014-07-22 12:54:48 +03:00
}
2016-11-20 14:18:39 +03:00
return 0
2014-07-22 12:54:48 +03:00
}
2023-08-31 21:33:18 +03:00
// GetStorageRoot retrieves the storage root from the given address or empty
// if object not found.
func (s *StateDB) GetStorageRoot(addr common.Address) common.Hash {
	if obj := s.getStateObject(addr); obj != nil {
		return obj.Root()
	}
	return common.Hash{}
}
2019-03-27 15:39:25 +03:00
// TxIndex returns the current transaction index set by Prepare.
2019-11-22 17:56:05 +03:00
func ( s * StateDB ) TxIndex ( ) int {
return s . txIndex
2019-03-27 15:39:25 +03:00
}
2019-11-22 17:56:05 +03:00
func ( s * StateDB ) GetCode ( addr common . Address ) [ ] byte {
stateObject := s . getStateObject ( addr )
2014-10-16 14:38:21 +03:00
if stateObject != nil {
2023-07-24 13:22:09 +03:00
return stateObject . Code ( )
2014-10-16 14:38:21 +03:00
}
2015-02-19 23:33:22 +02:00
return nil
2014-10-16 14:38:21 +03:00
}
2023-03-31 09:34:47 +03:00
// GetRoot returns the storage root recorded in the account's backing data,
// or the empty hash if the account does not exist.
// NOTE(review): unlike GetStorageRoot this reads data.Root directly instead
// of calling Root() — presumably bypassing any cached/derived value; confirm
// against stateObject before unifying the two.
func (s *StateDB) GetRoot(addr common.Address) common.Hash {
	if obj := s.getStateObject(addr); obj != nil {
		return obj.data.Root
	}
	return common.Hash{}
}
2019-11-22 17:56:05 +03:00
func ( s * StateDB ) GetCodeSize ( addr common . Address ) int {
stateObject := s . getStateObject ( addr )
2020-05-11 10:14:49 +03:00
if stateObject != nil {
2023-07-24 13:22:09 +03:00
return stateObject . CodeSize ( )
2016-09-25 21:49:02 +03:00
}
2020-05-11 10:14:49 +03:00
return 0
2016-09-22 22:04:58 +03:00
}
2019-11-22 17:56:05 +03:00
func ( s * StateDB ) GetCodeHash ( addr common . Address ) common . Hash {
stateObject := s . getStateObject ( addr )
2023-12-26 11:38:11 +03:00
if stateObject != nil {
return common . BytesToHash ( stateObject . CodeHash ( ) )
2016-10-01 15:44:53 +03:00
}
2023-12-26 11:38:11 +03:00
return common . Hash { }
2016-10-01 15:44:53 +03:00
}
2018-09-18 16:24:35 +03:00
// GetState retrieves a value from the given account's storage trie.
2019-11-22 17:56:05 +03:00
func ( s * StateDB ) GetState ( addr common . Address , hash common . Hash ) common . Hash {
stateObject := s . getStateObject ( addr )
2014-09-08 01:50:04 +03:00
if stateObject != nil {
2023-07-24 13:22:09 +03:00
return stateObject . GetState ( hash )
2014-09-08 01:50:04 +03:00
}
2015-06-17 12:24:40 +03:00
return common . Hash { }
2014-09-08 01:50:04 +03:00
}
2018-09-18 16:24:35 +03:00
// GetCommittedState retrieves a value from the given account's committed storage trie.
2019-11-22 17:56:05 +03:00
func ( s * StateDB ) GetCommittedState ( addr common . Address , hash common . Hash ) common . Hash {
stateObject := s . getStateObject ( addr )
2018-08-12 15:47:03 +03:00
if stateObject != nil {
2023-07-24 13:22:09 +03:00
return stateObject . GetCommittedState ( hash )
2018-08-12 15:47:03 +03:00
}
return common . Hash { }
}
// Database retrieves the low level database supporting the lower level trie ops.
func (s *StateDB) Database() Database {
	return s.db
}
2023-07-15 17:35:30 +03:00
func ( s * StateDB ) HasSelfDestructed ( addr common . Address ) bool {
2019-11-22 17:56:05 +03:00
stateObject := s . getStateObject ( addr )
2015-04-01 11:53:32 +03:00
if stateObject != nil {
2023-07-15 17:35:30 +03:00
return stateObject . selfDestructed
2015-04-01 11:53:32 +03:00
}
return false
}
/*
 * SETTERS
 */
2018-03-28 09:26:37 +03:00
// AddBalance adds amount to the account associated with addr.
2024-01-23 16:51:58 +03:00
func ( s * StateDB ) AddBalance ( addr common . Address , amount * uint256 . Int ) {
2024-01-14 14:32:23 +03:00
stateObject := s . getOrNewStateObject ( addr )
2015-04-01 11:53:32 +03:00
if stateObject != nil {
stateObject . AddBalance ( amount )
}
}
2018-03-28 09:26:37 +03:00
// SubBalance subtracts amount from the account associated with addr.
2024-01-23 16:51:58 +03:00
func ( s * StateDB ) SubBalance ( addr common . Address , amount * uint256 . Int ) {
2024-01-14 14:32:23 +03:00
stateObject := s . getOrNewStateObject ( addr )
2016-12-06 04:16:03 +03:00
if stateObject != nil {
stateObject . SubBalance ( amount )
}
}
2024-01-23 16:51:58 +03:00
func ( s * StateDB ) SetBalance ( addr common . Address , amount * uint256 . Int ) {
2024-01-14 14:32:23 +03:00
stateObject := s . getOrNewStateObject ( addr )
2016-10-04 13:36:02 +03:00
if stateObject != nil {
stateObject . SetBalance ( amount )
}
}
2019-11-22 17:56:05 +03:00
func ( s * StateDB ) SetNonce ( addr common . Address , nonce uint64 ) {
2024-01-14 14:32:23 +03:00
stateObject := s . getOrNewStateObject ( addr )
2014-12-20 03:21:13 +02:00
if stateObject != nil {
2015-02-20 15:19:34 +02:00
stateObject . SetNonce ( nonce )
2014-12-20 03:21:13 +02:00
}
}
2019-11-22 17:56:05 +03:00
func ( s * StateDB ) SetCode ( addr common . Address , code [ ] byte ) {
2024-01-14 14:32:23 +03:00
stateObject := s . getOrNewStateObject ( addr )
2014-10-15 18:12:26 +03:00
if stateObject != nil {
2016-10-01 15:44:53 +03:00
stateObject . SetCode ( crypto . Keccak256Hash ( code ) , code )
2014-10-15 18:12:26 +03:00
}
}
2019-11-22 17:56:05 +03:00
func ( s * StateDB ) SetState ( addr common . Address , key , value common . Hash ) {
2024-01-14 14:32:23 +03:00
stateObject := s . getOrNewStateObject ( addr )
2014-10-16 14:38:21 +03:00
if stateObject != nil {
2023-07-24 13:22:09 +03:00
stateObject . SetState ( key , value )
2014-10-16 14:38:21 +03:00
}
}
// SetStorage replaces the entire storage for the specified account with given
// storage. This function should only be used for debugging and the mutations
// must be discarded afterwards.
func (s *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common.Hash) {
	// SetStorage needs to wipe existing storage. We achieve this by pretending
	// that the account self-destructed earlier in this block, by flagging
	// it in stateObjectsDestruct. The effect of doing so is that storage lookups
	// will not hit disk, since it is assumed that the disk-data is belonging
	// to a previous incarnation of the object.
	//
	// TODO(rjl493456442) this function should only be supported by 'unwritable'
	// state and all mutations made should all be discarded afterwards.
	if _, ok := s.stateObjectsDestruct[addr]; !ok {
		s.stateObjectsDestruct[addr] = nil
	}
	// getOrNewStateObject creates the account if it does not yet exist, so the
	// replacement storage always has a live object to attach to.
	stateObject := s.getOrNewStateObject(addr)
	for k, v := range storage {
		stateObject.SetState(k, v)
	}
}
// SelfDestruct marks the given account as selfdestructed.
// This clears the account balance.
//
// The account's state object is still available until the state is committed,
// getStateObject will return a non-nil account after SelfDestruct.
func (s *StateDB) SelfDestruct(addr common.Address) {
	stateObject := s.getStateObject(addr)
	if stateObject == nil {
		// Nothing to destruct for a non-existent account.
		return
	}
	// Record the previous destruct flag and balance so a revert can restore them.
	s.journal.append(selfDestructChange{
		account:     &addr,
		prev:        stateObject.selfDestructed,
		prevbalance: new(uint256.Int).Set(stateObject.Balance()),
	})
	stateObject.markSelfdestructed()
	// Zero out the balance only after the journal entry captured the old value.
	stateObject.data.Balance = new(uint256.Int)
}
2017-05-12 22:47:09 +03:00
2023-07-17 20:02:18 +03:00
func ( s * StateDB ) Selfdestruct6780 ( addr common . Address ) {
stateObject := s . getStateObject ( addr )
if stateObject == nil {
return
}
if stateObject . created {
s . SelfDestruct ( addr )
}
}
2022-11-16 12:18:52 +03:00
// SetTransientState sets transient storage for a given account. It
// adds the change to the journal so that it can be rolled back
// to its previous value if there is a revert.
func ( s * StateDB ) SetTransientState ( addr common . Address , key , value common . Hash ) {
prev := s . GetTransientState ( addr , key )
if prev == value {
return
}
s . journal . append ( transientStorageChange {
account : & addr ,
key : key ,
prevalue : prev ,
} )
s . setTransientState ( addr , key , value )
}
// setTransientState is a lower level setter for transient storage. It
// is called during a revert to prevent modifications to the journal.
func (s *StateDB) setTransientState(addr common.Address, key, value common.Hash) {
	s.transientStorage.Set(addr, key, value)
}
// GetTransientState gets transient storage for a given account.
func (s *StateDB) GetTransientState(addr common.Address, key common.Hash) common.Hash {
	return s.transientStorage.Get(addr, key)
}
2014-07-22 12:54:48 +03:00
//
2018-03-28 09:26:37 +03:00
// Setting, updating & deleting state object methods.
2014-07-22 12:54:48 +03:00
//
// updateStateObject writes the given object to the trie.
func (s *StateDB) updateStateObject(obj *stateObject) {
	// Nothing to do when trie-backed state is disabled.
	if s.noTrie {
		return
	}
	// Track the amount of time wasted on updating the account from the trie
	if metrics.EnabledExpensive {
		defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now())
	}
	// Encode the account and update the account trie
	addr := obj.Address()
	if err := s.trie.UpdateAccount(addr, &obj.data); err != nil {
		s.setError(fmt.Errorf("updateStateObject (%x) error: %v", addr[:], err))
	}
	// Flush dirty contract code alongside the account update.
	if obj.dirtyCode {
		s.trie.UpdateContractCode(obj.Address(), common.BytesToHash(obj.CodeHash()), obj.code)
	}
	// Track the original value of mutated account, nil means it was not present.
	// Skip if it has been tracked (because updateStateObject may be called
	// multiple times in a block).
	if _, ok := s.accountsOrigin[obj.address]; !ok {
		if obj.origin == nil {
			s.accountsOrigin[obj.address] = nil
		} else {
			s.accountsOrigin[obj.address] = types.SlimAccountRLP(*obj.origin)
		}
	}
}
// deleteStateObject removes the given object from the state trie.
func (s *StateDB) deleteStateObject(obj *stateObject) {
	// Nothing to do when trie-backed state is disabled.
	if s.noTrie {
		return
	}
	// Track the amount of time wasted on deleting the account from the trie
	if metrics.EnabledExpensive {
		defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now())
	}
	// Delete the account from the trie
	addr := obj.Address()
	if err := s.trie.DeleteAccount(addr); err != nil {
		s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", addr[:], err))
	}
}
2019-08-12 22:56:07 +03:00
// getStateObject retrieves a state object given by the address, returning nil if
// the object is not found or was deleted in this execution context. If you need
// to differentiate between non-existent/just-deleted, use getDeletedStateObject.
func ( s * StateDB ) getStateObject ( addr common . Address ) * stateObject {
if obj := s . getDeletedStateObject ( addr ) ; obj != nil && ! obj . deleted {
return obj
}
return nil
}
// getDeletedStateObject is similar to getStateObject, but instead of returning
// nil for a deleted state object, it returns the actual object with the deleted
// flag set. This is needed by the state journal to revert to the correct self-
// destructed object instead of wiping all knowledge about the state object.
func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
	// Prefer live objects if any is available
	if obj := s.stateObjects[addr]; obj != nil {
		return obj
	}
	// If no live objects are available, attempt to use snapshots
	var data *types.StateAccount
	if s.snap != nil {
		start := time.Now()
		acc, err := s.snap.Account(crypto.HashData(s.hasher, addr.Bytes()))
		if metrics.EnabledExpensive {
			s.SnapshotAccountReads += time.Since(start)
		}
		if err == nil {
			// A nil account from a healthy snapshot means the account is
			// definitely absent; no need to fall through to the trie.
			if acc == nil {
				return nil
			}
			data = &types.StateAccount{
				Nonce:    acc.Nonce,
				Balance:  acc.Balance,
				CodeHash: acc.CodeHash,
				Root:     common.BytesToHash(acc.Root),
			}
			// Normalize empty code hash / storage root to their canonical values.
			if len(data.CodeHash) == 0 {
				data.CodeHash = types.EmptyCodeHash.Bytes()
			}
			if data.Root == (common.Hash{}) {
				data.Root = types.EmptyRootHash
			}
		}
	}
	// If snapshot unavailable or reading from it failed, load from the database
	if data == nil {
		// Lazily open the account trie when it has not been needed so far.
		if s.trie == nil {
			tr, err := s.db.OpenTrie(s.originalRoot)
			if err != nil {
				s.setError(errors.New("failed to open trie tree"))
				return nil
			}
			s.trie = tr
		}
		start := time.Now()
		var err error
		data, err = s.trie.GetAccount(addr)
		if metrics.EnabledExpensive {
			s.AccountReads += time.Since(start)
		}
		if err != nil {
			s.setError(fmt.Errorf("getDeleteStateObject (%x) error: %w", addr.Bytes(), err))
			return nil
		}
		if data == nil {
			return nil
		}
	}
	// Insert into the live set
	obj := newObject(s, addr, data)
	s.setStateObject(obj)
	return obj
}
// setStateObject inserts the given object into the live object cache,
// keyed by its address.
func (s *StateDB) setStateObject(object *stateObject) {
	s.stateObjects[object.Address()] = object
}
2024-01-14 14:32:23 +03:00
// getOrNewStateObject retrieves a state object or create a new state object if nil.
func ( s * StateDB ) getOrNewStateObject ( addr common . Address ) * stateObject {
2019-11-22 17:56:05 +03:00
stateObject := s . getStateObject ( addr )
2019-08-12 22:56:07 +03:00
if stateObject == nil {
2019-11-22 17:56:05 +03:00
stateObject , _ = s . createObject ( addr )
2014-07-22 12:54:48 +03:00
}
return stateObject
}
// createObject creates a new state object. If there is an existing account with
// the given address, it is overwritten and returned as the second return value.
func (s *StateDB) createObject(addr common.Address) (newobj, prev *stateObject) {
	prev = s.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that!

	newobj = newObject(s, addr, nil)
	if prev == nil {
		s.journal.append(createObjectChange{account: &addr})
	} else {
		// The original account should be marked as destructed and all cached
		// account and storage data should be cleared as well. Note, it must
		// be done here, otherwise the destruction event of "original account"
		// will be lost.
		_, prevdestruct := s.stateObjectsDestruct[prev.address]
		if !prevdestruct {
			s.stateObjectsDestruct[prev.address] = prev.origin
		}
		// There may be some cached account/storage data already since IntermediateRoot
		// will be called for each transaction before byzantium fork which will always
		// cache the latest account/storage data.
		prevAccount, ok := s.accountsOrigin[prev.address]
		s.journal.append(resetObjectChange{
			account:                &addr,
			prev:                   prev,
			prevdestruct:           prevdestruct,
			prevAccount:            s.accounts[prev.addrHash],
			prevStorage:            s.storages[prev.addrHash],
			prevAccountOriginExist: ok,
			prevAccountOrigin:      prevAccount,
			prevStorageOrigin:      s.storagesOrigin[prev.address],
		})
		// Clear out cached data of the overwritten account; the journal entry
		// above retains everything needed to restore it on revert.
		delete(s.accounts, prev.addrHash)
		delete(s.storages, prev.addrHash)
		delete(s.accountsOrigin, prev.address)
		delete(s.storagesOrigin, prev.address)
	}
	s.setStateObject(newobj)
	// Only report a previous object to the caller if it was still live.
	if prev != nil && !prev.deleted {
		return newobj, prev
	}
	return newobj, nil
}
2016-10-04 13:36:02 +03:00
// CreateAccount explicitly creates a state object. If a state object with the address
// already exists the balance is carried over to the new account.
//
// CreateAccount is called during the EVM CREATE operation. The situation might arise that
// a contract does the following:
2014-07-22 12:54:48 +03:00
//
2023-02-02 14:36:37 +03:00
// 1. sends funds to sha(account ++ (nonce + 1))
// 2. tx_create(sha(account ++ nonce)) (note that this gets the address of 1)
2014-07-22 12:54:48 +03:00
//
2016-10-04 13:36:02 +03:00
// Carrying over the balance ensures that Ether doesn't disappear.
2019-11-22 17:56:05 +03:00
func ( s * StateDB ) CreateAccount ( addr common . Address ) {
newObj , prev := s . createObject ( addr )
2016-10-04 13:36:02 +03:00
if prev != nil {
2018-12-14 16:55:03 +03:00
newObj . setBalance ( prev . data . Balance )
2016-10-04 13:36:02 +03:00
}
2017-02-23 01:29:59 +03:00
}
// Copy creates a deep, independent copy of the state.
// Snapshots of the copied state cannot be applied to the copy.
func (s *StateDB) Copy() *StateDB {
	// Inactive prefetcher copy: the caller is not expected to prefetch.
	return s.copyInternal(false)
}
// CopyDoPrefetch creates a deep, independent copy of the state that keeps the
// trie prefetcher active on the copy.
// It is mainly for state prefetcher to do trie prefetch right now.
func (s *StateDB) CopyDoPrefetch() *StateDB {
	return s.copyInternal(true)
}
// copyInternal creates a deep, independent copy of the state.
// If doPrefetch is true, it tries to reuse the prefetcher, the copied StateDB
// will do active trie prefetch; otherwise, just do inactive copy trie prefetcher.
func (s *StateDB) copyInternal(doPrefetch bool) *StateDB {
	// Copy all the basic fields, initialize the memory ones
	state := &StateDB{
		db:   s.db,
		trie: s.db.CopyTrie(s.trie),
		// noTrie:s.noTrie,
		// expectedRoot: s.expectedRoot,
		// stateRoot: s.stateRoot,
		originalRoot: s.originalRoot,
		// fullProcessed: s.fullProcessed,
		// pipeCommit: s.pipeCommit,
		accounts:             make(map[common.Hash][]byte),
		storages:             make(map[common.Hash]map[common.Hash][]byte),
		accountsOrigin:       make(map[common.Address][]byte),
		storagesOrigin:       make(map[common.Address]map[common.Hash][]byte),
		stateObjects:         make(map[common.Address]*stateObject, len(s.journal.dirties)),
		stateObjectsPending:  make(map[common.Address]struct{}, len(s.stateObjectsPending)),
		stateObjectsDirty:    make(map[common.Address]struct{}, len(s.journal.dirties)),
		stateObjectsDestruct: make(map[common.Address]*types.StateAccount, len(s.stateObjectsDestruct)),
		storagePool:          s.storagePool,
		// writeOnSharedStorage: s.writeOnSharedStorage,
		refund:    s.refund,
		logs:      make(map[common.Hash][]*types.Log, len(s.logs)),
		logSize:   s.logSize,
		preimages: make(map[common.Hash][]byte, len(s.preimages)),
		journal:   newJournal(),
		hasher:    crypto.NewKeccakState(),
		// In order for the block producer to be able to use and make additions
		// to the snapshot tree, we need to copy that as well. Otherwise, any
		// block mined by ourselves will cause gaps in the tree, and force the
		// miner to operate trie-backed only.
		snaps: s.snaps,
		snap:  s.snap,
	}
	// Copy the dirty states, logs, and preimages
	for addr := range s.journal.dirties {
		// As documented [here](https://github.com/ethereum/go-ethereum/pull/16485#issuecomment-380438527),
		// and in the Finalise-method, there is a case where an object is in the journal but not
		// in the stateObjects: OOG after touch on ripeMD prior to Byzantium. Thus, we need to check for
		// nil
		if object, exist := s.stateObjects[addr]; exist {
			// Even though the original object is dirty, we are not copying the journal,
			// so we need to make sure that any side-effect the journal would have caused
			// during a commit (or similar op) is already applied to the copy.
			state.stateObjects[addr] = object.deepCopy(state)

			state.stateObjectsDirty[addr] = struct{}{}   // Mark the copy dirty to force internal (code/state) commits
			state.stateObjectsPending[addr] = struct{}{} // Mark the copy pending to force external (account) commits
		}
	}
	// Above, we don't copy the actual journal. This means that if the copy
	// is copied, the loop above will be a no-op, since the copy's journal
	// is empty. Thus, here we iterate over stateObjects, to enable copies
	// of copies.
	for addr := range s.stateObjectsPending {
		if _, exist := state.stateObjects[addr]; !exist {
			state.stateObjects[addr] = s.stateObjects[addr].deepCopy(state)
		}
		state.stateObjectsPending[addr] = struct{}{}
	}
	for addr := range s.stateObjectsDirty {
		if _, exist := state.stateObjects[addr]; !exist {
			state.stateObjects[addr] = s.stateObjects[addr].deepCopy(state)
		}
		state.stateObjectsDirty[addr] = struct{}{}
	}
	// Deep copy the destruction markers.
	for addr, value := range s.stateObjectsDestruct {
		state.stateObjectsDestruct[addr] = value
	}
	// Deep copy the state changes made in the scope of block
	// along with their original values.
	state.accounts = copySet(s.accounts)
	state.storages = copy2DSet(s.storages)
	state.accountsOrigin = copySet(s.accountsOrigin)
	state.storagesOrigin = copy2DSet(s.storagesOrigin)

	// Deep copy the logs occurred in the scope of block
	for hash, logs := range s.logs {
		cpy := make([]*types.Log, len(logs))
		for i, l := range logs {
			cpy[i] = new(types.Log)
			*cpy[i] = *l
		}
		state.logs[hash] = cpy
	}
	// Deep copy the preimages occurred in the scope of block
	for hash, preimage := range s.preimages {
		state.preimages[hash] = preimage
	}
	// Do we need to copy the access list and transient storage?
	// In practice: No. At the start of a transaction, these two lists are empty.
	// In practice, we only ever copy state _between_ transactions/blocks, never
	// in the middle of a transaction. However, it doesn't cost us much to copy
	// empty lists, so we do it anyway to not blow up if we ever decide copy them
	// in the middle of a transaction.
	if s.accessList != nil {
		state.accessList = s.accessList.Copy()
	}
	state.transientStorage = s.transientStorage.Copy()

	state.prefetcher = s.prefetcher
	if s.prefetcher != nil && !doPrefetch {
		// If there's a prefetcher running, make an inactive copy of it that can
		// only access data but does not actively preload (since the user will not
		// know that they need to explicitly terminate an active copy).
		state.prefetcher = state.prefetcher.copy()
	}
	return state
}
// Snapshot returns an identifier for the current revision of the state.
func (s *StateDB) Snapshot() int {
	id := s.nextRevisionId
	s.nextRevisionId++
	// Revisions are appended in increasing id order, so validRevisions stays
	// sorted — RevertToSnapshot relies on this for its binary search.
	s.validRevisions = append(s.validRevisions, revision{id, s.journal.length()})
	return id
}
// RevertToSnapshot reverts all state changes made since the given revision.
2019-11-22 17:56:05 +03:00
func ( s * StateDB ) RevertToSnapshot ( revid int ) {
2016-10-04 13:36:02 +03:00
// Find the snapshot in the stack of valid snapshots.
2019-11-22 17:56:05 +03:00
idx := sort . Search ( len ( s . validRevisions ) , func ( i int ) bool {
return s . validRevisions [ i ] . id >= revid
2016-10-04 13:36:02 +03:00
} )
2019-11-22 17:56:05 +03:00
if idx == len ( s . validRevisions ) || s . validRevisions [ idx ] . id != revid {
2016-10-04 13:36:02 +03:00
panic ( fmt . Errorf ( "revision id %v cannot be reverted" , revid ) )
}
2019-11-22 17:56:05 +03:00
snapshot := s . validRevisions [ idx ] . journalIndex
2016-10-04 13:36:02 +03:00
2018-03-27 15:13:30 +03:00
// Replay the journal to undo changes and remove invalidated snapshots
2019-11-22 17:56:05 +03:00
s . journal . revert ( s , snapshot )
s . validRevisions = s . validRevisions [ : idx ]
2014-07-22 12:54:48 +03:00
}
// GetRefund returns the current value of the refund counter.
func (s *StateDB) GetRefund() uint64 {
	return s.refund
}
2022-07-05 06:14:21 +03:00
// WaitPipeVerification waits until the snapshot been verified
func ( s * StateDB ) WaitPipeVerification ( ) error {
// Need to wait for the parent trie to commit
if s . snap != nil {
if valid := s . snap . WaitAndGetVerifyRes ( ) ; ! valid {
2024-03-11 09:52:33 +03:00
return errors . New ( "verification on parent snap failed" )
2022-07-05 06:14:21 +03:00
}
}
return nil
}
// Finalise finalises the state by removing the destructed objects and clears
// the journal as well as the refunds. Finalise, however, will not push any updates
// into the tries just yet. Only IntermediateRoot or Commit will do that.
func (s *StateDB) Finalise(deleteEmptyObjects bool) {
	addressesToPrefetch := make([][]byte, 0, len(s.journal.dirties))
	for addr := range s.journal.dirties {
		obj, exist := s.stateObjects[addr]
		if !exist {
			// ripeMD is 'touched' at block 1714175, in tx 0x1237f737031e40bcde4a8b7e717b2d15e3ecadfe49bb1bbc71ee9deb09c6fcf2
			// That tx goes out of gas, and although the notion of 'touched' does not exist there, the
			// touch-event will still be recorded in the journal. Since ripeMD is a special snowflake,
			// it will persist in the journal even though the journal is reverted. In this special circumstance,
			// it may exist in `s.journal.dirties` but not in `s.stateObjects`.
			// Thus, we can safely ignore it here
			continue
		}
		if obj.selfDestructed || (deleteEmptyObjects && obj.empty()) {
			obj.deleted = true

			// We need to maintain account deletions explicitly (will remain
			// set indefinitely). Note only the first occurred self-destruct
			// event is tracked.
			if _, ok := s.stateObjectsDestruct[obj.address]; !ok {
				s.stateObjectsDestruct[obj.address] = obj.origin
			}
			// Note, we can't do this only at the end of a block because multiple
			// transactions within the same block might self destruct and then
			// resurrect an account; but the snapshotter needs both events.
			delete(s.accounts, obj.addrHash)      // Clear out any previously updated account data (may be recreated via a resurrect)
			delete(s.storages, obj.addrHash)      // Clear out any previously updated storage data (may be recreated via a resurrect)
			delete(s.accountsOrigin, obj.address) // Clear out any previously updated account data (may be recreated via a resurrect)
			delete(s.storagesOrigin, obj.address) // Clear out any previously updated storage data (may be recreated via a resurrect)
		} else {
			obj.finalise(true) // Prefetch slots in the background
		}
		obj.created = false
		s.stateObjectsPending[addr] = struct{}{}
		s.stateObjectsDirty[addr] = struct{}{}

		// At this point, also ship the address off to the precacher. The precacher
		// will start loading tries, and when the change is eventually committed,
		// the commit-phase will be a lot faster
		addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure
	}
	prefetcher := s.prefetcher
	if prefetcher != nil && len(addressesToPrefetch) > 0 {
		// NOTE(review): s.snap is dereferenced here without a nil check; this
		// assumes a prefetcher is only active while a snapshot exists — confirm.
		if s.snap.Verified() {
			prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, addressesToPrefetch)
		} else if prefetcher.rootParent != (common.Hash{}) {
			prefetcher.prefetch(common.Hash{}, prefetcher.rootParent, common.Address{}, addressesToPrefetch)
		}
	}
	// Invalidate journal because reverting across transactions is not allowed.
	s.clearJournalAndRefund()
}
// IntermediateRoot computes the current root hash of the state trie.
// It is called in between transactions to get the root hash that
// goes into transaction receipts.
func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
	// Finalise all the dirty storage states and write them into the tries
	s.Finalise(deleteEmptyObjects)
	// Compute per-account storage roots first, then the overall state root.
	s.AccountsIntermediateRoot()
	return s.StateIntermediateRoot()
}
// CorrectAccountsRoot will fix account roots in pipecommit mode
func (s *StateDB) CorrectAccountsRoot(blockRoot common.Hash) {
	// Pick the snapshot to read from: the current one for the zero root,
	// otherwise look up the snapshot for the given block root.
	var snapshot snapshot.Snapshot
	if blockRoot == (common.Hash{}) {
		snapshot = s.snap
	} else if s.snaps != nil {
		snapshot = s.snaps.Snapshot(blockRoot)
	}
	if snapshot == nil {
		return
	}
	if accounts, err := snapshot.Accounts(); err == nil && accounts != nil {
		// Overwrite each live object's storage root with the snapshot's view.
		for _, obj := range s.stateObjects {
			if !obj.deleted {
				if account, exist := accounts[crypto.Keccak256Hash(obj.address[:])]; exist {
					// An empty root in the snapshot encodes the canonical empty trie.
					if len(account.Root) == 0 {
						obj.data.Root = types.EmptyRootHash
					} else {
						obj.data.Root = common.BytesToHash(account.Root)
					}
				}
			}
		}
	}
}
2023-02-02 14:36:37 +03:00
// PopulateSnapAccountAndStorage tries to populate required accounts and storages for pipecommit
2022-07-05 06:14:21 +03:00
func ( s * StateDB ) PopulateSnapAccountAndStorage ( ) {
for addr := range s . stateObjectsPending {
if obj := s . stateObjects [ addr ] ; ! obj . deleted {
if s . snap != nil {
2022-07-29 09:21:37 +03:00
s . populateSnapStorage ( obj )
2023-08-23 12:46:08 +03:00
s . accounts [ obj . addrHash ] = types . SlimAccountRLP ( obj . data )
2022-07-05 06:14:21 +03:00
}
}
}
}
// populateSnapStorage folds the object's dirty storage slots into its pending
// set for pipecommit and, when snapshotting is active, caches the RLP-encoded
// slot values until commit. It returns true if the object has any pending
// storage slots, false otherwise.
func (s *StateDB) populateSnapStorage(obj *stateObject) bool {
	for key, value := range obj.dirtyStorage {
		obj.pendingStorage[key] = value
	}
	if len(obj.pendingStorage) == 0 {
		return false
	}
	hasher := crypto.NewKeccakState()
	var storage map[common.Hash][]byte
	for key, value := range obj.pendingStorage {
		var v []byte
		if (value != common.Hash{}) {
			// Encoding []byte cannot fail, ok to ignore the error.
			v, _ = rlp.EncodeToBytes(common.TrimLeftZeroes(value[:]))
		}
		// If state snapshotting is active, cache the data til commit
		if obj.db.snap != nil {
			if storage == nil {
				// Retrieve the old storage map, if available, create a new one otherwise
				if storage = obj.db.storages[obj.addrHash]; storage == nil {
					storage = make(map[common.Hash][]byte)
					obj.db.storages[obj.addrHash] = storage
				}
			}
			storage[crypto.HashData(hasher, key[:])] = v // v will be nil if value is 0x00
		}
	}
	return true
}
func ( s * StateDB ) AccountsIntermediateRoot ( ) {
tasks := make ( chan func ( ) )
finishCh := make ( chan struct { } )
defer close ( finishCh )
wg := sync . WaitGroup { }
for i := 0 ; i < runtime . NumCPU ( ) ; i ++ {
go func ( ) {
for {
select {
case task := <- tasks :
task ( )
case <- finishCh :
return
}
}
2021-01-08 16:01:49 +03:00
} ( )
}
2022-07-05 06:14:21 +03:00
2021-01-08 16:01:49 +03:00
// Although naively it makes sense to retrieve the account trie and then do
// the contract storage and account updates sequentially, that short circuits
// the account prefetcher. Instead, let's process all the storage updates
2022-08-04 11:03:20 +03:00
// first, giving the account prefetches just a few more milliseconds of time
2021-01-08 16:01:49 +03:00
// to pull useful data from disk.
for addr := range s . stateObjectsPending {
if obj := s . stateObjects [ addr ] ; ! obj . deleted {
2022-07-05 06:14:21 +03:00
wg . Add ( 1 )
tasks <- func ( ) {
2023-08-23 12:46:08 +03:00
obj . updateRoot ( )
2022-07-05 06:14:21 +03:00
2023-09-15 08:49:07 +03:00
// Cache the data until commit. Note, this update mechanism is not symmetric
// to the deletion, because whereas it is enough to track account updates
// at commit time, deletions need tracking at transaction boundary level to
// ensure we capture state clearing.
2023-08-23 12:46:08 +03:00
s . AccountMux . Lock ( )
s . accounts [ obj . addrHash ] = types . SlimAccountRLP ( obj . data )
s . AccountMux . Unlock ( )
2023-09-15 08:49:07 +03:00
2022-07-05 06:14:21 +03:00
wg . Done ( )
}
2021-01-08 16:01:49 +03:00
}
}
2022-07-05 06:14:21 +03:00
wg . Wait ( )
}
func ( s * StateDB ) StateIntermediateRoot ( ) common . Hash {
// If there was a trie prefetcher operating, it gets aborted and irrevocably
// modified after we start retrieving tries. Remove it from the statedb after
// this round of use.
//
// This is weird pre-byzantium since the first tx runs with a prefetcher and
// the remainder without, but pre-byzantium even the initial prefetcher is
// useless, so no sleep lost.
prefetcher := s . prefetcher
2022-08-31 08:30:25 +03:00
defer s . StopPrefetcher ( )
2022-07-05 06:14:21 +03:00
2021-01-08 16:01:49 +03:00
// Now we're about to start to write changes to the trie. The trie is so far
// _untouched_. We can check with the prefetcher, if it can give us a trie
// which has the same root, but also has some content loaded into it.
if prefetcher != nil {
2022-06-06 18:14:55 +03:00
if trie := prefetcher . trie ( common . Hash { } , s . originalRoot ) ; trie != nil {
2021-01-08 16:01:49 +03:00
s . trie = trie
2020-02-05 15:12:09 +03:00
}
}
2022-07-05 06:14:21 +03:00
if s . trie == nil {
tr , err := s . db . OpenTrie ( s . originalRoot )
if err != nil {
2023-09-07 11:39:29 +03:00
panic ( fmt . Sprintf ( "failed to open trie tree %s" , s . originalRoot ) )
2019-08-12 22:56:07 +03:00
}
2022-07-05 06:14:21 +03:00
s . trie = tr
2021-01-08 16:01:49 +03:00
}
2022-07-05 06:14:21 +03:00
usedAddrs := make ( [ ] [ ] byte , 0 , len ( s . stateObjectsPending ) )
if ! s . noTrie {
for addr := range s . stateObjectsPending {
if obj := s . stateObjects [ addr ] ; obj . deleted {
s . deleteStateObject ( obj )
} else {
s . updateStateObject ( obj )
}
usedAddrs = append ( usedAddrs , common . CopyBytes ( addr [ : ] ) ) // Copy needed for closure
}
if prefetcher != nil {
2023-08-23 12:46:08 +03:00
prefetcher . used ( common . Hash { } , s . originalRoot , usedAddrs )
2022-07-05 06:14:21 +03:00
}
2019-08-12 22:56:07 +03:00
}
2022-07-05 06:14:21 +03:00
2019-08-12 22:56:07 +03:00
if len ( s . stateObjectsPending ) > 0 {
s . stateObjectsPending = make ( map [ common . Address ] struct { } )
}
2019-03-25 11:01:18 +03:00
// Track the amount of time wasted on hashing the account trie
if metrics . EnabledExpensive {
defer func ( start time . Time ) { s . AccountHashes += time . Since ( start ) } ( time . Now ( ) )
}
2022-07-05 06:14:21 +03:00
if s . noTrie {
return s . expectedRoot
} else {
return s . trie . Hash ( )
}
2014-07-22 12:54:48 +03:00
}
2022-11-16 12:18:52 +03:00
// SetTxContext sets the current transaction hash and index which are
// used when the EVM emits new state logs. It should be invoked before
// transaction execution.
func ( s * StateDB ) SetTxContext ( thash common . Hash , ti int ) {
2019-11-22 17:56:05 +03:00
s . thash = thash
s . txIndex = ti
2023-09-15 08:49:07 +03:00
s . accessList = nil // can't delete this line now, because StateDB.Prepare is not called before processsing a system transaction
2017-02-02 00:36:51 +03:00
}
2016-10-04 13:36:02 +03:00
func ( s * StateDB ) clearJournalAndRefund ( ) {
2019-08-12 22:56:07 +03:00
if len ( s . journal . entries ) > 0 {
s . journal = newJournal ( )
s . refund = 0
}
2022-08-19 09:00:21 +03:00
s . validRevisions = s . validRevisions [ : 0 ] // Snapshots can be created without journal entries
2016-10-04 13:36:02 +03:00
}
2023-08-26 11:13:22 +03:00
// fastDeleteStorage is the function that efficiently deletes the storage trie
// of a specific account. It leverages the associated state snapshot for fast
// storage iteration and constructs trie node deletion markers by creating
// stack trie with iterated slots.
func ( s * StateDB ) fastDeleteStorage ( addrHash common . Hash , root common . Hash ) ( bool , common . StorageSize , map [ common . Hash ] [ ] byte , * trienode . NodeSet , error ) {
iter , err := s . snaps . StorageIterator ( s . originalRoot , addrHash , common . Hash { } )
if err != nil {
return false , 0 , nil , nil , err
}
defer iter . Release ( )
var (
size common . StorageSize
nodes = trienode . NewNodeSet ( addrHash )
slots = make ( map [ common . Hash ] [ ] byte )
)
2023-10-17 15:09:25 +03:00
options := trie . NewStackTrieOptions ( )
options = options . WithWriter ( func ( path [ ] byte , hash common . Hash , blob [ ] byte ) {
2023-08-26 11:13:22 +03:00
nodes . AddNode ( path , trienode . NewDeleted ( ) )
size += common . StorageSize ( len ( path ) )
} )
2023-10-17 15:09:25 +03:00
stack := trie . NewStackTrie ( options )
2023-08-26 11:13:22 +03:00
for iter . Next ( ) {
if size > storageDeleteLimit {
return true , size , nil , nil , nil
}
slot := common . CopyBytes ( iter . Slot ( ) )
2023-09-15 09:09:07 +03:00
if err := iter . Error ( ) ; err != nil { // error might occur after Slot function
2023-08-26 11:13:22 +03:00
return false , 0 , nil , nil , err
}
size += common . StorageSize ( common . HashLength + len ( slot ) )
slots [ iter . Hash ( ) ] = slot
if err := stack . Update ( iter . Hash ( ) . Bytes ( ) , slot ) ; err != nil {
return false , 0 , nil , nil , err
}
}
2023-09-15 09:09:07 +03:00
if err := iter . Error ( ) ; err != nil { // error might occur during iteration
2023-08-26 11:13:22 +03:00
return false , 0 , nil , nil , err
}
if stack . Hash ( ) != root {
return false , 0 , nil , nil , fmt . Errorf ( "snapshot is not matched, exp %x, got %x" , root , stack . Hash ( ) )
}
return false , size , slots , nodes , nil
}
// slowDeleteStorage serves as a less-efficient alternative to "fastDeleteStorage,"
// employed when the associated state snapshot is not available. It iterates the
// storage slots along with all internal trie nodes via trie directly.
func ( s * StateDB ) slowDeleteStorage ( addr common . Address , addrHash common . Hash , root common . Hash ) ( bool , common . StorageSize , map [ common . Hash ] [ ] byte , * trienode . NodeSet , error ) {
2023-11-14 15:09:40 +03:00
tr , err := s . db . OpenStorageTrie ( s . originalRoot , addr , root , s . trie )
2023-07-11 16:43:23 +03:00
if err != nil {
2023-08-26 11:13:22 +03:00
return false , 0 , nil , nil , fmt . Errorf ( "failed to open storage trie, err: %w" , err )
2023-07-11 16:43:23 +03:00
}
2023-10-10 13:15:24 +03:00
// skip deleting storages for EmptyTrie
if _ , ok := tr . ( * trie . EmptyTrie ) ; ok {
2023-08-26 11:13:22 +03:00
return false , 0 , nil , nil , nil
2023-10-10 13:15:24 +03:00
}
2023-07-11 16:43:23 +03:00
it , err := tr . NodeIterator ( nil )
if err != nil {
2023-08-26 11:13:22 +03:00
return false , 0 , nil , nil , fmt . Errorf ( "failed to open storage iterator, err: %w" , err )
2023-07-11 16:43:23 +03:00
}
var (
2023-08-26 11:13:22 +03:00
size common . StorageSize
nodes = trienode . NewNodeSet ( addrHash )
slots = make ( map [ common . Hash ] [ ] byte )
2023-07-11 16:43:23 +03:00
)
for it . Next ( true ) {
2023-08-26 11:13:22 +03:00
if size > storageDeleteLimit {
return true , size , nil , nil , nil
2023-07-11 16:43:23 +03:00
}
if it . Leaf ( ) {
slots [ common . BytesToHash ( it . LeafKey ( ) ) ] = common . CopyBytes ( it . LeafBlob ( ) )
2023-08-26 11:13:22 +03:00
size += common . StorageSize ( common . HashLength + len ( it . LeafBlob ( ) ) )
2023-07-11 16:43:23 +03:00
continue
}
if it . Hash ( ) == ( common . Hash { } ) {
continue
}
2023-08-26 11:13:22 +03:00
size += common . StorageSize ( len ( it . Path ( ) ) )
nodes . AddNode ( it . Path ( ) , trienode . NewDeleted ( ) )
2023-07-11 16:43:23 +03:00
}
if err := it . Error ( ) ; err != nil {
2023-08-26 11:13:22 +03:00
return false , 0 , nil , nil , err
}
return false , size , slots , nodes , nil
}
// deleteStorage is designed to delete the storage trie of a designated account.
// It could potentially be terminated if the storage size is excessively large,
// potentially leading to an out-of-memory panic. The function will make an attempt
// to utilize an efficient strategy if the associated state snapshot is reachable;
// otherwise, it will resort to a less-efficient approach.
func ( s * StateDB ) deleteStorage ( addr common . Address , addrHash common . Hash , root common . Hash ) ( bool , map [ common . Hash ] [ ] byte , * trienode . NodeSet , error ) {
var (
start = time . Now ( )
err error
aborted bool
size common . StorageSize
slots map [ common . Hash ] [ ] byte
nodes * trienode . NodeSet
)
// The fast approach can be failed if the snapshot is not fully
// generated, or it's internally corrupted. Fallback to the slow
// one just in case.
if s . snap != nil {
aborted , size , slots , nodes , err = s . fastDeleteStorage ( addrHash , root )
}
if s . snap == nil || err != nil {
aborted , size , slots , nodes , err = s . slowDeleteStorage ( addr , addrHash , root )
}
if err != nil {
2023-07-11 16:43:23 +03:00
return false , nil , nil , err
}
if metrics . EnabledExpensive {
2023-08-26 11:13:22 +03:00
if aborted {
slotDeletionSkip . Inc ( 1 )
}
n := int64 ( len ( slots ) )
metrics: refactor metrics (#28035)
This change includes a lot of things, listed below.
### Split up interfaces, write vs read
The interfaces have been split up into one write-interface and one read-interface, with `Snapshot` being the gateway from write to read. This simplifies the semantics _a lot_.
Example of splitting up an interface into one readonly 'snapshot' part, and one updatable writeonly part:
```golang
type MeterSnapshot interface {
Count() int64
Rate1() float64
Rate5() float64
Rate15() float64
RateMean() float64
}
// Meters count events to produce exponentially-weighted moving average rates
// at one-, five-, and fifteen-minutes and a mean rate.
type Meter interface {
Mark(int64)
Snapshot() MeterSnapshot
Stop()
}
```
### A note about concurrency
This PR makes the concurrency model clearer. We have actual meters and snapshot of meters. The `meter` is the thing which can be accessed from the registry, and updates can be made to it.
- For all `meters`, (`Gauge`, `Timer` etc), it is assumed that they are accessed by different threads, making updates. Therefore, all `meters` update-methods (`Inc`, `Add`, `Update`, `Clear` etc) need to be concurrency-safe.
- All `meters` have a `Snapshot()` method. This method is _usually_ called from one thread, a backend-exporter. But it's fully possible to have several exporters simultaneously: therefore this method should also be concurrency-safe.
TLDR: `meter`s are accessible via registry, all their methods must be concurrency-safe.
For all `Snapshot`s, it is assumed that an individual exporter-thread has obtained a `meter` from the registry, and called the `Snapshot` method to obtain a readonly snapshot. This snapshot is _not_ guaranteed to be concurrency-safe. There's no need for a snapshot to be concurrency-safe, since exporters should not share snapshots.
Note, though: that by happenstance a lot of the snapshots _are_ concurrency-safe, being unmutable minimal representations of a value. Only the more complex ones are _not_ threadsafe, those that lazily calculate things like `Variance()`, `Mean()`.
Example of how a background exporter typically works, obtaining the snapshot and sequentially accessing the non-threadsafe methods in it:
```golang
ms := metric.Snapshot()
...
fields := map[string]interface{}{
"count": ms.Count(),
"max": ms.Max(),
"mean": ms.Mean(),
"min": ms.Min(),
"stddev": ms.StdDev(),
"variance": ms.Variance(),
```
TLDR: `snapshots` are not guaranteed to be concurrency-safe (but often are).
### Sample changes
I also changed the `Sample` type: previously, it iterated the samples fully every time `Mean()`,`Sum()`, `Min()` or `Max()` was invoked. Since we now have readonly base data, we can just iterate it once, in the constructor, and set all four values at once.
The same thing has been done for runtimehistogram.
### ResettingTimer API
Back when ResettingTImer was implemented, as part of https://github.com/ethereum/go-ethereum/pull/15910, Anton implemented a `Percentiles` on the new type. However, the method did not conform to the other existing types which also had a `Percentiles`.
1. The existing ones, on input, took `0.5` to mean `50%`. Anton used `50` to mean `50%`.
2. The existing ones returned `float64` outputs, thus interpolating between values. A value-set of `0, 10`, at `50%` would return `5`, whereas Anton's would return either `0` or `10`.
This PR removes the 'new' version, and uses only the 'legacy' percentiles, also for the ResettingTimer type.
The resetting timer snapshot was also defined so that it would expose the internal values. This has been removed, and getters for `Max, Min, Mean` have been added instead.
### Unexport types
A lot of types were exported, but do not need to be. This PR unexports quite a lot of them.
2023-09-13 20:13:47 +03:00
slotDeletionMaxCount . UpdateIfGt ( int64 ( len ( slots ) ) )
slotDeletionMaxSize . UpdateIfGt ( int64 ( size ) )
2023-07-11 16:43:23 +03:00
slotDeletionTimer . UpdateSince ( start )
2023-08-26 11:13:22 +03:00
slotDeletionCount . Mark ( n )
slotDeletionSize . Mark ( int64 ( size ) )
2023-07-11 16:43:23 +03:00
}
2023-08-26 11:13:22 +03:00
return aborted , slots , nodes , nil
2023-07-11 16:43:23 +03:00
}
2022-07-05 06:14:21 +03:00
2023-07-11 16:43:23 +03:00
// handleDestruction processes all destruction markers and deletes the account
// and associated storage slots if necessary. There are four possible situations
// here:
//
// - the account was not existent and be marked as destructed
//
// - the account was not existent and be marked as destructed,
// however, it's resurrected later in the same block.
//
// - the account was existent and be marked as destructed
//
// - the account was existent and be marked as destructed,
// however it's resurrected later in the same block.
//
// In case (a), nothing needs be deleted, nil to nil transition can be ignored.
//
// In case (b), nothing needs be deleted, nil is used as the original value for
// newly created account and storages
//
// In case (c), **original** account along with its storages should be deleted,
// with their values be tracked as original value.
//
// In case (d), **original** account along with its storages should be deleted,
// with their values be tracked as original value.
2023-07-31 15:07:51 +03:00
func ( s * StateDB ) handleDestruction ( nodes * trienode . MergedNodeSet ) ( map [ common . Address ] struct { } , error ) {
2023-10-08 13:55:30 +03:00
// Short circuit if geth is running with hash mode. This procedure can consume
// considerable time and storage deletion isn't supported in hash mode, thus
// preemptively avoiding unnecessary expenses.
2023-07-31 15:07:51 +03:00
incomplete := make ( map [ common . Address ] struct { } )
2023-10-08 13:55:30 +03:00
if s . db . TrieDB ( ) . Scheme ( ) == rawdb . HashScheme {
return incomplete , nil
}
2023-07-11 16:43:23 +03:00
for addr , prev := range s . stateObjectsDestruct {
// The original account was non-existing, and it's marked as destructed
// in the scope of block. It can be case (a) or (b).
// - for (a), skip it without doing anything.
// - for (b), track account's original value as nil. It may overwrite
// the data cached in s.accountsOrigin set by 'updateStateObject'.
addrHash := crypto . Keccak256Hash ( addr [ : ] )
if prev == nil {
if _ , ok := s . accounts [ addrHash ] ; ok {
2023-07-31 15:07:51 +03:00
s . accountsOrigin [ addr ] = nil // case (b)
2022-07-05 06:14:21 +03:00
}
2023-07-11 16:43:23 +03:00
continue
}
// It can overwrite the data in s.accountsOrigin set by 'updateStateObject'.
2023-07-31 15:07:51 +03:00
s . accountsOrigin [ addr ] = types . SlimAccountRLP ( * prev ) // case (c) or (d)
2022-07-05 06:14:21 +03:00
2023-07-11 16:43:23 +03:00
// Short circuit if the storage was empty.
if prev . Root == types . EmptyRootHash {
continue
}
// Remove storage slots belong to the account.
aborted , slots , set , err := s . deleteStorage ( addr , addrHash , prev . Root )
if err != nil {
return nil , fmt . Errorf ( "failed to delete storage, err: %w" , err )
}
// The storage is too huge to handle, skip it but mark as incomplete.
// For case (d), the account is resurrected might with a few slots
// created. In this case, wipe the entire storage state diff because
// of aborted deletion.
if aborted {
2023-07-31 15:07:51 +03:00
incomplete [ addr ] = struct { } { }
delete ( s . storagesOrigin , addr )
2023-07-11 16:43:23 +03:00
continue
}
2023-07-31 15:07:51 +03:00
if s . storagesOrigin [ addr ] == nil {
s . storagesOrigin [ addr ] = slots
2023-07-11 16:43:23 +03:00
} else {
// It can overwrite the data in s.storagesOrigin[addrHash] set by
// 'object.updateTrie'.
for key , val := range slots {
2023-07-31 15:07:51 +03:00
s . storagesOrigin [ addr ] [ key ] = val
2022-07-05 06:14:21 +03:00
}
2023-07-11 16:43:23 +03:00
}
if err := nodes . Merge ( set ) ; err != nil {
return nil , err
2020-08-21 15:10:40 +03:00
}
}
2023-07-11 16:43:23 +03:00
return incomplete , nil
2022-07-05 06:14:21 +03:00
}
cmd, core/state, eth, tests, trie: improve state reader (#27428)
The state availability is checked during the creation of a state reader.
- In hash-based database, if the specified root node does not exist on disk disk, then
the state reader won't be created and an error will be returned.
- In path-based database, if the specified state layer is not available, then the
state reader won't be created and an error will be returned.
This change also contains a stricter semantics regarding the `Commit` operation: once it has been performed, the trie is no longer usable, and certain operations will return an error.
2023-06-20 22:31:45 +03:00
// Once the state is committed, tries cached in stateDB (including account
// trie, storage tries) will no longer be functional. A new state instance
// must be created with new root and updated database for accessing post-
// commit states.
2023-07-24 13:22:09 +03:00
//
// The associated block number of the state transition is also provided
// for more chain context.
2023-08-23 12:46:08 +03:00
func ( s * StateDB ) Commit ( block uint64 , failPostCommitFunc func ( ) , postCommitFuncs ... func ( ) error ) ( common . Hash , * types . DiffLayer , error ) {
2023-03-16 10:12:34 +03:00
// Short circuit in case any database failure occurred earlier.
2022-07-05 06:14:21 +03:00
if s . dbErr != nil {
2022-08-31 08:30:25 +03:00
s . StopPrefetcher ( )
2022-07-05 06:14:21 +03:00
return common . Hash { } , nil , fmt . Errorf ( "commit aborted due to earlier error: %v" , s . dbErr )
2019-03-25 11:01:18 +03:00
}
2022-07-05 06:14:21 +03:00
// Finalize any pending changes and merge everything into the tries
2022-08-04 11:03:20 +03:00
var (
2023-08-23 12:46:08 +03:00
diffLayer * types . DiffLayer
verified chan struct { }
snapUpdated chan struct { }
2023-09-15 08:49:07 +03:00
incomplete map [ common . Address ] struct { }
2023-08-23 12:46:08 +03:00
nodes = trienode . NewMergedNodeSet ( )
2022-08-04 11:03:20 +03:00
)
2023-08-23 12:46:08 +03:00
2019-08-06 13:40:28 +03:00
if s . snap != nil {
2022-07-05 06:14:21 +03:00
diffLayer = & types . DiffLayer { }
}
if s . pipeCommit {
// async commit the MPT
verified = make ( chan struct { } )
snapUpdated = make ( chan struct { } )
}
commmitTrie := func ( ) error {
commitErr := func ( ) error {
if s . pipeCommit {
<- snapUpdated
// Due to state verification pipeline, the accounts roots are not updated, leading to the data in the difflayer is not correct, capture the correct data here
s . AccountsIntermediateRoot ( )
if parent := s . snap . Root ( ) ; parent != s . expectedRoot {
accountData := make ( map [ common . Hash ] [ ] byte )
2023-08-23 12:46:08 +03:00
for k , v := range s . accounts {
2022-07-05 06:14:21 +03:00
accountData [ crypto . Keccak256Hash ( k [ : ] ) ] = v
}
s . snaps . Snapshot ( s . expectedRoot ) . CorrectAccounts ( accountData )
}
2023-09-07 11:39:29 +03:00
s . snap = nil
2022-07-05 06:14:21 +03:00
}
if s . stateRoot = s . StateIntermediateRoot ( ) ; s . fullProcessed && s . expectedRoot != s . stateRoot {
log . Error ( "Invalid merkle root" , "remote" , s . expectedRoot , "local" , s . stateRoot )
return fmt . Errorf ( "invalid merkle root (remote: %x local: %x)" , s . expectedRoot , s . stateRoot )
}
2023-09-15 08:49:07 +03:00
var err error
// Handle all state deletions first
incomplete , err = s . handleDestruction ( nodes )
if err != nil {
return err
}
2022-07-05 06:14:21 +03:00
tasks := make ( chan func ( ) )
2024-03-18 06:00:07 +03:00
type taskResult struct {
2023-08-23 12:46:08 +03:00
err error
nodeSet * trienode . NodeSet
}
2024-03-18 06:00:07 +03:00
taskResults := make ( chan taskResult , len ( s . stateObjectsDirty ) )
2022-07-05 06:14:21 +03:00
tasksNum := 0
finishCh := make ( chan struct { } )
threads := gopool . Threads ( len ( s . stateObjectsDirty ) )
wg := sync . WaitGroup { }
for i := 0 ; i < threads ; i ++ {
wg . Add ( 1 )
go func ( ) {
defer wg . Done ( )
for {
select {
case task := <- tasks :
task ( )
case <- finishCh :
return
}
}
} ( )
}
for addr := range s . stateObjectsDirty {
if obj := s . stateObjects [ addr ] ; ! obj . deleted {
tasks <- func ( ) {
// Write any storage changes in the state object to its storage trie
if ! s . noTrie {
2023-08-23 12:46:08 +03:00
if set , err := obj . commit ( ) ; err != nil {
2024-03-18 06:00:07 +03:00
taskResults <- taskResult { err , nil }
2022-07-05 06:14:21 +03:00
return
2023-08-23 12:46:08 +03:00
} else {
2024-03-18 06:00:07 +03:00
taskResults <- taskResult { nil , set }
2022-07-05 06:14:21 +03:00
}
2023-08-23 12:46:08 +03:00
} else {
2024-03-18 06:00:07 +03:00
taskResults <- taskResult { nil , nil }
2022-07-05 06:14:21 +03:00
}
}
tasksNum ++
}
}
for i := 0 ; i < tasksNum ; i ++ {
2023-08-23 12:46:08 +03:00
res := <- taskResults
if res . err != nil {
2022-07-05 06:14:21 +03:00
close ( finishCh )
2023-08-23 12:46:08 +03:00
return res . err
}
// Merge the dirty nodes of storage trie into global set. It is possible
// that the account was destructed and then resurrected in the same block.
// In this case, the node set is shared by both accounts.
if res . nodeSet != nil {
if err := nodes . Merge ( res . nodeSet ) ; err != nil {
return err
}
2022-07-05 06:14:21 +03:00
}
}
close ( finishCh )
if ! s . noTrie {
2023-08-23 12:46:08 +03:00
root , set , err := s . trie . Commit ( true )
2022-07-05 06:14:21 +03:00
if err != nil {
return err
}
2023-08-23 12:46:08 +03:00
// Merge the dirty nodes of account trie into global set
if set != nil {
if err := nodes . Merge ( set ) ; err != nil {
return err
}
}
2023-09-27 06:07:45 +03:00
origin := s . originalRoot
if origin == ( common . Hash { } ) {
origin = types . EmptyRootHash
}
if root != origin {
start := time . Now ( )
2024-02-02 10:43:33 +03:00
set := triestate . New ( s . accountsOrigin , s . storagesOrigin , incomplete )
if err := s . db . TrieDB ( ) . Update ( root , origin , block , nodes , set ) ; err != nil {
2023-09-27 06:07:45 +03:00
return err
}
s . originalRoot = root
if metrics . EnabledExpensive {
s . TrieDBCommits += time . Since ( start )
}
2024-02-02 10:43:33 +03:00
if s . onCommit != nil {
s . onCommit ( set )
}
2023-09-27 06:07:45 +03:00
}
2022-07-05 06:14:21 +03:00
}
for _ , postFunc := range postCommitFuncs {
err := postFunc ( )
if err != nil {
return err
}
}
wg . Wait ( )
return nil
} ( )
if s . pipeCommit {
if commitErr == nil {
s . snaps . Snapshot ( s . stateRoot ) . MarkValid ( )
close ( verified )
} else {
// The blockchain will do the further rewind if write block not finish yet
close ( verified )
if failPostCommitFunc != nil {
failPostCommitFunc ( )
}
log . Error ( "state verification failed" , "err" , commitErr )
}
2019-08-06 13:40:28 +03:00
}
2022-07-05 06:14:21 +03:00
return commitErr
}
commitFuncs := [ ] func ( ) error {
func ( ) error {
2023-08-23 12:46:08 +03:00
codeWriter := s . db . DiskDB ( ) . NewBatch ( )
2022-07-05 06:14:21 +03:00
for addr := range s . stateObjectsDirty {
if obj := s . stateObjects [ addr ] ; ! obj . deleted {
2023-08-23 12:46:08 +03:00
// Write any contract code associated with the state object
2022-07-05 06:14:21 +03:00
if obj . code != nil && obj . dirtyCode {
rawdb . WriteCode ( codeWriter , common . BytesToHash ( obj . CodeHash ( ) ) , obj . code )
obj . dirtyCode = false
if s . snap != nil {
diffLayer . Codes = append ( diffLayer . Codes , types . DiffCode {
Hash : common . BytesToHash ( obj . CodeHash ( ) ) ,
Code : obj . code ,
} )
}
if codeWriter . ValueSize ( ) > ethdb . IdealBatchSize {
if err := codeWriter . Write ( ) ; err != nil {
return err
}
codeWriter . Reset ( )
}
}
}
}
if codeWriter . ValueSize ( ) > 0 {
if err := codeWriter . Write ( ) ; err != nil {
log . Crit ( "Failed to commit dirty codes" , "error" , err )
return err
}
2019-11-22 14:23:49 +03:00
}
2022-07-05 06:14:21 +03:00
return nil
} ,
func ( ) error {
// If snapshotting is enabled, update the snapshot tree with this new version
if s . snap != nil {
if metrics . EnabledExpensive {
defer func ( start time . Time ) { s . SnapshotCommits += time . Since ( start ) } ( time . Now ( ) )
}
if s . pipeCommit {
defer close ( snapUpdated )
// State verification pipeline - accounts root are not calculated here, just populate needed fields for process
s . PopulateSnapAccountAndStorage ( )
}
diffLayer . Destructs , diffLayer . Accounts , diffLayer . Storages = s . SnapToDiffLayer ( )
// Only update if there's a state transition (skip empty Clique blocks)
if parent := s . snap . Root ( ) ; parent != s . expectedRoot {
2023-08-23 12:46:08 +03:00
err := s . snaps . Update ( s . expectedRoot , parent , s . convertAccountSet ( s . stateObjectsDestruct ) , s . accounts , s . storages , verified )
2022-07-05 06:14:21 +03:00
if err != nil {
log . Warn ( "Failed to update snapshot tree" , "from" , parent , "to" , s . expectedRoot , "err" , err )
}
// Keep n diff layers in the memory
// - head layer is paired with HEAD state
// - head-1 layer is paired with HEAD-1 state
// - head-(n-1) layer(bottom-most diff layer) is paired with HEAD-(n-1)state
go func ( ) {
if err := s . snaps . Cap ( s . expectedRoot , s . snaps . CapLimit ( ) ) ; err != nil {
log . Warn ( "Failed to cap snapshot tree" , "root" , s . expectedRoot , "layers" , s . snaps . CapLimit ( ) , "err" , err )
}
} ( )
}
2019-11-22 14:23:49 +03:00
}
2022-07-05 06:14:21 +03:00
return nil
} ,
}
if s . pipeCommit {
go commmitTrie ( )
} else {
2022-08-31 08:30:25 +03:00
defer s . StopPrefetcher ( )
2023-10-27 03:55:51 +03:00
commitFuncs = append ( commitFuncs , commmitTrie )
2022-07-05 06:14:21 +03:00
}
commitRes := make ( chan error , len ( commitFuncs ) )
for _ , f := range commitFuncs {
2023-09-07 11:39:29 +03:00
// commitFuncs[0] and commitFuncs[1] both read map `stateObjects`, but no conflicts
2022-07-05 06:14:21 +03:00
tmpFunc := f
2023-09-15 08:49:07 +03:00
go func ( ) {
commitRes <- tmpFunc ( )
} ( )
2022-07-05 06:14:21 +03:00
}
for i := 0 ; i < len ( commitFuncs ) ; i ++ {
r := <- commitRes
if r != nil {
return common . Hash { } , nil , r
2019-08-06 13:40:28 +03:00
}
}
2023-09-07 11:39:29 +03:00
2022-07-05 06:14:21 +03:00
root := s . stateRoot
if s . pipeCommit {
root = s . expectedRoot
2023-09-15 08:49:07 +03:00
} else {
s . snap = nil
2022-07-05 06:14:21 +03:00
}
cmd, core, eth, les, light: track deleted nodes (#25757)
* cmd, core, eth, les, light: track deleted nodes
* trie: add docs
* trie: address comments
* cmd, core, eth, les, light, trie: trie id
* trie: add tests
* trie, core: updates
* trie: fix imports
* trie: add utility print-method for nodeset
* trie: import err
* trie: fix go vet warnings
Co-authored-by: Martin Holst Swende <martin@swende.se>
2022-09-27 11:01:02 +03:00
if root == ( common . Hash { } ) {
2023-02-21 14:12:27 +03:00
root = types . EmptyRootHash
cmd, core, eth, les, light: track deleted nodes (#25757)
* cmd, core, eth, les, light: track deleted nodes
* trie: add docs
* trie: address comments
* cmd, core, eth, les, light, trie: trie id
* trie: add tests
* trie, core: updates
* trie: fix imports
* trie: add utility print-method for nodeset
* trie: import err
* trie: fix go vet warnings
Co-authored-by: Martin Holst Swende <martin@swende.se>
2022-09-27 11:01:02 +03:00
}
2023-07-11 16:43:23 +03:00
// Clear all internal flags at the end of commit operation.
s . accounts = make ( map [ common . Hash ] [ ] byte )
s . storages = make ( map [ common . Hash ] map [ common . Hash ] [ ] byte )
2023-07-31 15:07:51 +03:00
s . accountsOrigin = make ( map [ common . Address ] [ ] byte )
s . storagesOrigin = make ( map [ common . Address ] map [ common . Hash ] [ ] byte )
2023-07-11 16:43:23 +03:00
s . stateObjectsDirty = make ( map [ common . Address ] struct { } )
s . stateObjectsDestruct = make ( map [ common . Address ] * types . StateAccount )
2022-07-05 06:14:21 +03:00
return root , diffLayer , nil
}
func ( s * StateDB ) SnapToDiffLayer ( ) ( [ ] common . Address , [ ] types . DiffAccount , [ ] types . DiffStorage ) {
2023-08-23 12:46:08 +03:00
destructs := make ( [ ] common . Address , 0 , len ( s . stateObjectsDestruct ) )
for account := range s . stateObjectsDestruct {
2022-07-05 06:14:21 +03:00
destructs = append ( destructs , account )
}
2023-08-23 12:46:08 +03:00
accounts := make ( [ ] types . DiffAccount , 0 , len ( s . accounts ) )
for accountHash , account := range s . accounts {
2022-07-05 06:14:21 +03:00
accounts = append ( accounts , types . DiffAccount {
Account : accountHash ,
Blob : account ,
} )
}
2023-08-23 12:46:08 +03:00
storages := make ( [ ] types . DiffStorage , 0 , len ( s . storages ) )
for accountHash , storage := range s . storages {
keys := make ( [ ] common . Hash , 0 , len ( storage ) )
2022-07-05 06:14:21 +03:00
values := make ( [ ] [ ] byte , 0 , len ( storage ) )
for k , v := range storage {
keys = append ( keys , k )
values = append ( values , v )
}
storages = append ( storages , types . DiffStorage {
Account : accountHash ,
Keys : keys ,
Vals : values ,
} )
}
return destructs , accounts , storages
2015-08-18 15:14:45 +03:00
}
2020-10-23 09:26:57 +03:00
2022-11-16 12:18:52 +03:00
// Prepare handles the preparatory steps for executing a state transition with.
// This method must be invoked before state transition.
2021-02-25 17:26:57 +03:00
//
2022-11-16 12:18:52 +03:00
// Berlin fork:
2021-02-25 17:26:57 +03:00
// - Add sender to access list (2929)
// - Add destination to access list (2929)
// - Add precompiles to access list (2929)
// - Add the contents of the optional tx access list (2930)
//
2022-11-16 12:18:52 +03:00
// Potential EIPs:
2022-11-23 00:39:52 +03:00
// - Reset access list (Berlin)
// - Add coinbase to access list (EIP-3651)
// - Reset transient storage (EIP-1153)
func ( s * StateDB ) Prepare ( rules params . Rules , sender , coinbase common . Address , dst * common . Address , precompiles [ ] common . Address , list types . AccessList ) {
2022-11-16 12:18:52 +03:00
if rules . IsBerlin {
// Clear out any leftover from previous executions
2022-11-23 00:39:52 +03:00
al := newAccessList ( )
s . accessList = al
2022-11-16 12:18:52 +03:00
2022-11-23 00:39:52 +03:00
al . AddAddress ( sender )
2022-11-16 12:18:52 +03:00
if dst != nil {
2022-11-23 00:39:52 +03:00
al . AddAddress ( * dst )
2022-11-16 12:18:52 +03:00
// If it's a create-tx, the destination will be added inside evm.create
}
for _ , addr := range precompiles {
2022-11-23 00:39:52 +03:00
al . AddAddress ( addr )
2022-11-16 12:18:52 +03:00
}
for _ , el := range list {
2022-11-23 00:39:52 +03:00
al . AddAddress ( el . Address )
2022-11-16 12:18:52 +03:00
for _ , key := range el . StorageKeys {
2022-11-23 00:39:52 +03:00
al . AddSlot ( el . Address , key )
2022-11-16 12:18:52 +03:00
}
2021-02-25 17:26:57 +03:00
}
2022-11-23 00:39:52 +03:00
if rules . IsShanghai { // EIP-3651: warm coinbase
al . AddAddress ( coinbase )
2021-02-25 17:26:57 +03:00
}
}
2022-11-16 12:18:52 +03:00
// Reset transient storage at the beginning of transaction execution
s . transientStorage = newTransientStorage ( )
2021-02-25 17:26:57 +03:00
}
2020-10-23 09:26:57 +03:00
// AddAddressToAccessList adds the given address to the access list
func ( s * StateDB ) AddAddressToAccessList ( addr common . Address ) {
2022-07-05 06:14:21 +03:00
if s . accessList == nil {
s . accessList = newAccessList ( )
}
2020-10-23 09:26:57 +03:00
if s . accessList . AddAddress ( addr ) {
s . journal . append ( accessListAddAccountChange { & addr } )
}
}
// AddSlotToAccessList adds the given (address, slot)-tuple to the access list
func ( s * StateDB ) AddSlotToAccessList ( addr common . Address , slot common . Hash ) {
2022-07-05 06:14:21 +03:00
if s . accessList == nil {
s . accessList = newAccessList ( )
}
2020-10-23 09:26:57 +03:00
addrMod , slotMod := s . accessList . AddSlot ( addr , slot )
if addrMod {
// In practice, this should not happen, since there is no way to enter the
// scope of 'address' without having the 'address' become already added
// to the access list (via call-variant, create, etc).
// Better safe than sorry, though
s . journal . append ( accessListAddAccountChange { & addr } )
}
if slotMod {
s . journal . append ( accessListAddSlotChange {
address : & addr ,
slot : & slot ,
} )
}
}
// AddressInAccessList returns true if the given address is in the access list.
func ( s * StateDB ) AddressInAccessList ( addr common . Address ) bool {
2022-07-05 06:14:21 +03:00
if s . accessList == nil {
return false
}
2020-10-23 09:26:57 +03:00
return s . accessList . ContainsAddress ( addr )
}
// SlotInAccessList returns true if the given (address, slot)-tuple is in the access list.
func ( s * StateDB ) SlotInAccessList ( addr common . Address , slot common . Hash ) ( addressPresent bool , slotPresent bool ) {
2022-07-05 06:14:21 +03:00
if s . accessList == nil {
return false , false
}
2020-10-23 09:26:57 +03:00
return s . accessList . Contains ( addr , slot )
}
2022-07-05 06:14:21 +03:00
// GetStorage returns the per-address storage map held by the state's
// storage pool. NOTE(review): the semantics of the returned *sync.Map
// (presumably slot-hash -> value cache shared across copies) are defined
// by storagePool.getStorage — confirm there before relying on them.
func (s *StateDB) GetStorage(address common.Address) *sync.Map {
	return s.storagePool.getStorage(address)
}
2023-08-23 12:46:08 +03:00
2022-12-28 16:53:43 +03:00
// convertAccountSet converts a provided account set from address keyed to hash keyed.
2023-07-11 16:43:23 +03:00
func ( s * StateDB ) convertAccountSet ( set map [ common . Address ] * types . StateAccount ) map [ common . Hash ] struct { } {
2023-05-08 09:59:14 +03:00
ret := make ( map [ common . Hash ] struct { } , len ( set ) )
2022-12-28 16:53:43 +03:00
for addr := range set {
obj , exist := s . stateObjects [ addr ]
if ! exist {
ret [ crypto . Keccak256Hash ( addr [ : ] ) ] = struct { } { }
} else {
ret [ obj . addrHash ] = struct { } { }
}
}
return ret
}
2023-07-11 16:43:23 +03:00
2024-04-07 07:07:25 +03:00
// GetSnap returns the snapshot layer held by this StateDB.
// NOTE(review): may be nil when snapshots are disabled — confirm against
// the StateDB constructor before dereferencing.
func (s *StateDB) GetSnap() snapshot.Snapshot {
	return s.snap
}
2023-07-31 15:07:51 +03:00
// copySet returns a deep-copied set.
func copySet [ k comparable ] ( set map [ k ] [ ] byte ) map [ k ] [ ] byte {
copied := make ( map [ k ] [ ] byte , len ( set ) )
2023-07-11 16:43:23 +03:00
for key , val := range set {
copied [ key ] = common . CopyBytes ( val )
}
return copied
}
2023-07-31 15:07:51 +03:00
// copy2DSet returns a two-dimensional deep-copied set.
func copy2DSet [ k comparable ] ( set map [ k ] map [ common . Hash ] [ ] byte ) map [ k ] map [ common . Hash ] [ ] byte {
copied := make ( map [ k ] map [ common . Hash ] [ ] byte , len ( set ) )
2023-07-11 16:43:23 +03:00
for addr , subset := range set {
copied [ addr ] = make ( map [ common . Hash ] [ ] byte , len ( subset ) )
for key , val := range subset {
copied [ addr ] [ key ] = common . CopyBytes ( val )
}
}
return copied
}