bsc/core/state_processor.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
import (
"bytes"
"errors"
"fmt"
"math/big"
"math/rand"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/gopool"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/systemcontracts"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
)
const (
	fullProcessCheck       = 21       // in diff-sync mode, randomly fall back to full processing roughly once every fullProcessCheck blocks
	recentTime             = 1024 * 3 // blocks younger than this many seconds are considered recent
	recentDiffLayerTimeout = 5        // number of 1ms retries when waiting for the diff layer of a recent block
	farDiffLayerTimeout    = 2        // number of 1ms retries when waiting for the diff layer of an older block
)
// StateProcessor is a basic Processor, which takes care of transitioning
// state from one point to another.
//
// StateProcessor implements Processor.
type StateProcessor struct {
config *params.ChainConfig // Chain configuration options
bc *BlockChain // Canonical block chain
engine consensus.Engine // Consensus engine used for block rewards
}
// NewStateProcessor initialises a new StateProcessor.
func NewStateProcessor(config *params.ChainConfig, bc *BlockChain, engine consensus.Engine) *StateProcessor {
return &StateProcessor{
config: config,
bc: bc,
engine: engine,
}
}
type LightStateProcessor struct {
check int64
StateProcessor
}
func NewLightStateProcessor(config *params.ChainConfig, bc *BlockChain, engine consensus.Engine) *LightStateProcessor {
randomGenerator := rand.New(rand.NewSource(int64(time.Now().Nanosecond())))
check := randomGenerator.Int63n(fullProcessCheck)
return &LightStateProcessor{
check: check,
StateProcessor: *NewStateProcessor(config, bc, engine),
}
}
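// shouldFullProcess is an illustrative sketch, not part of the upstream file: it mirrors the
// gating condition used in LightStateProcessor.Process below. With check drawn uniformly from
// [0, fullProcessCheck), roughly one block in every fullProcessCheck is forced through the full
// processor as a periodic safety net. The function name is an assumption for this example only.
func shouldFullProcess(blockNumber uint64, check int64) bool {
	return blockNumber%fullProcessCheck == uint64(check)
}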
func (p *LightStateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (*state.StateDB, types.Receipts, []*types.Log, uint64, error) {
allowLightProcess := true
if posa, ok := p.engine.(consensus.PoSA); ok {
allowLightProcess = posa.AllowLightProcess(p.bc, block.Header())
}
// try light processing only when allowed and this is not the randomly selected check block; empty blocks always take the full path
if allowLightProcess && block.NumberU64()%fullProcessCheck != uint64(p.check) && len(block.Transactions()) != 0 {
var pid string
if peer, ok := block.ReceivedFrom.(PeerIDer); ok {
pid = peer.ID()
}
var diffLayer *types.DiffLayer
var diffLayerTimeout = recentDiffLayerTimeout
if time.Now().Unix()-int64(block.Time()) > recentTime {
diffLayerTimeout = farDiffLayerTimeout
}
for tried := 0; tried < diffLayerTimeout; tried++ {
// wait a bit for the diff layer
diffLayer = p.bc.GetUnTrustedDiffLayer(block.Hash(), pid)
if diffLayer != nil {
break
}
time.Sleep(time.Millisecond)
}
if diffLayer != nil {
if err := diffLayer.Receipts.DeriveFields(p.bc.chainConfig, block.Hash(), block.NumberU64(), block.Transactions()); err != nil {
log.Error("Failed to derive block receipts fields", "hash", block.Hash(), "number", block.NumberU64(), "err", err)
// fall back to full processing
return p.StateProcessor.Process(block, statedb, cfg)
}
receipts, logs, gasUsed, err := p.LightProcess(diffLayer, block, statedb)
if err == nil {
log.Info("do light process success at block", "num", block.NumberU64())
return statedb, receipts, logs, gasUsed, nil
}
log.Error("do light process err at block", "num", block.NumberU64(), "err", err)
p.bc.removeDiffLayers(diffLayer.DiffHash.Load().(common.Hash))
// prepare new statedb
statedb.StopPrefetcher()
parent := p.bc.GetHeader(block.ParentHash(), block.NumberU64()-1)
statedb, err = state.New(parent.Root, p.bc.stateCache, p.bc.snaps)
if err != nil {
return statedb, nil, nil, 0, err
}
statedb.SetExpectedStateRoot(block.Root())
if p.bc.pipeCommit {
statedb.EnablePipeCommit()
}
// Enable prefetching to pull in trie node paths while processing transactions
statedb.StartPrefetcher("chain")
}
}
// fall back to full processing
return p.StateProcessor.Process(block, statedb, cfg)
}
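// exampleDiffSyncImport is an illustrative sketch, not part of the upstream file: it shows how a
// caller could wire a LightStateProcessor into block import. Because LightStateProcessor embeds
// StateProcessor, Process transparently falls back to full execution whenever no usable diff
// layer is available. The function name and the surrounding glue code are assumptions.
func exampleDiffSyncImport(bc *BlockChain, block *types.Block, statedb *state.StateDB) error {
	p := NewLightStateProcessor(bc.Config(), bc, bc.Engine())
	_, _, _, _, err := p.Process(block, statedb, vm.Config{})
	return err
}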
func (p *LightStateProcessor) LightProcess(diffLayer *types.DiffLayer, block *types.Block, statedb *state.StateDB) (types.Receipts, []*types.Log, uint64, error) {
statedb.MarkLightProcessed()
fullDiffCode := make(map[common.Hash][]byte, len(diffLayer.Codes))
diffTries := make(map[common.Address]state.Trie)
diffCode := make(map[common.Hash][]byte)
snapDestructs, snapAccounts, snapStorage, err := statedb.DiffLayerToSnap(diffLayer)
if err != nil {
return nil, nil, 0, err
}
for _, c := range diffLayer.Codes {
fullDiffCode[c.Hash] = c.Code
}
stateTrie, err := statedb.Trie()
if err != nil {
return nil, nil, 0, err
}
for des := range snapDestructs {
stateTrie.TryDelete(des[:])
}
threads := gopool.Threads(len(snapAccounts))
iteAccounts := make([]common.Address, 0, len(snapAccounts))
for diffAccount := range snapAccounts {
iteAccounts = append(iteAccounts, diffAccount)
}
errChan := make(chan error, threads)
exitChan := make(chan struct{})
var snapMux sync.RWMutex
var stateMux, diffMux sync.Mutex
for i := 0; i < threads; i++ {
start := i * len(iteAccounts) / threads
end := (i + 1) * len(iteAccounts) / threads
if i+1 == threads {
end = len(iteAccounts)
}
go func(start, end int) {
for index := start; index < end; index++ {
select {
// fast fail
case <-exitChan:
return
default:
}
diffAccount := iteAccounts[index]
snapMux.RLock()
blob := snapAccounts[diffAccount]
snapMux.RUnlock()
addrHash := crypto.Keccak256Hash(diffAccount[:])
latestAccount, err := snapshot.FullAccount(blob)
if err != nil {
errChan <- err
return
}
// fetch previous state
var previousAccount types.StateAccount
stateMux.Lock()
enc, err := stateTrie.TryGet(diffAccount[:])
stateMux.Unlock()
if err != nil {
errChan <- err
return
}
if len(enc) != 0 {
if err := rlp.DecodeBytes(enc, &previousAccount); err != nil {
errChan <- err
return
}
}
if latestAccount.Balance == nil {
latestAccount.Balance = new(big.Int)
}
if previousAccount.Balance == nil {
previousAccount.Balance = new(big.Int)
}
if previousAccount.Root == (common.Hash{}) {
previousAccount.Root = types.EmptyRootHash
}
if len(previousAccount.CodeHash) == 0 {
previousAccount.CodeHash = types.EmptyCodeHash
}
// skip accounts whose state did not change
if previousAccount.Nonce == latestAccount.Nonce &&
bytes.Equal(previousAccount.CodeHash, latestAccount.CodeHash) &&
previousAccount.Balance.Cmp(latestAccount.Balance) == 0 &&
previousAccount.Root == common.BytesToHash(latestAccount.Root) {
// Receiving redundant account changes is normal, since the collected diff layer may contain entries that produce no effective change.
log.Debug("receive redundant account change in diff layer", "account", diffAccount, "num", block.NumberU64())
snapMux.Lock()
delete(snapAccounts, diffAccount)
delete(snapStorage, diffAccount)
snapMux.Unlock()
continue
}
// update code
codeHash := common.BytesToHash(latestAccount.CodeHash)
if !bytes.Equal(latestAccount.CodeHash, previousAccount.CodeHash) &&
!bytes.Equal(latestAccount.CodeHash, types.EmptyCodeHash) {
if code, exist := fullDiffCode[codeHash]; exist {
if crypto.Keccak256Hash(code) != codeHash {
errChan <- fmt.Errorf("code and code hash mismatch, account %s", diffAccount.String())
return
}
diffMux.Lock()
diffCode[codeHash] = code
diffMux.Unlock()
} else {
rawCode := rawdb.ReadCode(p.bc.db, codeHash)
if len(rawCode) == 0 {
errChan <- fmt.Errorf("missing code, account %s", diffAccount.String())
return
}
}
}
// update storage
latestRoot := common.BytesToHash(latestAccount.Root)
if latestRoot != previousAccount.Root {
accountTrie, err := statedb.Database().OpenStorageTrie(addrHash, previousAccount.Root)
if err != nil {
errChan <- err
return
}
snapMux.RLock()
storageChange, exist := snapStorage[diffAccount]
snapMux.RUnlock()
if !exist {
errChan <- errors.New("missing storage change in difflayer")
return
}
for k, v := range storageChange {
if len(v) != 0 {
accountTrie.TryUpdate([]byte(k), v)
} else {
accountTrie.TryDelete([]byte(k))
}
}
// check storage root
accountRootHash := accountTrie.Hash()
if latestRoot != accountRootHash {
errChan <- errors.New("account storage root mismatch")
return
}
diffMux.Lock()
diffTries[diffAccount] = accountTrie
diffMux.Unlock()
} else {
snapMux.Lock()
delete(snapStorage, diffAccount)
snapMux.Unlock()
}
// Do not trust the snapshot blob from the diff layer; re-encode the account ourselves.
latestStateAccount := types.StateAccount{
Nonce: latestAccount.Nonce,
Balance: latestAccount.Balance,
Root: common.BytesToHash(latestAccount.Root),
CodeHash: latestAccount.CodeHash,
}
bz, err := rlp.EncodeToBytes(&latestStateAccount)
if err != nil {
errChan <- err
return
}
stateMux.Lock()
err = stateTrie.TryUpdate(diffAccount[:], bz)
stateMux.Unlock()
if err != nil {
errChan <- err
return
}
}
errChan <- nil
}(start, end)
}
for i := 0; i < threads; i++ {
err := <-errChan
if err != nil {
close(exitChan)
return nil, nil, 0, err
}
}
var allLogs []*types.Log
var gasUsed uint64
for _, receipt := range diffLayer.Receipts {
allLogs = append(allLogs, receipt.Logs...)
gasUsed += receipt.GasUsed
}
// Validate in advance so that we can still fall back to full processing if it fails
if err := p.bc.validator.ValidateState(block, statedb, diffLayer.Receipts, gasUsed); err != nil {
log.Error("validate state failed during diff sync", "error", err)
return nil, nil, 0, err
}
// remove redundant storage change
for account := range snapStorage {
if _, exist := snapAccounts[account]; !exist {
log.Warn("receive redundant storage change in diff layer")
delete(snapStorage, account)
}
}
// remove redundant code
if len(fullDiffCode) != len(diffLayer.Codes) {
diffLayer.Codes = make([]types.DiffCode, 0, len(diffCode))
for hash, code := range diffCode {
diffLayer.Codes = append(diffLayer.Codes, types.DiffCode{
Hash: hash,
Code: code,
})
}
}
statedb.SetSnapData(snapDestructs, snapAccounts, snapStorage)
if len(snapAccounts) != len(diffLayer.Accounts) || len(snapStorage) != len(diffLayer.Storages) {
diffLayer.Destructs, diffLayer.Accounts, diffLayer.Storages = statedb.SnapToDiffLayer()
}
statedb.SetDiff(diffLayer, diffTries, diffCode)
return diffLayer.Receipts, allLogs, gasUsed, nil
}
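// splitAccountRanges is an illustrative sketch, not part of the upstream file: it isolates the
// work-partitioning scheme used by LightProcess above, where the collected account list is cut
// into `threads` contiguous index ranges and the last range absorbs any remainder. The helper
// name is an assumption for this example only.
func splitAccountRanges(accounts, threads int) [][2]int {
	ranges := make([][2]int, 0, threads)
	for i := 0; i < threads; i++ {
		start := i * accounts / threads
		end := (i + 1) * accounts / threads
		if i+1 == threads {
			end = accounts
		}
		ranges = append(ranges, [2]int{start, end})
	}
	return ranges
}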
// Process processes the state changes according to the Ethereum rules by running
// the transaction messages using the statedb and applying any rewards to both
// the processor (coinbase) and any included uncles.
//
// Process returns the receipts and logs accumulated during the process and
// returns the amount of gas that was used in the process. If any of the
// transactions failed to execute due to insufficient gas it will return an error.
func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (*state.StateDB, types.Receipts, []*types.Log, uint64, error) {
var (
usedGas = new(uint64)
header = block.Header()
blockHash = block.Hash()
blockNumber = block.Number()
allLogs []*types.Log
gp = new(GasPool).AddGas(block.GasLimit())
)
var receipts = make([]*types.Receipt, 0)
// Mutate the block and state according to any hard-fork specs
if p.config.DAOForkSupport && p.config.DAOForkBlock != nil && p.config.DAOForkBlock.Cmp(block.Number()) == 0 {
misc.ApplyDAOHardFork(statedb)
}
// Handle upgrades of built-in system contract code
systemcontracts.UpgradeBuildInSystemContract(p.config, block.Number(), statedb)
blockContext := NewEVMBlockContext(header, p.bc, nil)
vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, p.config, cfg)
txNum := len(block.Transactions())
// Iterate over and process the individual transactions
posa, isPoSA := p.engine.(consensus.PoSA)
commonTxs := make([]*types.Transaction, 0, txNum)
// initialise bloom processors
bloomProcessors := NewAsyncReceiptBloomGenerator(txNum)
statedb.MarkFullProcessed()
signer := types.MakeSigner(p.config, header.Number)
// There are usually two system transactions: one for the validator set contract and one for the system reward contract.
systemTxs := make([]*types.Transaction, 0, 2)
for i, tx := range block.Transactions() {
if isPoSA {
if isSystemTx, err := posa.IsSystemTransaction(tx, block.Header()); err != nil {
bloomProcessors.Close()
return statedb, nil, nil, 0, err
} else if isSystemTx {
systemTxs = append(systemTxs, tx)
continue
}
}
msg, err := tx.AsMessage(signer, header.BaseFee)
if err != nil {
bloomProcessors.Close()
return statedb, nil, nil, 0, err
}
statedb.Prepare(tx.Hash(), i)
receipt, err := applyTransaction(msg, p.config, p.bc, nil, gp, statedb, blockNumber, blockHash, tx, usedGas, vmenv, bloomProcessors)
if err != nil {
bloomProcessors.Close()
return statedb, nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err)
}
commonTxs = append(commonTxs, tx)
receipts = append(receipts, receipt)
}
bloomProcessors.Close()
// Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
err := p.engine.Finalize(p.bc, header, statedb, &commonTxs, block.Uncles(), &receipts, &systemTxs, usedGas)
if err != nil {
return statedb, receipts, allLogs, *usedGas, err
}
for _, receipt := range receipts {
allLogs = append(allLogs, receipt.Logs...)
}
return statedb, receipts, allLogs, *usedGas, nil
}
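// exampleFullProcess is an illustrative sketch, not part of the upstream file: it runs a block
// through the full StateProcessor and then validates the resulting state against the block's
// receipts and gas usage, the same check the light path performs before accepting a diff layer.
// The function name and the surrounding glue code are assumptions.
func exampleFullProcess(bc *BlockChain, block *types.Block, statedb *state.StateDB) error {
	processor := NewStateProcessor(bc.Config(), bc, bc.Engine())
	statedb, receipts, _, usedGas, err := processor.Process(block, statedb, vm.Config{})
	if err != nil {
		return err
	}
	return bc.Validator().ValidateState(block, statedb, receipts, usedGas)
}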
func applyTransaction(msg types.Message, config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, blockNumber *big.Int, blockHash common.Hash, tx *types.Transaction, usedGas *uint64, evm *vm.EVM, receiptProcessors ...ReceiptProcessor) (*types.Receipt, error) {
// Create a new context to be used in the EVM environment.
txContext := NewEVMTxContext(msg)
evm.Reset(txContext, statedb)
// Apply the transaction to the current state (included in the env).
result, err := ApplyMessage(evm, msg, gp)
if err != nil {
return nil, err
}
// Update the state with pending changes.
var root []byte
if config.IsByzantium(blockNumber) {
statedb.Finalise(true)
} else {
root = statedb.IntermediateRoot(config.IsEIP158(blockNumber)).Bytes()
}
*usedGas += result.UsedGas
// Create a new receipt for the transaction, storing the intermediate root and gas used
// by the tx.
receipt := &types.Receipt{Type: tx.Type(), PostState: root, CumulativeGasUsed: *usedGas}
if result.Failed() {
receipt.Status = types.ReceiptStatusFailed
} else {
receipt.Status = types.ReceiptStatusSuccessful
}
receipt.TxHash = tx.Hash()
receipt.GasUsed = result.UsedGas
// If the transaction created a contract, store the creation address in the receipt.
if msg.To() == nil {
receipt.ContractAddress = crypto.CreateAddress(evm.TxContext.Origin, tx.Nonce())
}
// Set the receipt logs and create the bloom filter.
receipt.Logs = statedb.GetLogs(tx.Hash(), blockHash)
receipt.BlockHash = blockHash
receipt.BlockNumber = blockNumber
receipt.TransactionIndex = uint(statedb.TxIndex())
for _, receiptProcessor := range receiptProcessors {
receiptProcessor.Apply(receipt)
}
return receipt, err
}
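// countingReceiptProcessor is an illustrative sketch, not part of the upstream file: a minimal
// custom ReceiptProcessor. The only contract visible in this file is the Apply(receipt) call in
// applyTransaction above, so the single-method shape is assumed from that call site; the built-in
// async bloom generator fills receipt.Bloom from the same hook.
type countingReceiptProcessor struct {
	applied int
}

func (c *countingReceiptProcessor) Apply(receipt *types.Receipt) {
	c.applied++
}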
// ApplyTransaction attempts to apply a transaction to the given state database
// and uses the input parameters for its environment. It returns the receipt
// for the transaction, gas used and an error if the transaction failed,
// indicating the block was invalid.
func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, cfg vm.Config, receiptProcessors ...ReceiptProcessor) (*types.Receipt, error) {
msg, err := tx.AsMessage(types.MakeSigner(config, header.Number), header.BaseFee)
if err != nil {
return nil, err
}
// Create a new context to be used in the EVM environment
blockContext := NewEVMBlockContext(header, bc, author)
vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, config, cfg)
defer func() {
ite := vmenv.Interpreter()
vm.EVMInterpreterPool.Put(ite)
vm.EvmPool.Put(vmenv)
}()
return applyTransaction(msg, config, bc, author, gp, statedb, header.Number, header.Hash(), tx, usedGas, vmenv, receiptProcessors...)
}
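// exampleApplyTx is an illustrative sketch, not part of the upstream file: applying a single
// transaction on top of an existing state with the exported ApplyTransaction helper. Passing a
// nil author lets the block context fall back to the consensus engine to recover the coinbase.
// The function name and the calling context are assumptions.
func exampleApplyTx(config *params.ChainConfig, bc ChainContext, statedb *state.StateDB, header *types.Header, tx *types.Transaction) (*types.Receipt, error) {
	gp := new(GasPool).AddGas(header.GasLimit)
	usedGas := new(uint64)
	return ApplyTransaction(config, bc, nil, gp, statedb, header, tx, usedGas, vm.Config{})
}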