Compare commits

3 commits

| Author | SHA1 | Date |
|---|---|---|
| | ec318b9c97 | |
| | 6624522423 | |
| | 206c3b0ab0 | |
@@ -24,6 +24,7 @@ import (
 	"github.com/ethereum/go-ethereum"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/internal/ethapi"
 )

 var (

@@ -100,7 +101,7 @@ type ContractTransactor interface {
 	PendingNonceAt(ctx context.Context, account common.Address) (uint64, error)

 	// SendTransactionConditional injects the conditional transaction into the pending pool for execution after verification.
-	SendTransactionConditional(ctx context.Context, tx *types.Transaction, opts types.TransactionOpts) error
+	SendTransactionConditional(ctx context.Context, tx *types.Transaction, opts ethapi.TransactionOpts) error
 }

 // DeployBackend wraps the operations needed by WaitMined and WaitDeployed.
@@ -31,6 +31,7 @@ import (
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/internal/ethapi"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/stretchr/testify/assert"
 )

@@ -75,7 +76,7 @@ func (mt *mockTransactor) SendTransaction(ctx context.Context, tx *types.Transac
 	return nil
 }

-func (mt *mockTransactor) SendTransactionConditional(ctx context.Context, tx *types.Transaction, opts types.TransactionOpts) error {
+func (mt *mockTransactor) SendTransactionConditional(ctx context.Context, tx *types.Transaction, opts ethapi.TransactionOpts) error {
 	return nil
 }
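The two hunks above move the options argument of SendTransactionConditional from core/types to internal/ethapi. A minimal sketch of what a caller of the revised method looks like; since internal/ethapi cannot be imported from outside the module, local stand-in types are used here, and the TransactionOpts fields shown are illustrative assumptions, not the actual ethapi definition:

```go
package main

import (
	"context"
	"fmt"
)

// Transaction and TransactionOpts are local stand-ins: the real types are
// types.Transaction and ethapi.TransactionOpts (the latter moved packages in
// this change). The field names below are hypothetical.
type Transaction struct{ nonce uint64 }

type TransactionOpts struct {
	BlockNumberMin *uint64 // hypothetical: earliest block the tx may land in
	BlockNumberMax *uint64 // hypothetical: latest block the tx may land in
}

// conditionalSender mirrors the updated ContractTransactor method shape.
type conditionalSender interface {
	SendTransactionConditional(ctx context.Context, tx *Transaction, opts TransactionOpts) error
}

type fakeSender struct{}

func (fakeSender) SendTransactionConditional(ctx context.Context, tx *Transaction, opts TransactionOpts) error {
	// A real backend would verify the conditions before admitting the tx to the pool.
	return nil
}

func main() {
	min, max := uint64(100), uint64(110)
	err := conditionalSender(fakeSender{}).SendTransactionConditional(
		context.Background(), &Transaction{nonce: 1},
		TransactionOpts{BlockNumberMin: &min, BlockNumberMax: &max})
	fmt.Println("submitted conditionally:", err)
}
```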
@@ -63,8 +63,7 @@ var (
 		Flags: flags.Merge([]cli.Flag{
 			utils.CachePreimagesFlag,
 			utils.OverridePassedForkTime,
-			utils.OverridePascal,
-			utils.OverridePrague,
+			utils.OverrideBohr,
 			utils.OverrideVerkle,
 			utils.MultiDataBaseFlag,
 		}, utils.DatabaseFlags),

@@ -259,13 +258,9 @@ func initGenesis(ctx *cli.Context) error {
 		v := ctx.Uint64(utils.OverridePassedForkTime.Name)
 		overrides.OverridePassedForkTime = &v
 	}
-	if ctx.IsSet(utils.OverridePascal.Name) {
-		v := ctx.Uint64(utils.OverridePascal.Name)
-		overrides.OverridePascal = &v
-	}
-	if ctx.IsSet(utils.OverridePrague.Name) {
-		v := ctx.Uint64(utils.OverridePrague.Name)
-		overrides.OverridePrague = &v
+	if ctx.IsSet(utils.OverrideBohr.Name) {
+		v := ctx.Uint64(utils.OverrideBohr.Name)
+		overrides.OverrideBohr = &v
 	}
 	if ctx.IsSet(utils.OverrideVerkle.Name) {
 		v := ctx.Uint64(utils.OverrideVerkle.Name)
@@ -191,13 +191,9 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
 		v := ctx.Uint64(utils.OverridePassedForkTime.Name)
 		cfg.Eth.OverridePassedForkTime = &v
 	}
-	if ctx.IsSet(utils.OverridePascal.Name) {
-		v := ctx.Uint64(utils.OverridePascal.Name)
-		cfg.Eth.OverridePascal = &v
-	}
-	if ctx.IsSet(utils.OverridePrague.Name) {
-		v := ctx.Uint64(utils.OverridePrague.Name)
-		cfg.Eth.OverridePrague = &v
+	if ctx.IsSet(utils.OverrideBohr.Name) {
+		v := ctx.Uint64(utils.OverrideBohr.Name)
+		cfg.Eth.OverrideBohr = &v
 	}
 	if ctx.IsSet(utils.OverrideVerkle.Name) {
 		v := ctx.Uint64(utils.OverrideVerkle.Name)
@@ -67,13 +67,13 @@ var (
 		utils.DirectBroadcastFlag,
 		utils.DisableSnapProtocolFlag,
 		utils.EnableTrustProtocolFlag,
+		utils.PipeCommitFlag,
 		utils.RangeLimitFlag,
 		utils.USBFlag,
 		utils.SmartCardDaemonPathFlag,
 		utils.RialtoHash,
 		utils.OverridePassedForkTime,
-		utils.OverridePascal,
-		utils.OverridePrague,
+		utils.OverrideBohr,
 		utils.OverrideVerkle,
 		utils.OverrideFullImmutabilityThreshold,
 		utils.OverrideMinBlocksForBlobRequests,

@@ -91,7 +91,6 @@ var (
 		utils.TxPoolGlobalSlotsFlag,
 		utils.TxPoolAccountQueueFlag,
 		utils.TxPoolGlobalQueueFlag,
-		utils.TxPoolOverflowPoolSlotsFlag,
 		utils.TxPoolLifetimeFlag,
 		utils.TxPoolReannounceTimeFlag,
 		utils.BlobPoolDataDirFlag,
@@ -117,6 +117,11 @@ var (
 		Usage:    "Enable trust protocol",
 		Category: flags.FastNodeCategory,
 	}
+	PipeCommitFlag = &cli.BoolFlag{
+		Name:     "pipecommit",
+		Usage:    "Enable MPT pipeline commit, it will improve syncing performance. It is an experimental feature(default is false)",
+		Category: flags.DeprecatedCategory,
+	}
 	RangeLimitFlag = &cli.BoolFlag{
 		Name:     "rangelimit",
 		Usage:    "Enable 5000 blocks limit for range query",

@@ -305,17 +310,12 @@ var (
 	}
 	OverridePassedForkTime = &cli.Uint64Flag{
 		Name:     "override.passedforktime",
-		Usage:    "Manually specify the hard fork timestamps which have passed on the mainnet, overriding the bundled setting",
+		Usage:    "Manually specify the hard fork timestamp except the last one, overriding the bundled setting",
 		Category: flags.EthCategory,
 	}
-	OverridePascal = &cli.Uint64Flag{
-		Name:     "override.pascal",
-		Usage:    "Manually specify the Pascal fork timestamp, overriding the bundled setting",
-		Category: flags.EthCategory,
-	}
-	OverridePrague = &cli.Uint64Flag{
-		Name:     "override.prague",
-		Usage:    "Manually specify the Prague fork timestamp, overriding the bundled setting",
+	OverrideBohr = &cli.Uint64Flag{
+		Name:     "override.bohr",
+		Usage:    "Manually specify the Bohr fork timestamp, overriding the bundled setting",
 		Category: flags.EthCategory,
 	}
 	OverrideVerkle = &cli.Uint64Flag{
@@ -453,12 +453,6 @@ var (
 		Value:    ethconfig.Defaults.TxPool.GlobalQueue,
 		Category: flags.TxPoolCategory,
 	}
-	TxPoolOverflowPoolSlotsFlag = &cli.Uint64Flag{
-		Name:     "txpool.overflowpoolslots",
-		Usage:    "Maximum number of transaction slots in overflow pool",
-		Value:    ethconfig.Defaults.TxPool.OverflowPoolSlots,
-		Category: flags.TxPoolCategory,
-	}
 	TxPoolLifetimeFlag = &cli.DurationFlag{
 		Name:  "txpool.lifetime",
 		Usage: "Maximum amount of time non-executable transaction are queued",
@@ -1790,9 +1784,6 @@ func setTxPool(ctx *cli.Context, cfg *legacypool.Config) {
 	if ctx.IsSet(TxPoolGlobalQueueFlag.Name) {
 		cfg.GlobalQueue = ctx.Uint64(TxPoolGlobalQueueFlag.Name)
 	}
-	if ctx.IsSet(TxPoolOverflowPoolSlotsFlag.Name) {
-		cfg.OverflowPoolSlots = ctx.Uint64(TxPoolOverflowPoolSlotsFlag.Name)
-	}
 	if ctx.IsSet(TxPoolLifetimeFlag.Name) {
 		cfg.Lifetime = ctx.Duration(TxPoolLifetimeFlag.Name)
 	}
@@ -1977,6 +1968,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
 	if ctx.IsSet(EnableTrustProtocolFlag.Name) {
 		cfg.EnableTrustProtocol = ctx.IsSet(EnableTrustProtocolFlag.Name)
 	}
+	if ctx.IsSet(PipeCommitFlag.Name) {
+		log.Warn("The --pipecommit flag is deprecated and could be removed in the future!")
+	}
 	if ctx.IsSet(RangeLimitFlag.Name) {
 		cfg.RangeLimit = ctx.Bool(RangeLimitFlag.Name)
 	}
@@ -2320,7 +2314,6 @@ func EnableNodeInfo(poolConfig *legacypool.Config, nodeInfo *p2p.NodeInfo) Setup
 		"GlobalSlots":  poolConfig.GlobalSlots,
 		"AccountQueue": poolConfig.AccountQueue,
 		"GlobalQueue":  poolConfig.GlobalQueue,
-		"OverflowPoolSlots": poolConfig.OverflowPoolSlots,
 		"Lifetime":     poolConfig.Lifetime,
 	})
 }
@@ -1806,30 +1806,27 @@ func (p *Parlia) getCurrentValidators(blockHash common.Hash, blockNum *big.Int)
 func (p *Parlia) distributeIncoming(val common.Address, state *state.StateDB, header *types.Header, chain core.ChainContext,
 	txs *[]*types.Transaction, receipts *[]*types.Receipt, receivedTxs *[]*types.Transaction, usedGas *uint64, mining bool) error {
 	coinbase := header.Coinbase

-	doDistributeSysReward := !p.chainConfig.IsKepler(header.Number, header.Time) &&
-		state.GetBalance(common.HexToAddress(systemcontracts.SystemRewardContract)).Cmp(maxSystemBalance) < 0
-	if doDistributeSysReward {
-		balance := state.GetBalance(consensus.SystemAddress)
-		rewards := new(uint256.Int)
-		rewards = rewards.Rsh(balance, systemRewardPercent)
-		if rewards.Cmp(common.U2560) > 0 {
-			state.SetBalance(consensus.SystemAddress, balance.Sub(balance, rewards))
-			state.AddBalance(coinbase, rewards)
-			err := p.distributeToSystem(rewards.ToBig(), state, header, chain, txs, receipts, receivedTxs, usedGas, mining)
-			if err != nil {
-				return err
-			}
-			log.Trace("distribute to system reward pool", "block hash", header.Hash(), "amount", rewards)
-		}
-	}
-
 	balance := state.GetBalance(consensus.SystemAddress)
 	if balance.Cmp(common.U2560) <= 0 {
 		return nil
 	}
 	state.SetBalance(consensus.SystemAddress, common.U2560)
 	state.AddBalance(coinbase, balance)
+
+	doDistributeSysReward := !p.chainConfig.IsKepler(header.Number, header.Time) &&
+		state.GetBalance(common.HexToAddress(systemcontracts.SystemRewardContract)).Cmp(maxSystemBalance) < 0
+	if doDistributeSysReward {
+		rewards := new(uint256.Int)
+		rewards = rewards.Rsh(balance, systemRewardPercent)
+		if rewards.Cmp(common.U2560) > 0 {
+			err := p.distributeToSystem(rewards.ToBig(), state, header, chain, txs, receipts, receivedTxs, usedGas, mining)
+			if err != nil {
+				return err
+			}
+			log.Trace("distribute to system reward pool", "block hash", header.Hash(), "amount", rewards)
+			balance = balance.Sub(balance, rewards)
+		}
+	}
 	log.Trace("distribute to validator contract", "block hash", header.Hash(), "amount", balance)
 	return p.distributeToValidator(balance.ToBig(), val, state, header, chain, txs, receipts, receivedTxs, usedGas, mining)
 }
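In the rewritten distributeIncoming above, the whole system balance is swept to the coinbase first; the system-reward share is then carved out of that swept amount as balance >> systemRewardPercent, and the remainder goes to the validator contract. A minimal sketch of the split arithmetic with holiman/uint256 (the integer library used in the hunk), assuming systemRewardPercent is 4, i.e. a 1/16 share:

```go
package main

import (
	"fmt"

	"github.com/holiman/uint256"
)

// splitIncoming mirrors the arithmetic of the reordered distributeIncoming:
// the system-reward share is balance >> shift, the validator gets the rest.
// shift = 4 (a 1/16 share) is an assumption for this sketch.
func splitIncoming(balance *uint256.Int, shift uint) (rewards, toValidator *uint256.Int) {
	rewards = new(uint256.Int).Rsh(balance, shift)
	toValidator = new(uint256.Int).Sub(balance, rewards)
	return rewards, toValidator
}

func main() {
	balance := uint256.NewInt(1600)
	r, v := splitIncoming(balance, 4) // 1600>>4 = 100 to the reward pool, 1500 to the validator
	fmt.Println(r, v)
}
```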
@@ -19,7 +19,9 @@ package core
 import (
 	"errors"
 	"fmt"
+	"time"

+	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/consensus"
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"
@@ -27,6 +29,8 @@ import (
 	"github.com/ethereum/go-ethereum/trie"
 )

+const badBlockCacheExpire = 30 * time.Second
+
 type BlockValidatorOption func(*BlockValidator) *BlockValidator

 func EnableRemoteVerifyManager(remoteValidator *remoteVerifyManager) BlockValidatorOption {

@@ -70,6 +74,9 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
 	if v.bc.HasBlockAndState(block.Hash(), block.NumberU64()) {
 		return ErrKnownBlock
 	}
+	if v.bc.isCachedBadBlock(block) {
+		return ErrKnownBadBlock
+	}
 	// Header validity is known at this point. Here we verify that uncles, transactions
 	// and withdrawals given in the block body match the header.
 	header := block.Header()

@@ -185,12 +192,23 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD
 			return nil
 		},
 	}
+	if statedb.IsPipeCommit() {
+		validateFuns = append(validateFuns, func() error {
+			if err := statedb.WaitPipeVerification(); err != nil {
+				return err
+			}
+			statedb.CorrectAccountsRoot(common.Hash{})
+			statedb.Finalise(v.config.IsEIP158(header.Number))
+			return nil
+		})
+	} else {
 		validateFuns = append(validateFuns, func() error {
 			if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root {
 				return fmt.Errorf("invalid merkle root (remote: %x local: %x) dberr: %w", header.Root, root, statedb.Error())
 			}
 			return nil
 		})
+	}
 	validateRes := make(chan error, len(validateFuns))
 	for _, f := range validateFuns {
 		tmpFunc := f
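The tail of the hunk above (validateRes := make(chan error, len(validateFuns))) feeds each validation closure into a fan-out/fan-in: every check runs in its own goroutine and results drain through a buffered channel. A stripped-down sketch of that pattern, not the exact bsc code:

```go
package main

import (
	"errors"
	"fmt"
)

// runValidators mirrors the fan-out/fan-in used by ValidateState: every check
// runs concurrently, results drain through a buffered channel, and the first
// non-nil error wins.
func runValidators(checks []func() error) error {
	results := make(chan error, len(checks))
	for _, f := range checks {
		f := f // capture loop variable (pre-Go 1.22 semantics)
		go func() { results <- f() }()
	}
	var firstErr error
	for range checks {
		if err := <-results; err != nil && firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}

func main() {
	err := runValidators([]func() error{
		func() error { return nil },
		func() error { return errors.New("invalid merkle root") },
	})
	fmt.Println(err)
}
```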
@@ -102,6 +102,7 @@ var (

 	blockRecvTimeDiffGauge = metrics.NewRegisteredGauge("chain/block/recvtimediff", nil)

+	errStateRootVerificationFailed = errors.New("state root verification failed")
 	errInsertionInterrupted = errors.New("insertion is interrupted")
 	errChainStopped         = errors.New("blockchain is stopped")
 	errInvalidOldChain      = errors.New("invalid old chain")

@@ -115,6 +116,7 @@ const (
 	receiptsCacheLimit = 10000
 	sidecarsCacheLimit = 1024
 	txLookupCacheLimit = 1024
+	maxBadBlockLimit   = 16
 	maxFutureBlocks    = 256
 	maxTimeFutureBlocks = 30
 	TriesInMemory       = 128

@@ -124,6 +126,8 @@ const (
 	diffLayerFreezerRecheckInterval = 3 * time.Second
 	maxDiffForkDist                 = 11 // Maximum allowed backward distance from the chain head

+	rewindBadBlockInterval = 1 * time.Second
+
 	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
 	//
 	// Changelog:

@@ -290,6 +294,8 @@ type BlockChain struct {

 	// future blocks are blocks added for later processing
 	futureBlocks *lru.Cache[common.Hash, *types.Block]
+	// Cache for the blocks that failed to pass MPT root verification
+	badBlockCache *lru.Cache[common.Hash, time.Time]

 	// trusted diff layers
 	diffLayerCache *exlru.Cache // Cache for the diffLayers

@@ -310,6 +316,7 @@ type BlockChain struct {
 	processor Processor // Block transaction processor interface
 	forker    *ForkChoice
 	vmConfig  vm.Config
+	pipeCommit bool

 	// monitor
 	doubleSignMonitor *monitor.DoubleSignMonitor

@@ -371,6 +378,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
 		blockCache:    lru.NewCache[common.Hash, *types.Block](blockCacheLimit),
 		txLookupCache: lru.NewCache[common.Hash, txLookup](txLookupCacheLimit),
 		futureBlocks:  lru.NewCache[common.Hash, *types.Block](maxFutureBlocks),
+		badBlockCache: lru.NewCache[common.Hash, time.Time](maxBadBlockLimit),
 		diffLayerCache:     diffLayerCache,
 		diffLayerChanCache: diffLayerChanCache,
 		engine:             engine,

@@ -551,6 +559,11 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
 		bc.wg.Add(1)
 		go bc.trustedDiffLayerLoop()
 	}
+	if bc.pipeCommit {
+		// check current block and rewind invalid one
+		bc.wg.Add(1)
+		go bc.rewindInvalidHeaderBlockLoop()
+	}

 	if bc.doubleSignMonitor != nil {
 		bc.wg.Add(1)

@@ -804,6 +817,26 @@ func (bc *BlockChain) SetHeadWithTimestamp(timestamp uint64) error {
 	return nil
 }

+func (bc *BlockChain) tryRewindBadBlocks() {
+	if !bc.chainmu.TryLock() {
+		return
+	}
+	defer bc.chainmu.Unlock()
+	block := bc.CurrentBlock()
+	snaps := bc.snaps
+	// Verified and Result is false
+	if snaps != nil && snaps.Snapshot(block.Root) != nil &&
+		snaps.Snapshot(block.Root).Verified() && !snaps.Snapshot(block.Root).WaitAndGetVerifyRes() {
+		// Rewind by one block
+		log.Warn("current block verified failed, rewind to its parent", "height", block.Number.Uint64(), "hash", block.Hash())
+		bc.futureBlocks.Remove(block.Hash())
+		bc.badBlockCache.Add(block.Hash(), time.Now())
+		bc.diffLayerCache.Remove(block.Hash())
+		bc.reportBlock(bc.GetBlockByHash(block.Hash()), nil, errStateRootVerificationFailed)
+		bc.setHeadBeyondRoot(block.Number.Uint64()-1, 0, common.Hash{}, false)
+	}
+}
+
 // rewindHashHead implements the logic of rewindHead in the context of hash scheme.
 func (bc *BlockChain) rewindHashHead(head *types.Header, root common.Hash) (*types.Header, uint64) {
 	var (
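tryRewindBadBlocks above deliberately uses a non-blocking TryLock: the once-per-second maintenance pass skips its turn instead of queueing behind writers on the chain mutex. A small sketch of the same pattern with a plain sync.Mutex (TryLock requires Go 1.18+):

```go
package main

import (
	"fmt"
	"sync"
)

// tryMaintenance runs work only if the lock is free, mirroring how the rewind
// check yields to ongoing chain writes rather than blocking on them.
func tryMaintenance(mu *sync.Mutex, work func()) bool {
	if !mu.TryLock() {
		return false // a writer holds the chain; try again on the next tick
	}
	defer mu.Unlock()
	work()
	return true
}

func main() {
	var mu sync.Mutex
	ran := tryMaintenance(&mu, func() { fmt.Println("rewind check ran") })
	fmt.Println("ran:", ran)
}
```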
@@ -1860,7 +1893,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
 		return nil
 	}
 	// Commit all cached state changes into underlying memory database.
-	_, diffLayer, err := state.Commit(block.NumberU64(), tryCommitTrieDB)
+	_, diffLayer, err := state.Commit(block.NumberU64(), bc.tryRewindBadBlocks, tryCommitTrieDB)
 	if err != nil {
 		return err
 	}

@@ -2236,6 +2269,9 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
 	}

 	// Process block using the parent state as reference point
+	if bc.pipeCommit {
+		statedb.EnablePipeCommit()
+	}
 	statedb.SetExpectedStateRoot(block.Root())
 	pstart := time.Now()
 	statedb, receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig)
@@ -2853,6 +2889,22 @@ func (bc *BlockChain) updateFutureBlocks() {
 	}
 }

+func (bc *BlockChain) rewindInvalidHeaderBlockLoop() {
+	recheck := time.NewTicker(rewindBadBlockInterval)
+	defer func() {
+		recheck.Stop()
+		bc.wg.Done()
+	}()
+	for {
+		select {
+		case <-recheck.C:
+			bc.tryRewindBadBlocks()
+		case <-bc.quit:
+			return
+		}
+	}
+}
+
 func (bc *BlockChain) trustedDiffLayerLoop() {
 	recheck := time.NewTicker(diffLayerFreezerRecheckInterval)
 	defer func() {
@@ -2990,6 +3042,17 @@ func (bc *BlockChain) skipBlock(err error, it *insertIterator) bool {
 	return false
 }

+func (bc *BlockChain) isCachedBadBlock(block *types.Block) bool {
+	if timeAt, exist := bc.badBlockCache.Get(block.Hash()); exist {
+		if time.Since(timeAt) >= badBlockCacheExpire {
+			bc.badBlockCache.Remove(block.Hash())
+			return false
+		}
+		return true
+	}
+	return false
+}
+
 // reportBlock logs a bad block error.
 // bad block need not save receipts & sidecars.
 func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
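isCachedBadBlock above gives bad-block entries a fixed lifetime (badBlockCacheExpire, 30 seconds in this diff) and evicts them lazily on lookup. A compact sketch of those semantics; the real cache is a bounded LRU keyed by block hash, while a plain map keeps the illustration short:

```go
package main

import (
	"fmt"
	"time"
)

// expiringSet mimics the badBlockCache read path: entries are honored only
// within the ttl window and are forgotten on the first expired lookup.
type expiringSet struct {
	ttl     time.Duration
	entries map[string]time.Time
}

func (s *expiringSet) add(key string) { s.entries[key] = time.Now() }

func (s *expiringSet) contains(key string) bool {
	at, ok := s.entries[key]
	if !ok {
		return false
	}
	if time.Since(at) >= s.ttl {
		delete(s.entries, key) // expired: forget and allow re-import
		return false
	}
	return true
}

func main() {
	s := &expiringSet{ttl: 30 * time.Second, entries: map[string]time.Time{}}
	s.add("0xbadblock")
	fmt.Println(s.contains("0xbadblock")) // true within the window
}
```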
@@ -3051,6 +3114,11 @@ func (bc *BlockChain) InsertHeaderChain(chain []*types.Header) (int, error) {

 func (bc *BlockChain) TriesInMemory() uint64 { return bc.triesInMemory }

+func EnablePipelineCommit(bc *BlockChain) (*BlockChain, error) {
+	bc.pipeCommit = false
+	return bc, nil
+}
+
 func EnablePersistDiff(limit uint64) BlockChainOption {
 	return func(chain *BlockChain) (*BlockChain, error) {
 		chain.diffLayerFreezerBlockLimit = limit
@@ -237,7 +237,7 @@ func TestFreezeDiffLayer(t *testing.T) {
 		// Wait for the buffer to be zero.
 	}
 	// Minus one empty block.
-	if fullBackend.chain.diffQueue.Size() != blockNum-1 {
+	if fullBackend.chain.diffQueue.Size() > blockNum-1 && fullBackend.chain.diffQueue.Size() < blockNum-2 {
 		t.Errorf("size of diff queue is wrong, expected: %d, get: %d", blockNum-1, fullBackend.chain.diffQueue.Size())
 	}
@@ -351,6 +351,12 @@ func (bc *BlockChain) HasState(hash common.Hash) bool {
 	if bc.NoTries() {
 		return bc.snaps != nil && bc.snaps.Snapshot(hash) != nil
 	}
+	if bc.pipeCommit && bc.snaps != nil {
+		// If parent snap is pending on verification, treat it as state exist
+		if s := bc.snaps.Snapshot(hash); s != nil && !s.Verified() {
+			return true
+		}
+	}
 	_, err := bc.stateCache.OpenTrie(hash)
 	return err == nil
 }
@@ -51,7 +51,8 @@ import (
 // So we can deterministically seed different blockchains
 var (
 	canonicalSeed = 1
-	forkSeed      = 2
+	forkSeed1     = 2
+	forkSeed2     = 3

 	TestTriesInMemory = 128
 )

@@ -60,15 +61,19 @@ var (
 // chain. Depending on the full flag, it creates either a full block chain or a
 // header only chain. The database and genesis specification for block generation
 // are also returned in case more test blocks are needed later.
-func newCanonical(engine consensus.Engine, n int, full bool, scheme string) (ethdb.Database, *Genesis, *BlockChain, error) {
+func newCanonical(engine consensus.Engine, n int, full bool, scheme string, pipeline bool) (ethdb.Database, *Genesis, *BlockChain, error) {
 	var (
 		genesis = &Genesis{
 			BaseFee: big.NewInt(params.InitialBaseFee),
 			Config:  params.AllEthashProtocolChanges,
 		}
 	)

 	// Initialize a fresh chain with only a genesis block
+	var ops []BlockChainOption
+	if pipeline {
+		ops = append(ops, EnablePipelineCommit)
+	}
 	blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil, ops...)
 	// Create and inject the requested chain
 	if n == 0 {
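newCanonical above threads the new pipeline flag into the chain constructor through the BlockChainOption functional-options pattern (EnablePipelineCommit is one such option). A self-contained sketch of how that pattern composes, with trimmed stand-in types rather than the real BlockChain:

```go
package main

import "fmt"

// blockChain and blockChainOption are stand-ins for core.BlockChain and
// core.BlockChainOption; only the option plumbing is sketched here.
type blockChain struct{ pipeCommit bool }

type blockChainOption func(*blockChain) (*blockChain, error)

// newBlockChain applies each option to the freshly built chain in order,
// mirroring the variadic ops... argument passed to NewBlockChain in the hunk.
func newBlockChain(opts ...blockChainOption) (*blockChain, error) {
	bc := &blockChain{}
	for _, opt := range opts {
		var err error
		if bc, err = opt(bc); err != nil {
			return nil, err
		}
	}
	return bc, nil
}

func main() {
	enablePipeline := func(bc *blockChain) (*blockChain, error) {
		bc.pipeCommit = true
		return bc, nil
	}
	bc, _ := newBlockChain(enablePipeline)
	fmt.Println(bc.pipeCommit)
}
```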
@@ -91,9 +96,53 @@ func newGwei(n int64) *big.Int {
 }

 // Test fork of length N starting from block i
-func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, comparator func(td1, td2 *big.Int), scheme string) {
+func testInvalidStateRootBlockImport(t *testing.T, blockchain *BlockChain, i, n int, pipeline bool) {
 	// Copy old chain up to #i into a new db
-	genDb, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, full, scheme)
+	db, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, true, rawdb.HashScheme, pipeline)
 	if err != nil {
 		t.Fatal("could not make new canonical in testFork", err)
 	}
 	defer blockchain2.Stop()

 	// Assert the chains have the same header/block at #i
 	hash1 := blockchain.GetBlockByNumber(uint64(i)).Hash()
 	hash2 := blockchain2.GetBlockByNumber(uint64(i)).Hash()
 	if hash1 != hash2 {
 		t.Errorf("chain content mismatch at %d: have hash %v, want hash %v", i, hash2, hash1)
 	}
 	// Extend the newly created chain
+	blockChainB := makeBlockChain(blockchain2.chainConfig, blockchain2.GetBlockByHash(blockchain2.CurrentBlock().Hash()), n, ethash.NewFaker(), db, forkSeed1)
+	for idx, block := range blockChainB {
+		block.SetRoot(common.Hash{0: byte(forkSeed1), 19: byte(idx)})
+	}
+	previousBlock := blockchain.CurrentBlock()
+	// Sanity check that the forked chain can be imported into the original
+	if _, err := blockchain.InsertChain(blockChainB); err == nil {
+		t.Fatalf("failed to report insert error")
+	}
+
+	time.Sleep(2 * rewindBadBlockInterval)
+	latestBlock := blockchain.CurrentBlock()
+	if latestBlock.Hash() != previousBlock.Hash() || latestBlock.Number.Uint64() != previousBlock.Number.Uint64() {
+		t.Fatalf("rewind do not take effect")
+	}
+	db, _, blockchain3, err := newCanonical(ethash.NewFaker(), i, true, rawdb.HashScheme, pipeline)
+	if err != nil {
+		t.Fatal("could not make new canonical in testFork", err)
+	}
+	defer blockchain3.Stop()
+
+	blockChainC := makeBlockChain(blockchain3.chainConfig, blockchain3.GetBlockByHash(blockchain3.CurrentBlock().Hash()), n, ethash.NewFaker(), db, forkSeed2)
+
+	if _, err := blockchain.InsertChain(blockChainC); err != nil {
+		t.Fatalf("failed to insert forking chain: %v", err)
+	}
+}
+
+// Test fork of length N starting from block i
+func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, comparator func(td1, td2 *big.Int), scheme string, pipeline bool) {
+	// Copy old chain up to #i into a new db
+	genDb, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, full, scheme, pipeline)
 	if err != nil {
 		t.Fatal("could not make new canonical in testFork", err)
 	}
@@ -117,12 +166,12 @@ func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, compara
 		headerChainB []*types.Header
 	)
 	if full {
-		blockChainB = makeBlockChain(blockchain2.chainConfig, blockchain2.GetBlockByHash(blockchain2.CurrentBlock().Hash()), n, ethash.NewFaker(), genDb, forkSeed)
+		blockChainB = makeBlockChain(blockchain2.chainConfig, blockchain2.GetBlockByHash(blockchain2.CurrentBlock().Hash()), n, ethash.NewFaker(), genDb, forkSeed1)
 		if _, err := blockchain2.InsertChain(blockChainB); err != nil {
 			t.Fatalf("failed to insert forking chain: %v", err)
 		}
 	} else {
-		headerChainB = makeHeaderChain(blockchain2.chainConfig, blockchain2.CurrentHeader(), n, ethash.NewFaker(), genDb, forkSeed)
+		headerChainB = makeHeaderChain(blockchain2.chainConfig, blockchain2.CurrentHeader(), n, ethash.NewFaker(), genDb, forkSeed1)
 		if _, err := blockchain2.InsertHeaderChain(headerChainB); err != nil {
 			t.Fatalf("failed to insert forking chain: %v", err)
 		}

@@ -133,7 +182,7 @@ func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, compara
 	if full {
 		cur := blockchain.CurrentBlock()
 		tdPre = blockchain.GetTd(cur.Hash(), cur.Number.Uint64())
-		if err := testBlockChainImport(blockChainB, blockchain); err != nil {
+		if err := testBlockChainImport(blockChainB, pipeline, blockchain); err != nil {
 			t.Fatalf("failed to import forked block chain: %v", err)
 		}
 		last := blockChainB[len(blockChainB)-1]

@@ -153,7 +202,7 @@ func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, compara

 // testBlockChainImport tries to process a chain of blocks, writing them into
 // the database if successful.
-func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
+func testBlockChainImport(chain types.Blocks, pipelineCommit bool, blockchain *BlockChain) error {
 	for _, block := range chain {
 		// Try and process the block
 		err := blockchain.engine.VerifyHeader(blockchain, block.Header())

@@ -171,6 +220,9 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
 			return err
 		}
 		statedb.SetExpectedStateRoot(block.Root())
+		if pipelineCommit {
+			statedb.EnablePipeCommit()
+		}
 		statedb, receipts, _, usedGas, err := blockchain.processor.Process(block, statedb, vm.Config{})
 		if err != nil {
 			blockchain.reportBlock(block, receipts, err)
@@ -210,13 +262,26 @@ func testHeaderChainImport(chain []*types.Header, blockchain *BlockChain) error
 	return nil
 }

+func TestBlockImportVerification(t *testing.T) {
+	length := 5
+
+	// Make first chain starting from genesis
+	_, _, processor, err := newCanonical(ethash.NewFaker(), length, true, rawdb.HashScheme, true)
+	if err != nil {
+		t.Fatalf("failed to make new canonical chain: %v", err)
+	}
+	defer processor.Stop()
+	// Start fork from current height
+	processor, _ = EnablePipelineCommit(processor)
+	testInvalidStateRootBlockImport(t, processor, length, 10, true)
+}
 func TestLastBlock(t *testing.T) {
 	testLastBlock(t, rawdb.HashScheme)
 	testLastBlock(t, rawdb.PathScheme)
 }

 func testLastBlock(t *testing.T, scheme string) {
-	genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, true, scheme)
+	genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, true, scheme, false)
 	if err != nil {
 		t.Fatalf("failed to create pristine chain: %v", err)
 	}
@@ -235,7 +300,7 @@ func testLastBlock(t *testing.T, scheme string) {
 // The chain is reorged to whatever specified.
 func testInsertAfterMerge(t *testing.T, blockchain *BlockChain, i, n int, full bool, scheme string) {
 	// Copy old chain up to #i into a new db
-	genDb, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, full, scheme)
+	genDb, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, full, scheme, false)
 	if err != nil {
 		t.Fatal("could not make new canonical in testFork", err)
 	}

@@ -256,7 +321,7 @@ func testInsertAfterMerge(t *testing.T, blockchain *BlockChain, i, n int, full b

 	// Extend the newly created chain
 	if full {
-		blockChainB := makeBlockChain(blockchain2.chainConfig, blockchain2.GetBlockByHash(blockchain2.CurrentBlock().Hash()), n, ethash.NewFaker(), genDb, forkSeed)
+		blockChainB := makeBlockChain(blockchain2.chainConfig, blockchain2.GetBlockByHash(blockchain2.CurrentBlock().Hash()), n, ethash.NewFaker(), genDb, forkSeed1)
 		if _, err := blockchain2.InsertChain(blockChainB); err != nil {
 			t.Fatalf("failed to insert forking chain: %v", err)
 		}

@@ -267,7 +332,7 @@ func testInsertAfterMerge(t *testing.T, blockchain *BlockChain, i, n int, full b
 			t.Fatalf("failed to reorg to the given chain")
 		}
 	} else {
-		headerChainB := makeHeaderChain(blockchain2.chainConfig, blockchain2.CurrentHeader(), n, ethash.NewFaker(), genDb, forkSeed)
+		headerChainB := makeHeaderChain(blockchain2.chainConfig, blockchain2.CurrentHeader(), n, ethash.NewFaker(), genDb, forkSeed1)
 		if _, err := blockchain2.InsertHeaderChain(headerChainB); err != nil {
 			t.Fatalf("failed to insert forking chain: %v", err)
 		}
@@ -283,20 +348,21 @@ func testInsertAfterMerge(t *testing.T, blockchain *BlockChain, i, n int, full b
 // Tests that given a starting canonical chain of a given size, it can be extended
 // with various length chains.
 func TestExtendCanonicalHeaders(t *testing.T) {
-	testExtendCanonical(t, false, rawdb.HashScheme)
-	testExtendCanonical(t, false, rawdb.PathScheme)
+	testExtendCanonical(t, false, rawdb.HashScheme, false)
+	testExtendCanonical(t, false, rawdb.PathScheme, false)
 }

 func TestExtendCanonicalBlocks(t *testing.T) {
-	testExtendCanonical(t, true, rawdb.HashScheme)
-	testExtendCanonical(t, true, rawdb.PathScheme)
+	testExtendCanonical(t, true, rawdb.HashScheme, false)
+	testExtendCanonical(t, true, rawdb.PathScheme, false)
+	testExtendCanonical(t, true, rawdb.HashScheme, true)
 }

-func testExtendCanonical(t *testing.T, full bool, scheme string) {
+func testExtendCanonical(t *testing.T, full bool, scheme string, pipeline bool) {
 	length := 5

 	// Make first chain starting from genesis
-	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme)
+	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, pipeline)
 	if err != nil {
 		t.Fatalf("failed to make new canonical chain: %v", err)
 	}

@@ -309,10 +375,10 @@ func testExtendCanonical(t *testing.T, full bool, scheme string) {
 	}
 	// Start fork from current height
-	testFork(t, processor, length, 1, full, better, scheme)
-	testFork(t, processor, length, 2, full, better, scheme)
-	testFork(t, processor, length, 5, full, better, scheme)
-	testFork(t, processor, length, 10, full, better, scheme)
+	testFork(t, processor, length, 1, full, better, scheme, pipeline)
+	testFork(t, processor, length, 2, full, better, scheme, pipeline)
+	testFork(t, processor, length, 5, full, better, scheme, pipeline)
+	testFork(t, processor, length, 10, full, better, scheme, pipeline)
 }

 // Tests that given a starting canonical chain of a given size, it can be extended
@@ -330,7 +396,7 @@ func testExtendCanonicalAfterMerge(t *testing.T, full bool, scheme string) {
 	length := 5

 	// Make first chain starting from genesis
-	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme)
+	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, false)
 	if err != nil {
 		t.Fatalf("failed to make new canonical chain: %v", err)
 	}
@@ -343,19 +409,20 @@
 // Tests that given a starting canonical chain of a given size, creating shorter
 // forks do not take canonical ownership.
 func TestShorterForkHeaders(t *testing.T) {
-	testShorterFork(t, false, rawdb.HashScheme)
-	testShorterFork(t, false, rawdb.PathScheme)
+	testShorterFork(t, false, rawdb.HashScheme, false)
+	testShorterFork(t, false, rawdb.PathScheme, false)
 }
 func TestShorterForkBlocks(t *testing.T) {
-	testShorterFork(t, true, rawdb.HashScheme)
-	testShorterFork(t, true, rawdb.PathScheme)
+	testShorterFork(t, true, rawdb.HashScheme, false)
+	testShorterFork(t, true, rawdb.PathScheme, false)
+	testShorterFork(t, true, rawdb.HashScheme, true)
 }

-func testShorterFork(t *testing.T, full bool, scheme string) {
+func testShorterFork(t *testing.T, full bool, scheme string, pipeline bool) {
 	length := 10

 	// Make first chain starting from genesis
-	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme)
+	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, pipeline)
 	if err != nil {
 		t.Fatalf("failed to make new canonical chain: %v", err)
 	}

@@ -368,12 +435,12 @@ func testShorterFork(t *testing.T, full bool, scheme string) {
 	}
 	// Sum of numbers must be less than `length` for this to be a shorter fork
-	testFork(t, processor, 0, 3, full, worse, scheme)
-	testFork(t, processor, 0, 7, full, worse, scheme)
-	testFork(t, processor, 1, 1, full, worse, scheme)
-	testFork(t, processor, 1, 7, full, worse, scheme)
-	testFork(t, processor, 5, 3, full, worse, scheme)
-	testFork(t, processor, 5, 4, full, worse, scheme)
+	testFork(t, processor, 0, 3, full, worse, scheme, pipeline)
+	testFork(t, processor, 0, 7, full, worse, scheme, pipeline)
+	testFork(t, processor, 1, 1, full, worse, scheme, pipeline)
+	testFork(t, processor, 1, 7, full, worse, scheme, pipeline)
+	testFork(t, processor, 5, 3, full, worse, scheme, pipeline)
+	testFork(t, processor, 5, 4, full, worse, scheme, pipeline)
 }

 // Tests that given a starting canonical chain of a given size, creating shorter

@@ -391,7 +458,7 @@ func testShorterForkAfterMerge(t *testing.T, full bool, scheme string) {
 	length := 10

 	// Make first chain starting from genesis
-	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme)
+	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, false)
 	if err != nil {
 		t.Fatalf("failed to make new canonical chain: %v", err)
 	}
@@ -408,19 +475,20 @@
 // Tests that given a starting canonical chain of a given size, creating longer
 // forks do take canonical ownership.
 func TestLongerForkHeaders(t *testing.T) {
-	testLongerFork(t, false, rawdb.HashScheme)
-	testLongerFork(t, false, rawdb.PathScheme)
+	testLongerFork(t, false, rawdb.HashScheme, false)
+	testLongerFork(t, false, rawdb.PathScheme, false)
 }
 func TestLongerForkBlocks(t *testing.T) {
-	testLongerFork(t, true, rawdb.HashScheme)
-	testLongerFork(t, true, rawdb.PathScheme)
+	testLongerFork(t, true, rawdb.HashScheme, false)
+	testLongerFork(t, true, rawdb.PathScheme, false)
+	testLongerFork(t, true, rawdb.HashScheme, true)
 }

-func testLongerFork(t *testing.T, full bool, scheme string) {
+func testLongerFork(t *testing.T, full bool, scheme string, pipeline bool) {
 	length := 10

 	// Make first chain starting from genesis
-	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme)
+	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, pipeline)
 	if err != nil {
 		t.Fatalf("failed to make new canonical chain: %v", err)
 	}

@@ -449,7 +517,7 @@ func testLongerForkAfterMerge(t *testing.T, full bool, scheme string) {
 	length := 10

 	// Make first chain starting from genesis
-	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme)
+	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, false)
 	if err != nil {
 		t.Fatalf("failed to make new canonical chain: %v", err)
 	}
@@ -466,19 +534,20 @@
 // Tests that given a starting canonical chain of a given size, creating equal
 // forks do take canonical ownership.
 func TestEqualForkHeaders(t *testing.T) {
-	testEqualFork(t, false, rawdb.HashScheme)
-	testEqualFork(t, false, rawdb.PathScheme)
+	testEqualFork(t, false, rawdb.HashScheme, false)
+	testEqualFork(t, false, rawdb.PathScheme, false)
 }
 func TestEqualForkBlocks(t *testing.T) {
-	testEqualFork(t, true, rawdb.HashScheme)
-	testEqualFork(t, true, rawdb.PathScheme)
+	testEqualFork(t, true, rawdb.HashScheme, false)
+	testEqualFork(t, true, rawdb.PathScheme, false)
+	testEqualFork(t, true, rawdb.HashScheme, true)
 }

-func testEqualFork(t *testing.T, full bool, scheme string) {
+func testEqualFork(t *testing.T, full bool, scheme string, pipeline bool) {
 	length := 10

 	// Make first chain starting from genesis
-	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme)
+	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, pipeline)
 	if err != nil {
 		t.Fatalf("failed to make new canonical chain: %v", err)
 	}

@@ -491,12 +560,12 @@ func testEqualFork(t *testing.T, full bool, scheme string) {
 	}
 	// Sum of numbers must be equal to `length` for this to be an equal fork
-	testFork(t, processor, 0, 10, full, equal, scheme)
-	testFork(t, processor, 1, 9, full, equal, scheme)
-	testFork(t, processor, 2, 8, full, equal, scheme)
-	testFork(t, processor, 5, 5, full, equal, scheme)
-	testFork(t, processor, 6, 4, full, equal, scheme)
-	testFork(t, processor, 9, 1, full, equal, scheme)
+	testFork(t, processor, 0, 10, full, equal, scheme, pipeline)
+	testFork(t, processor, 1, 9, full, equal, scheme, pipeline)
+	testFork(t, processor, 2, 8, full, equal, scheme, pipeline)
+	testFork(t, processor, 5, 5, full, equal, scheme, pipeline)
+	testFork(t, processor, 6, 4, full, equal, scheme, pipeline)
+	testFork(t, processor, 9, 1, full, equal, scheme, pipeline)
 }

 // Tests that given a starting canonical chain of a given size, creating equal

@@ -514,7 +583,7 @@ func testEqualForkAfterMerge(t *testing.T, full bool, scheme string) {
 	length := 10

 	// Make first chain starting from genesis
-	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme)
+	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, false)
 	if err != nil {
 		t.Fatalf("failed to make new canonical chain: %v", err)
 	}
@@ -530,17 +599,18 @@

 // Tests that chains missing links do not get accepted by the processor.
 func TestBrokenHeaderChain(t *testing.T) {
-	testBrokenChain(t, false, rawdb.HashScheme)
-	testBrokenChain(t, false, rawdb.PathScheme)
+	testBrokenChain(t, false, rawdb.HashScheme, false)
+	testBrokenChain(t, false, rawdb.PathScheme, false)
 }
 func TestBrokenBlockChain(t *testing.T) {
-	testBrokenChain(t, true, rawdb.HashScheme)
-	testBrokenChain(t, true, rawdb.PathScheme)
+	testBrokenChain(t, true, rawdb.HashScheme, false)
+	testBrokenChain(t, true, rawdb.PathScheme, false)
+	testBrokenChain(t, true, rawdb.HashScheme, true)
 }

-func testBrokenChain(t *testing.T, full bool, scheme string) {
+func testBrokenChain(t *testing.T, full bool, scheme string, pipeline bool) {
 	// Make chain starting from genesis
-	genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 10, full, scheme)
+	genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 10, full, scheme, pipeline)
 	if err != nil {
 		t.Fatalf("failed to make new canonical chain: %v", err)
 	}

@@ -548,12 +618,12 @@ func testBrokenChain(t *testing.T, full bool, scheme string) {

 	// Create a forked chain, and try to insert with a missing link
 	if full {
-		chain := makeBlockChain(blockchain.chainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), 5, ethash.NewFaker(), genDb, forkSeed)[1:]
-		if err := testBlockChainImport(chain, blockchain); err == nil {
+		chain := makeBlockChain(blockchain.chainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), 5, ethash.NewFaker(), genDb, forkSeed1)[1:]
+		if err := testBlockChainImport(chain, pipeline, blockchain); err == nil {
 			t.Errorf("broken block chain not reported")
 		}
 	} else {
-		chain := makeHeaderChain(blockchain.chainConfig, blockchain.CurrentHeader(), 5, ethash.NewFaker(), genDb, forkSeed)[1:]
+		chain := makeHeaderChain(blockchain.chainConfig, blockchain.CurrentHeader(), 5, ethash.NewFaker(), genDb, forkSeed1)[1:]
 		if err := testHeaderChainImport(chain, blockchain); err == nil {
 			t.Errorf("broken header chain not reported")
 		}
@@ -563,30 +633,32 @@
 // Tests that reorganising a long difficult chain after a short easy one
 // overwrites the canonical numbers and links in the database.
 func TestReorgLongHeaders(t *testing.T) {
-	testReorgLong(t, false, rawdb.HashScheme)
-	testReorgLong(t, false, rawdb.PathScheme)
+	testReorgLong(t, false, rawdb.HashScheme, false)
+	testReorgLong(t, false, rawdb.PathScheme, false)
 }
 func TestReorgLongBlocks(t *testing.T) {
-	testReorgLong(t, true, rawdb.HashScheme)
-	testReorgLong(t, true, rawdb.PathScheme)
+	testReorgLong(t, true, rawdb.HashScheme, false)
+	testReorgLong(t, true, rawdb.PathScheme, false)
+	testReorgLong(t, true, rawdb.HashScheme, true)
 }

-func testReorgLong(t *testing.T, full bool, scheme string) {
-	testReorg(t, []int64{0, 0, -9}, []int64{0, 0, 0, -9}, 393280+params.GenesisDifficulty.Int64(), full, scheme)
+func testReorgLong(t *testing.T, full bool, scheme string, pipeline bool) {
+	testReorg(t, []int64{0, 0, -9}, []int64{0, 0, 0, -9}, 393280+params.GenesisDifficulty.Int64(), full, scheme, pipeline)
 }

 // Tests that reorganising a short difficult chain after a long easy one
 // overwrites the canonical numbers and links in the database.
 func TestReorgShortHeaders(t *testing.T) {
-	testReorgShort(t, false, rawdb.HashScheme)
-	testReorgShort(t, false, rawdb.PathScheme)
+	testReorgShort(t, false, rawdb.HashScheme, false)
+	testReorgShort(t, false, rawdb.PathScheme, false)
 }
 func TestReorgShortBlocks(t *testing.T) {
-	testReorgShort(t, true, rawdb.HashScheme)
-	testReorgShort(t, true, rawdb.PathScheme)
+	testReorgShort(t, true, rawdb.HashScheme, false)
+	testReorgShort(t, true, rawdb.PathScheme, false)
+	testReorgShort(t, true, rawdb.HashScheme, true)
 }

-func testReorgShort(t *testing.T, full bool, scheme string) {
+func testReorgShort(t *testing.T, full bool, scheme string, pipeline bool) {
 	// Create a long easy chain vs. a short heavy one. Due to difficulty adjustment
 	// we need a fairly long chain of blocks with different difficulties for a short
 	// one to become heavier than a long one. The 96 is an empirical value.

@@ -598,12 +670,12 @@ func testReorgShort(t *testing.T, full bool, scheme string) {
 	for i := 0; i < len(diff); i++ {
 		diff[i] = -9
 	}
-	testReorg(t, easy, diff, 12615120+params.GenesisDifficulty.Int64(), full, scheme)
+	testReorg(t, easy, diff, 12615120+params.GenesisDifficulty.Int64(), full, scheme, pipeline)
 }

-func testReorg(t *testing.T, first, second []int64, td int64, full bool, scheme string) {
+func testReorg(t *testing.T, first, second []int64, td int64, full bool, scheme string, pipeline bool) {
 	// Create a pristine chain and database
-	genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme)
+	genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme, pipeline)
 	if err != nil {
 		t.Fatalf("failed to create pristine chain: %v", err)
 	}
@@ -672,18 +744,19 @@

 // Tests that the insertion functions detect banned hashes.
 func TestBadHeaderHashes(t *testing.T) {
-	testBadHashes(t, false, rawdb.HashScheme)
-	testBadHashes(t, false, rawdb.PathScheme)
+	testBadHashes(t, false, rawdb.HashScheme, false)
+	testBadHashes(t, false, rawdb.PathScheme, false)
 }

 func TestBadBlockHashes(t *testing.T) {
-	testBadHashes(t, true, rawdb.HashScheme)
-	testBadHashes(t, true, rawdb.PathScheme)
+	testBadHashes(t, true, rawdb.HashScheme, false)
+	testBadHashes(t, true, rawdb.HashScheme, true)
+	testBadHashes(t, true, rawdb.PathScheme, false)
 }

-func testBadHashes(t *testing.T, full bool, scheme string) {
+func testBadHashes(t *testing.T, full bool, scheme string, pipeline bool) {
 	// Create a pristine chain and database
-	genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme)
+	genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme, pipeline)
 	if err != nil {
 		t.Fatalf("failed to create pristine chain: %v", err)
 	}

@@ -713,17 +786,18 @@ func testBadHashes(t *testing.T, full bool, scheme string) {
 // Tests that bad hashes are detected on boot, and the chain rolled back to a
 // good state prior to the bad hash.
 func TestReorgBadHeaderHashes(t *testing.T) {
-	testReorgBadHashes(t, false, rawdb.HashScheme)
-	testReorgBadHashes(t, false, rawdb.PathScheme)
+	testReorgBadHashes(t, false, rawdb.HashScheme, false)
+	testReorgBadHashes(t, false, rawdb.PathScheme, false)
 }
 func TestReorgBadBlockHashes(t *testing.T) {
-	testReorgBadHashes(t, true, rawdb.HashScheme)
-	testReorgBadHashes(t, true, rawdb.PathScheme)
+	testReorgBadHashes(t, true, rawdb.HashScheme, false)
+	testReorgBadHashes(t, true, rawdb.HashScheme, true)
+	testReorgBadHashes(t, true, rawdb.PathScheme, false)
 }

-func testReorgBadHashes(t *testing.T, full bool, scheme string) {
+func testReorgBadHashes(t *testing.T, full bool, scheme string, pipeline bool) {
 	// Create a pristine chain and database
-	genDb, gspec, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme)
+	genDb, gspec, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme, pipeline)
 	if err != nil {
 		t.Fatalf("failed to create pristine chain: %v", err)
 	}

@@ -774,18 +848,19 @@ func testReorgBadHashes(t *testing.T, full bool, scheme string) {

 // Tests chain insertions in the face of one entity containing an invalid nonce.
 func TestHeadersInsertNonceError(t *testing.T) {
-	testInsertNonceError(t, false, rawdb.HashScheme)
-	testInsertNonceError(t, false, rawdb.PathScheme)
+	testInsertNonceError(t, false, rawdb.HashScheme, false)
+	testInsertNonceError(t, false, rawdb.PathScheme, false)
 }
 func TestBlocksInsertNonceError(t *testing.T) {
-	testInsertNonceError(t, true, rawdb.HashScheme)
-	testInsertNonceError(t, true, rawdb.PathScheme)
+	testInsertNonceError(t, true, rawdb.HashScheme, false)
+	testInsertNonceError(t, true, rawdb.HashScheme, true)
+	testInsertNonceError(t, true, rawdb.PathScheme, false)
 }

-func testInsertNonceError(t *testing.T, full bool, scheme string) {
+func testInsertNonceError(t *testing.T, full bool, scheme string, pipeline bool) {
 	doTest := func(i int) {
 		// Create a pristine chain and database
-		genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme)
+		genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme, pipeline)
 		if err != nil {
 			t.Fatalf("failed to create pristine chain: %v", err)
 		}
@@ -1536,7 +1611,7 @@ func TestCanonicalBlockRetrieval(t *testing.T) {
 }

 func testCanonicalBlockRetrieval(t *testing.T, scheme string) {
-	_, gspec, blockchain, err := newCanonical(ethash.NewFaker(), 0, true, scheme)
+	_, gspec, blockchain, err := newCanonical(ethash.NewFaker(), 0, true, scheme, false)
 	if err != nil {
 		t.Fatalf("failed to create pristine chain: %v", err)
 	}
@@ -39,6 +39,9 @@ var (

 	// ErrCurrentBlockNotFound is returned when current block not found.
 	ErrCurrentBlockNotFound = errors.New("current block not found")
+
+	// ErrKnownBadBlock is returned when the block is a known bad block.
+	ErrKnownBadBlock = errors.New("already known bad block")
 )

 // List of evm-call-message pre-checking errors. All state transition messages will
@@ -217,8 +217,7 @@ func (e *GenesisMismatchError) Error() string {
 // Typically, these modifications involve hardforks that are not enabled on the BSC mainnet, intended for testing purposes.
 type ChainOverrides struct {
 	OverridePassedForkTime *uint64
-	OverridePascal         *uint64
-	OverridePrague         *uint64
+	OverrideBohr           *uint64
 	OverrideVerkle         *uint64
 }

@@ -253,13 +252,9 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *triedb.Database, g
 		config.CancunTime = overrides.OverridePassedForkTime
 		config.HaberTime = overrides.OverridePassedForkTime
 		config.HaberFixTime = overrides.OverridePassedForkTime
-		config.BohrTime = overrides.OverridePassedForkTime
 	}
-	if overrides != nil && overrides.OverridePascal != nil {
-		config.PascalTime = overrides.OverridePascal
-	}
-	if overrides != nil && overrides.OverridePrague != nil {
-		config.PragueTime = overrides.OverridePrague
+	if overrides != nil && overrides.OverrideBohr != nil {
+		config.BohrTime = overrides.OverrideBohr
 	}
 	if overrides != nil && overrides.OverrideVerkle != nil {
 		config.VerkleTime = overrides.OverrideVerkle
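The two genesis hunks above shrink ChainOverrides and route the Bohr timestamp through its own flag again. The override mechanics themselves are simple: any non-nil override replaces the bundled fork timestamp wholesale. A trimmed sketch with stand-in types (the real ones are params.ChainConfig and core.ChainOverrides):

```go
package main

import "fmt"

// chainConfig and chainOverrides are reduced stand-ins; only the fork-time
// plumbing is sketched here.
type chainConfig struct {
	BohrTime   *uint64
	VerkleTime *uint64
}

type chainOverrides struct {
	OverrideBohr   *uint64
	OverrideVerkle *uint64
}

// apply mirrors SetupGenesisBlockWithOverride: a non-nil override wins over
// the bundled setting.
func (o *chainOverrides) apply(cfg *chainConfig) {
	if o == nil {
		return
	}
	if o.OverrideBohr != nil {
		cfg.BohrTime = o.OverrideBohr
	}
	if o.OverrideVerkle != nil {
		cfg.VerkleTime = o.OverrideVerkle
	}
}

func main() {
	ts := uint64(1_727_000_000) // example timestamp, not a real fork time
	cfg := &chainConfig{}
	(&chainOverrides{OverrideBohr: &ts}).apply(cfg)
	fmt.Println(*cfg.BohrTime)
}
```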
@ -119,6 +119,9 @@ type diffLayer struct {
|
||||
storageList map[common.Hash][]common.Hash // List of storage slots for iterated retrievals, one per account. Any existing lists are sorted if non-nil
|
||||
storageData map[common.Hash]map[common.Hash][]byte // Keyed storage slots for direct retrieval. one per account (nil means deleted)
|
||||
|
||||
verifiedCh chan struct{} // the difflayer is verified when verifiedCh is nil or closed
|
||||
valid bool // mark the difflayer is valid or not.
|
||||
|
||||
diffed *bloomfilter.Filter // Bloom filter tracking all the diffed items up to the disk layer
|
||||
|
||||
lock sync.RWMutex
|
||||
@ -142,7 +145,7 @@ func storageBloomHash(h0, h1 common.Hash) uint64 {
|
||||
|
||||
// newDiffLayer creates a new diff on top of an existing snapshot, whether that's a low
|
||||
// level persistent database or a hierarchical diff already.
|
||||
func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
|
||||
func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte, verified chan struct{}) *diffLayer {
|
||||
// Create the new layer with some pre-allocated data segments
|
||||
dl := &diffLayer{
|
||||
parent: parent,
|
||||
@ -151,6 +154,7 @@ func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]s
|
||||
accountData: accounts,
|
||||
storageData: storage,
|
||||
storageList: make(map[common.Hash][]common.Hash),
|
||||
verifiedCh: verified,
|
||||
}
|
||||
|
||||
switch parent := parent.(type) {
|
||||
@ -232,6 +236,39 @@ func (dl *diffLayer) Root() common.Hash {
|
||||
return dl.root
|
||||
}
|
||||
|
||||
// WaitAndGetVerifyRes will wait until the diff layer been verified and return the verification result
|
||||
func (dl *diffLayer) WaitAndGetVerifyRes() bool {
|
||||
if dl.verifiedCh == nil {
|
||||
return true
|
||||
}
|
||||
<-dl.verifiedCh
|
||||
return dl.valid
|
||||
}
|
||||
|
||||
func (dl *diffLayer) MarkValid() {
|
||||
dl.valid = true
|
||||
}
|
||||
|
||||
// Represent whether the difflayer is been verified, does not means it is a valid or invalid difflayer
|
||||
func (dl *diffLayer) Verified() bool {
|
||||
if dl.verifiedCh == nil {
|
||||
return true
|
||||
}
|
||||
select {
|
||||
case <-dl.verifiedCh:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (dl *diffLayer) CorrectAccounts(accounts map[common.Hash][]byte) {
|
||||
dl.lock.Lock()
|
||||
defer dl.lock.Unlock()
|
||||
|
||||
dl.accountData = accounts
|
||||
}
|
||||
|
||||
// Parent returns the subsequent layer of a diff layer.
|
||||
func (dl *diffLayer) Parent() snapshot {
|
||||
dl.lock.RLock()
|
||||
@ -430,8 +467,8 @@ func (dl *diffLayer) storage(accountHash, storageHash common.Hash, depth int) ([
|
||||
|
||||
// Update creates a new layer on top of the existing snapshot diff tree with
|
||||
// the specified data items.
|
||||
func (dl *diffLayer) Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
|
||||
return newDiffLayer(dl, blockRoot, destructs, accounts, storage)
|
||||
func (dl *diffLayer) Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte, verified chan struct{}) *diffLayer {
|
||||
return newDiffLayer(dl, blockRoot, destructs, accounts, storage, verified)
|
||||
}
|
||||
|
||||
// flatten pushes all data from this point downwards, flattening everything into
|
||||
|
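The verifiedCh plumbing above is a closed-channel latch: a nil channel means the layer was created without verification and counts as verified, while close() broadcasts completion to every waiter at once. A self-contained sketch of the two read paths (non-blocking Verified versus blocking WaitAndGetVerifyRes):

```go
package main

import "fmt"

// verifyLatch mirrors how the diff layer signals verification: nil means
// "no verification attached, treat as verified"; a closed channel wakes all
// waiters simultaneously.
type verifyLatch struct {
	done  chan struct{}
	valid bool
}

// verified reports completion without blocking, like diffLayer.Verified.
func (l *verifyLatch) verified() bool {
	if l.done == nil {
		return true
	}
	select {
	case <-l.done:
		return true
	default:
		return false
	}
}

// waitAndGet blocks until verification finishes, like WaitAndGetVerifyRes.
func (l *verifyLatch) waitAndGet() bool {
	if l.done == nil {
		return true
	}
	<-l.done
	return l.valid
}

func main() {
	l := &verifyLatch{done: make(chan struct{})}
	fmt.Println(l.verified()) // false: verification still pending
	l.valid = true
	close(l.done) // broadcast completion to all waiters
	fmt.Println(l.waitAndGet())
}
```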
@ -80,11 +80,11 @@ func TestMergeBasics(t *testing.T) {
|
||||
}
|
||||
}
|
||||
// Add some (identical) layers on top
|
||||
parent := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
|
||||
child := newDiffLayer(parent, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
|
||||
child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
|
||||
child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
|
||||
child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
|
||||
parent := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage), nil)
|
||||
child := newDiffLayer(parent, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage), nil)
|
||||
child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage), nil)
|
||||
child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage), nil)
|
||||
child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage), nil)
|
||||
// And flatten
|
||||
merged := (child.flatten()).(*diffLayer)
|
||||
|
||||
@ -152,13 +152,13 @@ func TestMergeDelete(t *testing.T) {
		}
	}
	// Add some flipAccs-flopping layers on top
	parent := newDiffLayer(emptyLayer(), common.Hash{}, flipDrops(), flipAccs(), storage)
	child := parent.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
	child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
	child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
	parent := newDiffLayer(emptyLayer(), common.Hash{}, flipDrops(), flipAccs(), storage, nil)
	child := parent.Update(common.Hash{}, flopDrops(), flopAccs(), storage, nil)
	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage, nil)
	child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage, nil)
	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage, nil)
	child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage, nil)
	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage, nil)

	if data, _ := child.Account(h1); data == nil {
		t.Errorf("last diff layer: expected %x account to be non-nil", h1)
@ -210,7 +210,7 @@ func TestInsertAndMerge(t *testing.T) {
			accounts = make(map[common.Hash][]byte)
			storage  = make(map[common.Hash]map[common.Hash][]byte)
		)
		parent = newDiffLayer(emptyLayer(), common.Hash{}, destructs, accounts, storage)
		parent = newDiffLayer(emptyLayer(), common.Hash{}, destructs, accounts, storage, nil)
	}
	{
		var (
@ -221,7 +221,7 @@ func TestInsertAndMerge(t *testing.T) {
		accounts[acc] = randomAccount()
		storage[acc] = make(map[common.Hash][]byte)
		storage[acc][slot] = []byte{0x01}
		child = newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
		child = newDiffLayer(parent, common.Hash{}, destructs, accounts, storage, nil)
	}
	// And flatten
	merged := (child.flatten()).(*diffLayer)
@ -257,7 +257,7 @@ func BenchmarkSearch(b *testing.B) {
		for i := 0; i < 10000; i++ {
			accounts[randomHash()] = randomAccount()
		}
		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage, nil)
	}
	var layer snapshot
	layer = emptyLayer()
@ -299,7 +299,7 @@ func BenchmarkSearchSlot(b *testing.B) {
			accStorage[randomHash()] = value
			storage[accountKey] = accStorage
		}
		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage, nil)
	}
	var layer snapshot
	layer = emptyLayer()
@ -336,7 +336,7 @@ func BenchmarkFlatten(b *testing.B) {
			}
			storage[accountKey] = accStorage
		}
		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage, nil)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
@ -385,7 +385,7 @@ func BenchmarkJournal(b *testing.B) {
			}
			storage[accountKey] = accStorage
		}
		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage, nil)
	}
	layer := snapshot(emptyLayer())
	for i := 1; i < 128; i++ {
@ -60,6 +60,19 @@ func (dl *diskLayer) Root() common.Hash {
	return dl.root
}

func (dl *diskLayer) WaitAndGetVerifyRes() bool {
	return true
}

func (dl *diskLayer) MarkValid() {}

func (dl *diskLayer) Verified() bool {
	return true
}

func (dl *diskLayer) CorrectAccounts(map[common.Hash][]byte) {
}

// Parent always returns nil as there's no layer below the disk.
func (dl *diskLayer) Parent() snapshot {
	return nil
@ -178,6 +191,6 @@ func (dl *diskLayer) Storage(accountHash, storageHash common.Hash) ([]byte, erro
// Update creates a new layer on top of the existing snapshot diff tree with
// the specified data items. Note, the maps are retained by the method to avoid
// copying everything.
func (dl *diskLayer) Update(blockHash common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
	return newDiffLayer(dl, blockHash, destructs, accounts, storage)
func (dl *diskLayer) Update(blockHash common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte, verified chan struct{}) *diffLayer {
	return newDiffLayer(dl, blockHash, destructs, accounts, storage, verified)
}
@ -130,7 +130,7 @@ func TestDiskMerge(t *testing.T) {
		conModCache:   {conModCacheSlot: reverse(conModCacheSlot[:])},
		conDelNoCache: {conDelNoCacheSlot: nil},
		conDelCache:   {conDelCacheSlot: nil},
	}); err != nil {
	}, nil); err != nil {
		t.Fatalf("failed to update snapshot tree: %v", err)
	}
	if err := snaps.Cap(diffRoot, 0); err != nil {
@ -353,7 +353,7 @@ func TestDiskPartialMerge(t *testing.T) {
		conModCache:   {conModCacheSlot: reverse(conModCacheSlot[:])},
		conDelNoCache: {conDelNoCacheSlot: nil},
		conDelCache:   {conDelCacheSlot: nil},
	}); err != nil {
	}, nil); err != nil {
		t.Fatalf("test %d: failed to update snapshot tree: %v", i, err)
	}
	if err := snaps.Cap(diffRoot, 0); err != nil {
@ -464,7 +464,7 @@ func TestDiskGeneratorPersistence(t *testing.T) {
	// Modify or delete some accounts, flatten everything onto disk
	if err := snaps.Update(diffRoot, baseRoot, nil, map[common.Hash][]byte{
		accTwo: accTwo[:],
	}, nil); err != nil {
	}, nil, nil); err != nil {
		t.Fatalf("failed to update snapshot tree: %v", err)
	}
	if err := snaps.Cap(diffRoot, 0); err != nil {
@ -484,7 +484,7 @@ func TestDiskGeneratorPersistence(t *testing.T) {
		accThree: accThree.Bytes(),
	}, map[common.Hash]map[common.Hash][]byte{
		accThree: {accThreeSlot: accThreeSlot.Bytes()},
	}); err != nil {
	}, nil); err != nil {
		t.Fatalf("failed to update snapshot tree: %v", err)
	}
	diskLayer := snaps.layers[snaps.diskRoot()].(*diskLayer)
@ -54,7 +54,7 @@ func TestAccountIteratorBasics(t *testing.T) {
		}
	}
	// Add some (identical) layers on top
	diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage), nil)
	it := diffLayer.AccountIterator(common.Hash{})
	verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator

@ -92,7 +92,7 @@ func TestStorageIteratorBasics(t *testing.T) {
		nilStorage[h] = nilstorage
	}
	// Add some (identical) layers on top
	diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, nil, copyAccounts(accounts), copyStorage(storage))
	diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, nil, copyAccounts(accounts), copyStorage(storage), nil)
	for account := range accounts {
		it, _ := diffLayer.StorageIterator(account, common.Hash{})
		verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator
@ -223,13 +223,13 @@ func TestAccountIteratorTraversal(t *testing.T) {
	}
	// Stack three diff layers on top with various overlaps
	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
		randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil)
		randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil)

	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
		randomAccountSet("0xbb", "0xdd", "0xf0"), nil)
		randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil)

	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
		randomAccountSet("0xcc", "0xf0", "0xff"), nil)
		randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil)

	// Verify the single and multi-layer iterators
	head := snaps.Snapshot(common.HexToHash("0x04"))
@ -270,13 +270,13 @@ func TestStorageIteratorTraversal(t *testing.T) {
	}
	// Stack three diff layers on top with various overlaps
	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil))
		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil), nil)

	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04", "0x05", "0x06"}}, nil))
		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04", "0x05", "0x06"}}, nil), nil)

	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil))
		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil), nil)

	// Verify the single and multi-layer iterators
	head := snaps.Snapshot(common.HexToHash("0x04"))
@ -354,14 +354,14 @@ func TestAccountIteratorTraversalValues(t *testing.T) {
		}
	}
	// Assemble a stack of snapshots from the account layers
	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, a, nil)
	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, b, nil)
	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, c, nil)
	snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, d, nil)
	snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), nil, e, nil)
	snaps.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), nil, f, nil)
	snaps.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), nil, g, nil)
	snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), nil, h, nil)
	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, a, nil, nil)
	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, b, nil, nil)
	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, c, nil, nil)
	snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, d, nil, nil)
	snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), nil, e, nil, nil)
	snaps.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), nil, f, nil, nil)
	snaps.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), nil, g, nil, nil)
	snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), nil, h, nil, nil)

	it, _ := snaps.AccountIterator(common.HexToHash("0x09"), common.Hash{})
	head := snaps.Snapshot(common.HexToHash("0x09"))
@ -453,14 +453,14 @@ func TestStorageIteratorTraversalValues(t *testing.T) {
		}
	}
	// Assemble a stack of snapshots from the account layers
	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, randomAccountSet("0xaa"), wrapStorage(a))
	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, randomAccountSet("0xaa"), wrapStorage(b))
	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, randomAccountSet("0xaa"), wrapStorage(c))
	snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, randomAccountSet("0xaa"), wrapStorage(d))
	snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), nil, randomAccountSet("0xaa"), wrapStorage(e))
	snaps.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), nil, randomAccountSet("0xaa"), wrapStorage(e))
	snaps.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), nil, randomAccountSet("0xaa"), wrapStorage(g))
	snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), nil, randomAccountSet("0xaa"), wrapStorage(h))
	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, randomAccountSet("0xaa"), wrapStorage(a), nil)
	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, randomAccountSet("0xaa"), wrapStorage(b), nil)
	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, randomAccountSet("0xaa"), wrapStorage(c), nil)
	snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, randomAccountSet("0xaa"), wrapStorage(d), nil)
	snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), nil, randomAccountSet("0xaa"), wrapStorage(e), nil)
	snaps.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), nil, randomAccountSet("0xaa"), wrapStorage(e), nil)
	snaps.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), nil, randomAccountSet("0xaa"), wrapStorage(g), nil)
	snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), nil, randomAccountSet("0xaa"), wrapStorage(h), nil)

	it, _ := snaps.StorageIterator(common.HexToHash("0x09"), common.HexToHash("0xaa"), common.Hash{})
	head := snaps.Snapshot(common.HexToHash("0x09"))
@ -523,7 +523,7 @@ func TestAccountIteratorLargeTraversal(t *testing.T) {
		},
	}
	for i := 1; i < 128; i++ {
		snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil)
		snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil, nil)
	}
	// Iterate the entire stack and ensure everything is hit only once
	head := snaps.Snapshot(common.HexToHash("0x80"))
@ -568,13 +568,13 @@ func TestAccountIteratorFlattening(t *testing.T) {
	}
	// Create a stack of diffs on top
	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
		randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil)
		randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil)

	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
		randomAccountSet("0xbb", "0xdd", "0xf0"), nil)
		randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil)

	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
		randomAccountSet("0xcc", "0xf0", "0xff"), nil)
		randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil)

	// Create an iterator and flatten the data from underneath it
	it, _ := snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{})
@ -599,13 +599,13 @@ func TestAccountIteratorSeek(t *testing.T) {
		},
	}
	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
		randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil)
		randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil)

	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
		randomAccountSet("0xbb", "0xdd", "0xf0"), nil)
		randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil)

	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
		randomAccountSet("0xcc", "0xf0", "0xff"), nil)
		randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil)

	// Account set is now
	// 02: aa, ee, f0, ff
@ -663,13 +663,13 @@ func TestStorageIteratorSeek(t *testing.T) {
	}
	// Stack three diff layers on top with various overlaps
	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil))
		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil), nil)

	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x05", "0x06"}}, nil))
		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x05", "0x06"}}, nil), nil)

	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x05", "0x08"}}, nil))
		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x05", "0x08"}}, nil), nil)

	// Account set is now
	// 02: 01, 03, 05
@ -726,17 +726,17 @@ func TestAccountIteratorDeletions(t *testing.T) {
	}
	// Stack three diff layers on top with various overlaps
	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"),
		nil, randomAccountSet("0x11", "0x22", "0x33"), nil)
		nil, randomAccountSet("0x11", "0x22", "0x33"), nil, nil)

	deleted := common.HexToHash("0x22")
	destructed := map[common.Hash]struct{}{
		deleted: {},
	}
	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"),
		destructed, randomAccountSet("0x11", "0x33"), nil)
		destructed, randomAccountSet("0x11", "0x33"), nil, nil)

	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"),
		nil, randomAccountSet("0x33", "0x44", "0x55"), nil)
		nil, randomAccountSet("0x33", "0x44", "0x55"), nil, nil)

	// The output should be 11,33,44,55
	it, _ := snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{})
@ -772,10 +772,10 @@ func TestStorageIteratorDeletions(t *testing.T) {
	}
	// Stack three diff layers on top with various overlaps
	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil))
		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil), nil)

	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x04", "0x06"}}, [][]string{{"0x01", "0x03"}}))
		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x04", "0x06"}}, [][]string{{"0x01", "0x03"}}), nil)

	// The output should be 02,04,05,06
	it, _ := snaps.StorageIterator(common.HexToHash("0x03"), common.HexToHash("0xaa"), common.Hash{})
@ -791,7 +791,7 @@ func TestStorageIteratorDeletions(t *testing.T) {
	destructed := map[common.Hash]struct{}{
		common.HexToHash("0xaa"): {},
	}
	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), destructed, nil, nil)
	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), destructed, nil, nil, nil)

	it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{})
	verifyIterator(t, 0, it, verifyStorage)
@ -799,7 +799,7 @@ func TestStorageIteratorDeletions(t *testing.T) {

	// Re-insert the slots of the same account
	snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil,
		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x07", "0x08", "0x09"}}, nil))
		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x07", "0x08", "0x09"}}, nil), nil)

	// The output should be 07,08,09
	it, _ = snaps.StorageIterator(common.HexToHash("0x05"), common.HexToHash("0xaa"), common.Hash{})
@ -807,7 +807,7 @@ func TestStorageIteratorDeletions(t *testing.T) {
	it.Release()

	// Destruct the whole storage but re-create the account in the same layer
	snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), destructed, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x11", "0x12"}}, nil))
	snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), destructed, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x11", "0x12"}}, nil), nil)
	it, _ = snaps.StorageIterator(common.HexToHash("0x06"), common.HexToHash("0xaa"), common.Hash{})
	verifyIterator(t, 2, it, verifyStorage) // The output should be 11,12
	it.Release()
@ -849,7 +849,7 @@ func BenchmarkAccountIteratorTraversal(b *testing.B) {
		},
	}
	for i := 1; i <= 100; i++ {
		snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil)
		snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil, nil)
	}
	// We call this once before the benchmark, so the creation of
	// sorted accountlists is not included in the results.
@ -944,9 +944,9 @@ func BenchmarkAccountIteratorLargeBaselayer(b *testing.B) {
			base.root: base,
		},
	}
	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, makeAccounts(2000), nil)
	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, makeAccounts(2000), nil, nil)
	for i := 2; i <= 100; i++ {
		snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(20), nil)
		snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(20), nil, nil)
	}
	// We call this once before the benchmark, so the creation of
	// sorted accountlists is not included in the results.
@ -110,7 +110,7 @@ func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, jou
	// etc.), we just discard all diffs and try to recover them later.
	var current snapshot = base
	err := iterateJournal(db, func(parent common.Hash, root common.Hash, destructSet map[common.Hash]struct{}, accountData map[common.Hash][]byte, storageData map[common.Hash]map[common.Hash][]byte) error {
		current = newDiffLayer(current, root, destructSet, accountData, storageData)
		current = newDiffLayer(current, root, destructSet, accountData, storageData, nil)
		return nil
	})
	if err != nil {
@ -100,6 +100,18 @@ type Snapshot interface {
	// Root returns the root hash for which this snapshot was made.
	Root() common.Hash

	// WaitAndGetVerifyRes waits until the snapshot has been verified and
	// returns the verification result.
	WaitAndGetVerifyRes() bool

	// Verified returns whether the snapshot is verified.
	Verified() bool

	// MarkValid stores the verification result.
	MarkValid()

	// CorrectAccounts updates the account data so that the correct data is
	// stored during pipecommit.
	CorrectAccounts(map[common.Hash][]byte)

	// Account directly retrieves the account associated with a particular hash in
	// the snapshot slim data format.
	Account(hash common.Hash) (*types.SlimAccount, error)
@ -130,7 +142,7 @@ type snapshot interface {
	// the specified data items.
	//
	// Note, the maps are retained by the method to avoid copying everything.
	Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer
	Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte, verified chan struct{}) *diffLayer

	// Journal commits an entire diff hierarchy to disk into a single journal entry.
	// This is meant to be used during shutdown to persist the snapshot without
@ -355,7 +367,7 @@ func (t *Tree) Snapshots(root common.Hash, limits int, nodisk bool) []Snapshot {

// Update adds a new snapshot into the tree, if that can be linked to an existing
// old parent. It is disallowed to insert a disk layer (the origin of all).
func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte, verified chan struct{}) error {
	// Reject noop updates to avoid self-loops in the snapshot tree. This is a
	// special case that can only happen for Clique networks where empty blocks
	// don't modify the state (0 block subsidy).
@ -370,7 +382,7 @@ func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs m
	if parent == nil {
		return fmt.Errorf("parent [%#x] snapshot missing", parentRoot)
	}
	snap := parent.(snapshot).Update(blockRoot, destructs, accounts, storage)
	snap := parent.(snapshot).Update(blockRoot, destructs, accounts, storage, verified)

	// Save the new snapshot for later
	t.lock.Lock()
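For context, existing callers that do not run the verification pipeline simply thread a nil channel through the widened signature, which makes the resulting layer count as verified immediately (this is the extra nil argument added throughout the tests below). A condensed sketch of both call shapes (the roots and data maps are placeholders, not values from this diff):

	// Non-pipelined caller: nil means "no verification pending".
	if err := snaps.Update(blockRoot, parentRoot, destructs, accounts, storages, nil); err != nil {
		log.Warn("Failed to update snapshot tree", "err", err)
	}

	// Pipelined caller: hand over a channel, verify asynchronously, call
	// MarkValid on success, then close the channel exactly once.
	verified := make(chan struct{})
	if err := snaps.Update(blockRoot, parentRoot, destructs, accounts, storages, verified); err != nil {
		log.Warn("Failed to update snapshot tree", "err", err)
	}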
@ -696,6 +708,11 @@ func (t *Tree) Journal(root common.Hash) (common.Hash, error) {
	if snap == nil {
		return common.Hash{}, fmt.Errorf("snapshot [%#x] missing", root)
	}
	// Wait until the snapshot (difflayer) is verified; at that point the
	// account data has also been refreshed with the correct data.
	if !snap.WaitAndGetVerifyRes() {
		return common.Hash{}, ErrSnapshotStale
	}

	// Run the journaling
	t.lock.Lock()
	defer t.lock.Unlock()
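Reviewer note: journaling now blocks on any in-flight verification and refuses to persist a layer that failed it. A hedged sketch of the shutdown-side caller (headRoot is an assumed variable, not from this diff):

	// Illustrative only: a layer that failed verification surfaces as
	// ErrSnapshotStale and is simply not journaled.
	if _, err := snaps.Journal(headRoot); err != nil {
		log.Warn("Snapshot journaling skipped", "root", headRoot, "err", err)
	}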
@ -107,7 +107,7 @@ func TestDiskLayerExternalInvalidationFullFlatten(t *testing.T) {
	accounts := map[common.Hash][]byte{
		common.HexToHash("0xa1"): randomAccount(),
	}
	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil, nil); err != nil {
		t.Fatalf("failed to create a diff layer: %v", err)
	}
	if n := len(snaps.layers); n != 2 {
@ -151,10 +151,10 @@ func TestDiskLayerExternalInvalidationPartialFlatten(t *testing.T) {
	accounts := map[common.Hash][]byte{
		common.HexToHash("0xa1"): randomAccount(),
	}
	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil, nil); err != nil {
		t.Fatalf("failed to create a diff layer: %v", err)
	}
	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil {
	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil, nil); err != nil {
		t.Fatalf("failed to create a diff layer: %v", err)
	}
	if n := len(snaps.layers); n != 3 {
@ -203,13 +203,13 @@ func TestDiffLayerExternalInvalidationPartialFlatten(t *testing.T) {
	accounts := map[common.Hash][]byte{
		common.HexToHash("0xa1"): randomAccount(),
	}
	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil, nil); err != nil {
		t.Fatalf("failed to create a diff layer: %v", err)
	}
	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil {
	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil, nil); err != nil {
		t.Fatalf("failed to create a diff layer: %v", err)
	}
	if err := snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, accounts, nil); err != nil {
	if err := snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, accounts, nil, nil); err != nil {
		t.Fatalf("failed to create a diff layer: %v", err)
	}
	if n := len(snaps.layers); n != 4 {
@ -263,12 +263,12 @@ func TestPostCapBasicDataAccess(t *testing.T) {
		},
	}
	// The lowest difflayer
	snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), nil, setAccount("0xa1"), nil)
	snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), nil, setAccount("0xa2"), nil)
	snaps.Update(common.HexToHash("0xb2"), common.HexToHash("0xa1"), nil, setAccount("0xb2"), nil)
	snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), nil, setAccount("0xa1"), nil, nil)
	snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), nil, setAccount("0xa2"), nil, nil)
	snaps.Update(common.HexToHash("0xb2"), common.HexToHash("0xa1"), nil, setAccount("0xb2"), nil, nil)

	snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil)
	snaps.Update(common.HexToHash("0xb3"), common.HexToHash("0xb2"), nil, setAccount("0xb3"), nil)
	snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil, nil)
	snaps.Update(common.HexToHash("0xb3"), common.HexToHash("0xb2"), nil, setAccount("0xb3"), nil, nil)

	// checkExist verifies if an account exists in a snapshot
	checkExist := func(layer *diffLayer, key string) error {
@ -363,7 +363,7 @@ func TestSnaphots(t *testing.T) {
	)
	for i := 0; i < 129; i++ {
		head = makeRoot(uint64(i + 2))
		snaps.Update(head, last, nil, setAccount(fmt.Sprintf("%d", i+2)), nil)
		snaps.Update(head, last, nil, setAccount(fmt.Sprintf("%d", i+2)), nil, nil)
		last = head
		snaps.Cap(head, 128) // 130 layers (128 diffs + 1 accumulator + 1 disk)
	}
@ -456,9 +456,9 @@ func TestReadStateDuringFlattening(t *testing.T) {
		},
	}
	// 4 layers in total: 3 diff layers and 1 disk layer
	snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), nil, setAccount("0xa1"), nil)
	snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), nil, setAccount("0xa2"), nil)
	snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil)
	snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), nil, setAccount("0xa1"), nil, nil)
	snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), nil, setAccount("0xa2"), nil, nil)
	snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil, nil)

	// Obtain the topmost snapshot handler for state accessing
	snap := snaps.Snapshot(common.HexToHash("0xa3"))

@ -35,6 +35,7 @@ import (
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/ethereum/go-ethereum/trie/trienode"
	"github.com/ethereum/go-ethereum/trie/triestate"
@ -81,6 +82,7 @@ type StateDB struct {
	stateRoot common.Hash // The calculation result of IntermediateRoot

	fullProcessed bool
	pipeCommit    bool

	// These maps hold the state changes (including the corresponding
	// original value) that occurred in this **block**.
@ -195,7 +197,8 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error)
	}

	tr, err := db.OpenTrie(root)
	if err != nil {
	// Return the error only when 1. the trie failed to open, and 2. the snap is
	// nil, or the snap is non-nil and has finished verification.
	if err != nil && (sdb.snap == nil || sdb.snap.Verified()) {
		return nil, err
	}
	_, sdb.noTrie = tr.(*trie.EmptyTrie)
@ -297,6 +300,20 @@ func (s *StateDB) SetExpectedStateRoot(root common.Hash) {
	s.expectedRoot = root
}

// EnablePipeCommit enables the pipeline commit function of the statedb.
func (s *StateDB) EnablePipeCommit() {
	if s.snap != nil && s.snaps.Layers() > 1 {
		// After the big merge, pipeCommit is disabled for now, because
		// `s.db.TrieDB().Update` should be called after `s.trie.Commit(true)`.
		s.pipeCommit = false
	}
}

// IsPipeCommit checks whether pipecommit is enabled on the statedb or not
func (s *StateDB) IsPipeCommit() bool {
	return s.pipeCommit
}

// MarkFullProcessed marks that the block is fully processed.
func (s *StateDB) MarkFullProcessed() {
	s.fullProcessed = true
@ -318,6 +335,22 @@ func (s *StateDB) Error() error {
	return s.dbErr
}

// Not thread safe
func (s *StateDB) Trie() (Trie, error) {
	if s.trie == nil {
		err := s.WaitPipeVerification()
		if err != nil {
			return nil, err
		}
		tr, err := s.db.OpenTrie(s.originalRoot)
		if err != nil {
			return nil, err
		}
		s.trie = tr
	}
	return s.trie, nil
}

func (s *StateDB) AddLog(log *types.Log) {
	s.journal.append(addLogChange{txhash: s.thash})

@ -835,6 +868,7 @@ func (s *StateDB) copyInternal(doPrefetch bool) *StateDB {
		// stateRoot: s.stateRoot,
		originalRoot: s.originalRoot,
		// fullProcessed: s.fullProcessed,
		// pipeCommit: s.pipeCommit,
		accounts:       make(map[common.Hash][]byte),
		storages:       make(map[common.Hash]map[common.Hash][]byte),
		accountsOrigin: make(map[common.Address][]byte),
@ -965,6 +999,17 @@ func (s *StateDB) GetRefund() uint64 {
	return s.refund
}

// WaitPipeVerification waits until the snapshot has been verified.
func (s *StateDB) WaitPipeVerification() error {
	// Need to wait for the parent trie to commit
	if s.snap != nil {
		if valid := s.snap.WaitAndGetVerifyRes(); !valid {
			return errors.New("verification on parent snap failed")
		}
	}
	return nil
}

// Finalise finalises the state by removing the destructed objects and clears
// the journal as well as the refunds. Finalise, however, will not push any updates
// into the tries just yet. Only IntermediateRoot or Commit will do that.
@ -1011,7 +1056,11 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
	}
	prefetcher := s.prefetcher
	if prefetcher != nil && len(addressesToPrefetch) > 0 {
		if s.snap.Verified() {
			prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, addressesToPrefetch)
		} else if prefetcher.rootParent != (common.Hash{}) {
			prefetcher.prefetch(common.Hash{}, prefetcher.rootParent, common.Address{}, addressesToPrefetch)
		}
	}
	// Invalidate journal because reverting across transactions is not allowed.
	s.clearJournalAndRefund()
@ -1027,6 +1076,76 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
	return s.StateIntermediateRoot()
}

// CorrectAccountsRoot will fix account roots in pipecommit mode
func (s *StateDB) CorrectAccountsRoot(blockRoot common.Hash) {
	var snapshot snapshot.Snapshot
	if blockRoot == (common.Hash{}) {
		snapshot = s.snap
	} else if s.snaps != nil {
		snapshot = s.snaps.Snapshot(blockRoot)
	}

	if snapshot == nil {
		return
	}
	if accounts, err := snapshot.Accounts(); err == nil && accounts != nil {
		for _, obj := range s.stateObjects {
			if !obj.deleted {
				if account, exist := accounts[crypto.Keccak256Hash(obj.address[:])]; exist {
					if len(account.Root) == 0 {
						obj.data.Root = types.EmptyRootHash
					} else {
						obj.data.Root = common.BytesToHash(account.Root)
					}
				}
			}
		}
	}
}

// PopulateSnapAccountAndStorage tries to populate required accounts and storages for pipecommit
func (s *StateDB) PopulateSnapAccountAndStorage() {
	for addr := range s.stateObjectsPending {
		if obj := s.stateObjects[addr]; !obj.deleted {
			if s.snap != nil {
				s.populateSnapStorage(obj)
				s.accounts[obj.addrHash] = types.SlimAccountRLP(obj.data)
			}
		}
	}
}

// populateSnapStorage tries to populate required storages for pipecommit, and returns a flag to indicate whether the storage root changed or not
func (s *StateDB) populateSnapStorage(obj *stateObject) bool {
	for key, value := range obj.dirtyStorage {
		obj.pendingStorage[key] = value
	}
	if len(obj.pendingStorage) == 0 {
		return false
	}
	hasher := crypto.NewKeccakState()
	var storage map[common.Hash][]byte
	for key, value := range obj.pendingStorage {
		var v []byte
		if (value != common.Hash{}) {
			// Encoding []byte cannot fail, ok to ignore the error.
			v, _ = rlp.EncodeToBytes(common.TrimLeftZeroes(value[:]))
		}
		// If state snapshotting is active, cache the data until commit
		if obj.db.snap != nil {
			if storage == nil {
				// Retrieve the old storage map, if available, create a new one otherwise
				if storage = obj.db.storages[obj.addrHash]; storage == nil {
					storage = make(map[common.Hash][]byte)
					obj.db.storages[obj.addrHash] = storage
				}
			}
			storage[crypto.HashData(hasher, key[:])] = v // v will be nil if value is 0x00
		}
	}
	return true
}

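A quick illustration of the slim storage encoding used in populateSnapStorage (the values here are made up for the example): a non-zero slot value is stripped of leading zero bytes and RLP-encoded, while an all-zero value stays nil so the snapshot treats the slot as deleted.

	// Illustrative values, not from this diff.
	value := common.HexToHash("0x01")
	v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(value[:])) // -> 0x01, the RLP of a single byte

	zero := common.Hash{}
	// (zero != common.Hash{}) is false, so v stays nil and the slot reads as deleted.
	_ = zero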
func (s *StateDB) AccountsIntermediateRoot() {
	tasks := make(chan func())
	finishCh := make(chan struct{})
@ -1363,7 +1482,7 @@ func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) (map[common.A
//
// The associated block number of the state transition is also provided
// for more chain context.
func (s *StateDB) Commit(block uint64, postCommitFunc func() error) (common.Hash, *types.DiffLayer, error) {
func (s *StateDB) Commit(block uint64, failPostCommitFunc func(), postCommitFuncs ...func() error) (common.Hash, *types.DiffLayer, error) {
	// Short circuit in case any database failure occurred earlier.
	if s.dbErr != nil {
		s.StopPrefetcher()
@ -1372,6 +1491,8 @@ func (s *StateDB) Commit(block uint64, postCommitFunc func() error) (common.Hash
	// Finalize any pending changes and merge everything into the tries
	var (
		diffLayer   *types.DiffLayer
		verified    chan struct{}
		snapUpdated chan struct{}
		incomplete  map[common.Address]struct{}
		nodes       = trienode.NewMergedNodeSet()
	)
@ -1379,9 +1500,28 @@ func (s *StateDB) Commit(block uint64, postCommitFunc func() error) (common.Hash
	if s.snap != nil {
		diffLayer = &types.DiffLayer{}
	}
	if s.pipeCommit {
		// async commit the MPT
		verified = make(chan struct{})
		snapUpdated = make(chan struct{})
	}

	commmitTrie := func() error {
		commitErr := func() error {
			if s.pipeCommit {
				<-snapUpdated
				// Due to the state verification pipeline the account roots are not
				// yet updated, so the data in the difflayer is incorrect; capture
				// the correct data here.
				s.AccountsIntermediateRoot()
				if parent := s.snap.Root(); parent != s.expectedRoot {
					accountData := make(map[common.Hash][]byte)
					for k, v := range s.accounts {
						accountData[crypto.Keccak256Hash(k[:])] = v
					}
					s.snaps.Snapshot(s.expectedRoot).CorrectAccounts(accountData)
				}
				s.snap = nil
			}

			if s.stateRoot = s.StateIntermediateRoot(); s.fullProcessed && s.expectedRoot != s.stateRoot {
				log.Error("Invalid merkle root", "remote", s.expectedRoot, "local", s.stateRoot)
				return fmt.Errorf("invalid merkle root (remote: %x local: %x)", s.expectedRoot, s.stateRoot)
@ -1489,8 +1629,8 @@ func (s *StateDB) Commit(block uint64, postCommitFunc func() error) (common.Hash
			}
		}

		if postCommitFunc != nil {
			err := postCommitFunc()
		for _, postFunc := range postCommitFuncs {
			err := postFunc()
			if err != nil {
				return err
			}
@ -1499,6 +1639,19 @@ func (s *StateDB) Commit(block uint64, postCommitFunc func() error) (common.Hash
			return nil
		}()

		if s.pipeCommit {
			if commitErr == nil {
				s.snaps.Snapshot(s.stateRoot).MarkValid()
				close(verified)
			} else {
				// The blockchain will do a further rewind if the block write has
				// not finished yet.
				close(verified)
				if failPostCommitFunc != nil {
					failPostCommitFunc()
				}
				log.Error("state verification failed", "err", commitErr)
			}
		}
		return commitErr
	}

@ -1540,10 +1693,15 @@ func (s *StateDB) Commit(block uint64, postCommitFunc func() error) (common.Hash
		if metrics.EnabledExpensive {
			defer func(start time.Time) { s.SnapshotCommits += time.Since(start) }(time.Now())
		}
		if s.pipeCommit {
			defer close(snapUpdated)
			// State verification pipeline - account roots are not calculated
			// here, just populate the fields needed for processing.
			s.PopulateSnapAccountAndStorage()
		}
		diffLayer.Destructs, diffLayer.Accounts, diffLayer.Storages = s.SnapToDiffLayer()
		// Only update if there's a state transition (skip empty Clique blocks)
		if parent := s.snap.Root(); parent != s.expectedRoot {
			err := s.snaps.Update(s.expectedRoot, parent, s.convertAccountSet(s.stateObjectsDestruct), s.accounts, s.storages)
			err := s.snaps.Update(s.expectedRoot, parent, s.convertAccountSet(s.stateObjectsDestruct), s.accounts, s.storages, verified)

			if err != nil {
				log.Warn("Failed to update snapshot tree", "from", parent, "to", s.expectedRoot, "err", err)
@ -1563,9 +1721,12 @@ func (s *StateDB) Commit(block uint64, postCommitFunc func() error) (common.Hash
			return nil
		},
	}

	if s.pipeCommit {
		go commmitTrie()
	} else {
		defer s.StopPrefetcher()
		commitFuncs = append(commitFuncs, commmitTrie)
	}
	commitRes := make(chan error, len(commitFuncs))
	for _, f := range commitFuncs {
		// commitFuncs[0] and commitFuncs[1] both read map `stateObjects`, but no conflicts
@ -1582,7 +1743,11 @@ func (s *StateDB) Commit(block uint64, postCommitFunc func() error) (common.Hash
	}

	root := s.stateRoot
	if s.pipeCommit {
		root = s.expectedRoot
	} else {
		s.snap = nil
	}
	if root == (common.Hash{}) {
		root = types.EmptyRootHash
	}
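Putting the pipecommit pieces together, the two channels introduced in Commit are ordered as follows (a condensed restatement of the control flow above, not new behavior):

	// Pipeline ordering, condensed from Commit above (not new code):
	//
	//   verified    := make(chan struct{}) // closed when verification finished
	//   snapUpdated := make(chan struct{}) // closed when the difflayer exists
	//
	//   snapshot side:  PopulateSnapAccountAndStorage()
	//                   snaps.Update(expectedRoot, parent, ..., verified)
	//                   close(snapUpdated)
	//
	//   trie side:      <-snapUpdated
	//                   AccountsIntermediateRoot()   // real account roots
	//                   CorrectAccounts(...)         // patch the difflayer
	//                   MarkValid() on success
	//                   close(verified)              // wake all WaitAndGetVerifyRes callers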
core/systemcontracts/bohr/rialto/StakeHubContract (new file, 1 line)
File diff suppressed because one or more lines are too long
core/systemcontracts/bohr/rialto/ValidatorContract (new file, 1 line)
File diff suppressed because one or more lines are too long
@ -17,3 +17,11 @@ var (
	//go:embed chapel/StakeHubContract
	ChapelStakeHubContract string
)

// contract codes for Rialto upgrade
var (
	//go:embed rialto/ValidatorContract
	RialtoValidatorContract string
	//go:embed rialto/StakeHubContract
	RialtoStakeHubContract string
)
File diff suppressed because one or more lines are too long
@ -1,168 +0,0 @@
package pascal

import _ "embed"

// contract codes for Mainnet upgrade
var (

	//go:embed mainnet/ValidatorContract
	MainnetValidatorContract string

	//go:embed mainnet/SlashContract
	MainnetSlashContract string

	//go:embed mainnet/SystemRewardContract
	MainnetSystemRewardContract string

	//go:embed mainnet/LightClientContract
	MainnetLightClientContract string

	//go:embed mainnet/TokenHubContract
	MainnetTokenHubContract string

	//go:embed mainnet/RelayerIncentivizeContract
	MainnetRelayerIncentivizeContract string

	//go:embed mainnet/RelayerHubContract
	MainnetRelayerHubContract string

	//go:embed mainnet/GovHubContract
	MainnetGovHubContract string

	//go:embed mainnet/TokenManagerContract
	MainnetTokenManagerContract string

	//go:embed mainnet/CrossChainContract
	MainnetCrossChainContract string

	//go:embed mainnet/StakingContract
	MainnetStakingContract string

	//go:embed mainnet/StakeHubContract
	MainnetStakeHubContract string

	//go:embed mainnet/StakeCreditContract
	MainnetStakeCreditContract string

	//go:embed mainnet/GovernorContract
	MainnetGovernorContract string

	//go:embed mainnet/GovTokenContract
	MainnetGovTokenContract string

	//go:embed mainnet/TimelockContract
	MainnetTimelockContract string

	//go:embed mainnet/TokenRecoverPortalContract
	MainnetTokenRecoverPortalContract string
)

// contract codes for Chapel upgrade
var (

	//go:embed chapel/ValidatorContract
	ChapelValidatorContract string

	//go:embed chapel/SlashContract
	ChapelSlashContract string

	//go:embed chapel/SystemRewardContract
	ChapelSystemRewardContract string

	//go:embed chapel/LightClientContract
	ChapelLightClientContract string

	//go:embed chapel/TokenHubContract
	ChapelTokenHubContract string

	//go:embed chapel/RelayerIncentivizeContract
	ChapelRelayerIncentivizeContract string

	//go:embed chapel/RelayerHubContract
	ChapelRelayerHubContract string

	//go:embed chapel/GovHubContract
	ChapelGovHubContract string

	//go:embed chapel/TokenManagerContract
	ChapelTokenManagerContract string

	//go:embed chapel/CrossChainContract
	ChapelCrossChainContract string

	//go:embed chapel/StakingContract
	ChapelStakingContract string

	//go:embed chapel/StakeHubContract
	ChapelStakeHubContract string

	//go:embed chapel/StakeCreditContract
	ChapelStakeCreditContract string

	//go:embed chapel/GovernorContract
	ChapelGovernorContract string

	//go:embed chapel/GovTokenContract
	ChapelGovTokenContract string

	//go:embed chapel/TimelockContract
	ChapelTimelockContract string

	//go:embed chapel/TokenRecoverPortalContract
	ChapelTokenRecoverPortalContract string
)

// contract codes for Rialto upgrade
var (

	//go:embed rialto/ValidatorContract
	RialtoValidatorContract string

	//go:embed rialto/SlashContract
	RialtoSlashContract string

	//go:embed rialto/SystemRewardContract
	RialtoSystemRewardContract string

	//go:embed rialto/LightClientContract
	RialtoLightClientContract string

	//go:embed rialto/TokenHubContract
	RialtoTokenHubContract string

	//go:embed rialto/RelayerIncentivizeContract
	RialtoRelayerIncentivizeContract string

	//go:embed rialto/RelayerHubContract
	RialtoRelayerHubContract string

	//go:embed rialto/GovHubContract
	RialtoGovHubContract string

	//go:embed rialto/TokenManagerContract
	RialtoTokenManagerContract string

	//go:embed rialto/CrossChainContract
	RialtoCrossChainContract string

	//go:embed rialto/StakingContract
	RialtoStakingContract string

	//go:embed rialto/StakeHubContract
	RialtoStakeHubContract string

	//go:embed rialto/StakeCreditContract
	RialtoStakeCreditContract string

	//go:embed rialto/GovernorContract
	RialtoGovernorContract string

	//go:embed rialto/GovTokenContract
	RialtoGovTokenContract string

	//go:embed rialto/TimelockContract
	RialtoTimelockContract string

	//go:embed rialto/TokenRecoverPortalContract
	RialtoTokenRecoverPortalContract string
)
@ -20,7 +20,6 @@ import (
|
||||
"github.com/ethereum/go-ethereum/core/systemcontracts/mirror"
|
||||
"github.com/ethereum/go-ethereum/core/systemcontracts/moran"
|
||||
"github.com/ethereum/go-ethereum/core/systemcontracts/niels"
|
||||
"github.com/ethereum/go-ethereum/core/systemcontracts/pascal"
|
||||
"github.com/ethereum/go-ethereum/core/systemcontracts/planck"
|
||||
"github.com/ethereum/go-ethereum/core/systemcontracts/plato"
|
||||
"github.com/ethereum/go-ethereum/core/systemcontracts/ramanujan"
|
||||
@ -83,8 +82,6 @@ var (
|
||||
haberFixUpgrade = make(map[string]*Upgrade)
|
||||
|
||||
bohrUpgrade = make(map[string]*Upgrade)
|
||||
|
||||
pascalUpgrade = make(map[string]*Upgrade)
|
||||
)
|
||||
|
||||
func init() {
|
||||
@ -776,275 +773,18 @@ func init() {
|
||||
},
|
||||
}
|
||||
|
||||
pascalUpgrade[mainNet] = &Upgrade{
|
||||
UpgradeName: "pascal",
|
||||
bohrUpgrade[rialtoNet] = &Upgrade{
|
||||
UpgradeName: "bohr",
|
||||
Configs: []*UpgradeConfig{
|
||||
{
|
||||
ContractAddr: common.HexToAddress(ValidatorContract),
|
||||
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
|
||||
Code: pascal.MainnetValidatorContract,
|
||||
},
|
||||
{
|
||||
ContractAddr: common.HexToAddress(SlashContract),
|
||||
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
|
||||
Code: pascal.MainnetSlashContract,
|
||||
},
|
||||
{
|
||||
ContractAddr: common.HexToAddress(SystemRewardContract),
|
||||
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
|
||||
Code: pascal.MainnetSystemRewardContract,
|
||||
},
|
||||
{
|
||||
ContractAddr: common.HexToAddress(LightClientContract),
|
||||
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
|
||||
Code: pascal.MainnetLightClientContract,
|
||||
},
|
||||
{
|
||||
ContractAddr: common.HexToAddress(TokenHubContract),
|
||||
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
|
||||
Code: pascal.MainnetTokenHubContract,
|
||||
},
|
||||
{
|
||||
ContractAddr: common.HexToAddress(RelayerIncentivizeContract),
|
||||
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
|
||||
Code: pascal.MainnetRelayerIncentivizeContract,
|
||||
},
|
||||
{
|
||||
ContractAddr: common.HexToAddress(RelayerHubContract),
|
||||
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
|
||||
Code: pascal.MainnetRelayerHubContract,
|
||||
},
|
||||
{
|
||||
ContractAddr: common.HexToAddress(GovHubContract),
|
||||
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
|
||||
Code: pascal.MainnetGovHubContract,
|
||||
},
|
||||
{
|
||||
ContractAddr: common.HexToAddress(TokenManagerContract),
|
||||
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
|
||||
Code: pascal.MainnetTokenManagerContract,
|
||||
},
|
||||
{
|
||||
ContractAddr: common.HexToAddress(CrossChainContract),
|
||||
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
|
||||
Code: pascal.MainnetCrossChainContract,
|
||||
},
|
||||
{
|
||||
ContractAddr: common.HexToAddress(StakingContract),
|
||||
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
|
||||
Code: pascal.MainnetStakingContract,
|
||||
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/398c9364aad5261c1ecd90ac3ab2df89b65c45e3",
|
||||
Code: bohr.RialtoValidatorContract,
|
||||
},
{
ContractAddr: common.HexToAddress(StakeHubContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.MainnetStakeHubContract,
},
{
ContractAddr: common.HexToAddress(StakeCreditContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.MainnetStakeCreditContract,
},
{
ContractAddr: common.HexToAddress(GovernorContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.MainnetGovernorContract,
},
{
ContractAddr: common.HexToAddress(GovTokenContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.MainnetGovTokenContract,
},
{
ContractAddr: common.HexToAddress(TimelockContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.MainnetTimelockContract,
},
{
ContractAddr: common.HexToAddress(TokenRecoverPortalContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.MainnetTokenRecoverPortalContract,
},
},
}

pascalUpgrade[chapelNet] = &Upgrade{
UpgradeName: "pascal",
Configs: []*UpgradeConfig{
{
ContractAddr: common.HexToAddress(ValidatorContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.ChapelValidatorContract,
},
{
ContractAddr: common.HexToAddress(SlashContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.ChapelSlashContract,
},
{
ContractAddr: common.HexToAddress(SystemRewardContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.ChapelSystemRewardContract,
},
{
ContractAddr: common.HexToAddress(LightClientContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.ChapelLightClientContract,
},
{
ContractAddr: common.HexToAddress(TokenHubContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.ChapelTokenHubContract,
},
{
ContractAddr: common.HexToAddress(RelayerIncentivizeContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.ChapelRelayerIncentivizeContract,
},
{
ContractAddr: common.HexToAddress(RelayerHubContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.ChapelRelayerHubContract,
},
{
ContractAddr: common.HexToAddress(GovHubContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.ChapelGovHubContract,
},
{
ContractAddr: common.HexToAddress(TokenManagerContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.ChapelTokenManagerContract,
},
{
ContractAddr: common.HexToAddress(CrossChainContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.ChapelCrossChainContract,
},
{
ContractAddr: common.HexToAddress(StakingContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.ChapelStakingContract,
},
{
ContractAddr: common.HexToAddress(StakeHubContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.ChapelStakeHubContract,
},
{
ContractAddr: common.HexToAddress(StakeCreditContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.ChapelStakeCreditContract,
},
{
ContractAddr: common.HexToAddress(GovernorContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.ChapelGovernorContract,
},
{
ContractAddr: common.HexToAddress(GovTokenContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.ChapelGovTokenContract,
},
{
ContractAddr: common.HexToAddress(TimelockContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.ChapelTimelockContract,
},
{
ContractAddr: common.HexToAddress(TokenRecoverPortalContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.ChapelTokenRecoverPortalContract,
},
},
}

pascalUpgrade[rialtoNet] = &Upgrade{
UpgradeName: "pascal",
Configs: []*UpgradeConfig{
{
ContractAddr: common.HexToAddress(ValidatorContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.RialtoValidatorContract,
},
{
ContractAddr: common.HexToAddress(SlashContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.RialtoSlashContract,
},
{
ContractAddr: common.HexToAddress(SystemRewardContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.RialtoSystemRewardContract,
},
{
ContractAddr: common.HexToAddress(LightClientContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.RialtoLightClientContract,
},
{
ContractAddr: common.HexToAddress(TokenHubContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.RialtoTokenHubContract,
},
{
ContractAddr: common.HexToAddress(RelayerIncentivizeContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.RialtoRelayerIncentivizeContract,
},
{
ContractAddr: common.HexToAddress(RelayerHubContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.RialtoRelayerHubContract,
},
{
ContractAddr: common.HexToAddress(GovHubContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.RialtoGovHubContract,
},
{
ContractAddr: common.HexToAddress(TokenManagerContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.RialtoTokenManagerContract,
},
{
ContractAddr: common.HexToAddress(CrossChainContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.RialtoCrossChainContract,
},
{
ContractAddr: common.HexToAddress(StakingContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.RialtoStakingContract,
},
{
ContractAddr: common.HexToAddress(StakeHubContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.RialtoStakeHubContract,
},
{
ContractAddr: common.HexToAddress(StakeCreditContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.RialtoStakeCreditContract,
},
{
ContractAddr: common.HexToAddress(GovernorContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.RialtoGovernorContract,
},
{
ContractAddr: common.HexToAddress(GovTokenContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.RialtoGovTokenContract,
},
{
ContractAddr: common.HexToAddress(TimelockContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.RialtoTimelockContract,
},
{
ContractAddr: common.HexToAddress(TokenRecoverPortalContract),
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
Code: pascal.RialtoTokenRecoverPortalContract,
CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/398c9364aad5261c1ecd90ac3ab2df89b65c45e3",
|
||||
Code: bohr.RialtoStakeHubContract,
|
||||
},
},
}
@ -1133,10 +873,6 @@ func UpgradeBuildInSystemContract(config *params.ChainConfig, blockNumber *big.I
applySystemContractUpgrade(bohrUpgrade[network], blockNumber, statedb, logger)
}

if config.IsOnPascal(blockNumber, lastBlockTime, blockTime) {
applySystemContractUpgrade(pascalUpgrade[network], blockNumber, statedb, logger)
}

/*
apply other upgrades
*/
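The pattern deleted above repeats for every network: a hard-fork name maps to an Upgrade whose Configs pin each system contract to the bytecode built at a specific genesis-contract commit, and UpgradeBuildInSystemContract installs the matching map entry once the fork boundary is crossed. A minimal, self-contained sketch of that flow, with names simplified and the state write abstracted away (the real applySystemContractUpgrade also receives the block number, the state database, and a logger):

package main

import "fmt"

// Simplified stand-ins for the types in the diff above (illustrative only).
type UpgradeConfig struct {
	ContractAddr string // fixed system-contract address
	CommitUrl    string // genesis-contract commit the bytecode was built from
	Code         []byte // new runtime bytecode to install
}

type Upgrade struct {
	UpgradeName string
	Configs     []*UpgradeConfig
}

// applyUpgrade mirrors the shape of applySystemContractUpgrade: replace the
// code stored at each configured address. setCode stands in for the state write.
func applyUpgrade(u *Upgrade, setCode func(addr string, code []byte)) {
	if u == nil {
		return // no upgrade registered for this network
	}
	for _, cfg := range u.Configs {
		setCode(cfg.ContractAddr, cfg.Code)
	}
}

func main() {
	pascal := &Upgrade{
		UpgradeName: "pascal",
		Configs: []*UpgradeConfig{
			{ContractAddr: "0x0000000000000000000000000000000000001000", Code: []byte{0x60, 0x80}},
		},
	}
	// In the real code this fires only when the fork predicate (here,
	// config.IsOnPascal) reports the first block past the fork timestamp.
	applyUpgrade(pascal, func(addr string, code []byte) {
		fmt.Printf("pascal: installed %d bytes at %s\n", len(code), addr)
	})
}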
@ -19,7 +19,6 @@ package legacypool

import (
"errors"
"fmt"
"math"
"math/big"
"sort"
@ -104,7 +103,6 @@ var (
queuedGauge = metrics.NewRegisteredGauge("txpool/queued", nil)
localGauge = metrics.NewRegisteredGauge("txpool/local", nil)
slotsGauge = metrics.NewRegisteredGauge("txpool/slots", nil)
OverflowPoolGauge = metrics.NewRegisteredGauge("txpool/overflowpool", nil)

reheapTimer = metrics.NewRegisteredTimer("txpool/reheap", nil)
)
@ -139,7 +137,6 @@ type Config struct {
GlobalSlots uint64 // Maximum number of executable transaction slots for all accounts
AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account
GlobalQueue uint64 // Maximum number of non-executable transaction slots for all accounts
OverflowPoolSlots uint64 // Maximum number of transaction slots in overflow pool

Lifetime time.Duration // Maximum amount of time non-executable transaction are queued
ReannounceTime time.Duration // Duration for announcing local pending transactions again
@ -157,7 +154,6 @@ var DefaultConfig = Config{
GlobalSlots: 4096 + 1024, // urgent + floating queue capacity with 4:1 ratio
AccountQueue: 64,
GlobalQueue: 1024,
OverflowPoolSlots: 0,

Lifetime: 3 * time.Hour,
ReannounceTime: 10 * 365 * 24 * time.Hour,
@ -239,8 +235,6 @@ type LegacyPool struct {
all *lookup // All transactions to allow lookups
priced *pricedList // All transactions sorted by price

localBufferPool *TxOverflowPool // Local buffer transactions

reqResetCh chan *txpoolResetRequest
reqPromoteCh chan *accountSet
queueTxEventCh chan *types.Transaction
@ -278,7 +272,6 @@ func New(config Config, chain BlockChain) *LegacyPool {
reorgDoneCh: make(chan chan struct{}),
reorgShutdownCh: make(chan struct{}),
initDoneCh: make(chan struct{}),
localBufferPool: NewTxOverflowPoolHeap(config.OverflowPoolSlots),
}
pool.locals = newAccountSet(pool.signer)
for _, addr := range config.Locals {
@ -415,6 +408,7 @@ func (pool *LegacyPool) loop() {
if !pool.locals.contains(addr) {
continue
}

for _, tx := range list.Flatten() {
// Default ReannounceTime is 10 years, won't announce by default.
if time.Since(tx.Time()) < pool.config.ReannounceTime {
@ -523,17 +517,6 @@ func (pool *LegacyPool) Stats() (int, int) {
return pool.stats()
}

func (pool *LegacyPool) statsOverflowPool() int {
pool.mu.RLock()
defer pool.mu.RUnlock()

if pool.localBufferPool == nil {
return 0
}

return pool.localBufferPool.Size()
}

// stats retrieves the current pool stats, namely the number of pending and the
// number of queued (non-executable) transactions.
func (pool *LegacyPool) stats() (int, int) {
@ -848,8 +831,6 @@ func (pool *LegacyPool) add(tx *types.Transaction, local bool) (replaced bool, e
}
}

pool.addToOverflowPool(drop, isLocal)

// Kick out the underpriced remote transactions.
for _, tx := range drop {
log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
@ -906,29 +887,6 @@ func (pool *LegacyPool) add(tx *types.Transaction, local bool) (replaced bool, e
return replaced, nil
}

func (pool *LegacyPool) addToOverflowPool(drop types.Transactions, isLocal bool) {
// calculate total number of slots in drop. Accordingly add them to OverflowPool (if there is space)
availableSlotsOverflowPool := pool.availableSlotsOverflowPool()
if availableSlotsOverflowPool > 0 {
// transfer availableSlotsOverflowPool number of transactions slots from drop to OverflowPool
currentSlotsUsed := 0
for i, tx := range drop {
txSlots := numSlots(tx)
if currentSlotsUsed+txSlots <= availableSlotsOverflowPool {
from, _ := types.Sender(pool.signer, tx)
pool.localBufferPool.Add(tx)
log.Debug("adding to OverflowPool", "transaction", tx.Hash().String(), "from", from.String())
currentSlotsUsed += txSlots
} else {
log.Debug("not all got added to OverflowPool", "totalAdded", i+1)
return
}
}
} else {
log.Debug("adding to OverflowPool unsuccessful", "availableSlotsOverflowPool", availableSlotsOverflowPool)
}
}

// isGapped reports whether the given transaction is immediately executable.
func (pool *LegacyPool) isGapped(from common.Address, tx *types.Transaction) bool {
// Short circuit if transaction falls within the scope of the pending list
@ -1375,6 +1333,7 @@ func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest,
reorgDurationTimer.Update(time.Since(t0))
}(time.Now())
defer close(done)

var promoteAddrs []common.Address
if dirtyAccounts != nil && reset == nil {
// Only dirty accounts need to be promoted, unless we're resetting.
@ -1432,9 +1391,6 @@ func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest,
pool.changesSinceReorg = 0 // Reset change counter
pool.mu.Unlock()

// Transfer transactions from OverflowPool to MainPool for new block import
pool.transferTransactions()

// Notify subsystems for newly added transactions
for _, tx := range promoted {
addr, _ := types.Sender(pool.signer, tx)
@ -2082,50 +2038,3 @@ func (t *lookup) RemotesBelowTip(threshold *big.Int) types.Transactions {
func numSlots(tx *types.Transaction) int {
return int((tx.Size() + txSlotSize - 1) / txSlotSize)
}
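numSlots is a plain ceiling division over the pool's slot granularity. A small self-contained illustration, assuming go-ethereum's usual txSlotSize of 32 KiB (32 * 1024 bytes; the constant itself is defined elsewhere in this package):

package main

import "fmt"

const txSlotSize = 32 * 1024 // assumed value of the legacypool constant

// numSlots mirrors the function above: ceiling division of the encoded
// transaction size by the slot size.
func numSlots(size uint64) int {
	return int((size + txSlotSize - 1) / txSlotSize)
}

func main() {
	for _, size := range []uint64{1, 32 * 1024, 32*1024 + 1, 100000} {
		fmt.Printf("%6d bytes -> %d slot(s)\n", size, numSlots(size))
	}
	// A transaction occupies at most 4 slots (the 128 KiB size cap), which
	// is why transferTransactions below divides the free slot budget by 4
	// when estimating how many transactions to pull back in.
}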

// transferTransactions moves transactions from OverflowPool to MainPool
func (pool *LegacyPool) transferTransactions() {
// Fail fast if the overflow pool is empty
if pool.localBufferPool.Size() == 0 {
return
}

maxMainPoolSize := int(pool.config.GlobalSlots + pool.config.GlobalQueue)
// Use pool.all.Slots() to get the total slots used by all transactions
currentMainPoolSize := pool.all.Slots()
if currentMainPoolSize >= maxMainPoolSize {
return
}

extraSlots := maxMainPoolSize - currentMainPoolSize
extraTransactions := (extraSlots + 3) / 4 // Since a transaction can take up to 4 slots
log.Debug("Will attempt to transfer from OverflowPool to MainPool", "transactions", extraTransactions)
txs := pool.localBufferPool.Flush(extraTransactions)
if len(txs) == 0 {
return
}

pool.Add(txs, true, false)
}

func (pool *LegacyPool) availableSlotsOverflowPool() int {
maxOverflowPoolSize := int(pool.config.OverflowPoolSlots)
availableSlots := maxOverflowPoolSize - pool.localBufferPool.Size()
if availableSlots > 0 {
return availableSlots
}
return 0
}

func (pool *LegacyPool) PrintTxStats() {
for _, l := range pool.pending {
for _, transaction := range l.txs.items {
from, _ := types.Sender(pool.signer, transaction)
fmt.Println("from: ", from, " Pending:", transaction.Hash().String(), transaction.GasFeeCap(), transaction.GasTipCap())
}
}

pool.localBufferPool.PrintTxStats()
fmt.Println("length of all: ", pool.all.Slots())
fmt.Println("----------------------------------------------------")
}

@ -40,7 +40,6 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
"github.com/holiman/uint256"
"github.com/stretchr/testify/assert"
)

var (
@ -1740,7 +1739,6 @@ func TestRepricingKeepsLocals(t *testing.T) {
// Note, local transactions are never allowed to be dropped.
func TestUnderpricing(t *testing.T) {
t.Parallel()
testTxPoolConfig.OverflowPoolSlots = 5

// Create the pool to test the pricing enforcement with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
@ -1933,8 +1931,6 @@ func TestUnderpricingDynamicFee(t *testing.T) {
pool.config.GlobalSlots = 2
pool.config.GlobalQueue = 2

pool.config.OverflowPoolSlots = 0

// Keep track of transaction events to ensure all executables get announced
events := make(chan core.NewTxsEvent, 32)
sub := pool.txFeed.Subscribe(events)
@ -1959,6 +1955,7 @@ func TestUnderpricingDynamicFee(t *testing.T) {
// Import the batch and that both pending and queued transactions match up
pool.addRemotes(txs) // Pend K0:0, K0:1; Que K1:1
pool.addLocal(ltx) // +K2:0 => Pend K0:0, K0:1, K2:0; Que K1:1

pending, queued := pool.Stats()
if pending != 3 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3)
@ -1998,9 +1995,9 @@ func TestUnderpricingDynamicFee(t *testing.T) {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
}
if queued != 2 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1)
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2)
}
if err := validateEvents(events, 2); err != nil { // todo make it 4...After this validateEvents the pending becomes 3?!
if err := validateEvents(events, 2); err != nil {
t.Fatalf("additional event firing failed: %v", err)
}
if err := validatePoolInternals(pool); err != nil {
@ -2015,12 +2012,11 @@ func TestUnderpricingDynamicFee(t *testing.T) {
if err := pool.addLocal(ltx); err != nil {
t.Fatalf("failed to add new underpriced local transaction: %v", err)
}

pending, queued = pool.Stats()
if pending != 3 { // 3
if pending != 3 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3)
}
if queued != 1 { // 1
if queued != 1 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1)
}
if err := validateEvents(events, 2); err != nil {
@ -2036,51 +2032,41 @@ func TestUnderpricingDynamicFee(t *testing.T) {
func TestDualHeapEviction(t *testing.T) {
t.Parallel()

testTxPoolConfig.OverflowPoolSlots = 1
pool, _ := setupPoolWithConfig(eip1559Config)
defer pool.Close()

pool.config.GlobalSlots = 2
pool.config.GlobalQueue = 2
pool.config.OverflowPoolSlots = 1
pool.config.GlobalSlots = 10
pool.config.GlobalQueue = 10

var (
highTip, highCap *types.Transaction
baseFee int
highCapValue int64
highTipValue int64
)

check := func(tx *types.Transaction, name string) {
if pool.all.GetRemote(tx.Hash()) == nil {
t.Fatalf("highest %s transaction evicted from the pool, gasTip: %s, gasFeeCap: %s, hash: %s", name, highTip.GasTipCap().String(), highCap.GasFeeCap().String(), tx.Hash().String())
t.Fatalf("highest %s transaction evicted from the pool", name)
}
}

add := func(urgent bool) {
for i := 0; i < 4; i++ {
for i := 0; i < 20; i++ {
var tx *types.Transaction
// Create a test accounts and fund it
key, _ := crypto.GenerateKey()
testAddBalance(pool, crypto.PubkeyToAddress(key.PublicKey), big.NewInt(1000000000000))
if urgent {
tx = dynamicFeeTx(0, 100000, big.NewInt(int64(baseFee+1+i)), big.NewInt(int64(1+i)), key)
if int64(1+i) > highTipValue || (int64(1+i) == highTipValue && int64(baseFee+1+i) > highTip.GasFeeCap().Int64()) {
highTipValue = int64(1 + i)
highTip = tx
}
} else {
tx = dynamicFeeTx(0, 100000, big.NewInt(int64(baseFee+200+i)), big.NewInt(1), key)
if int64(baseFee+200+i) > highCapValue {
highCapValue = int64(baseFee + 200 + i)
highCap = tx
}
}
pool.addRemotesSync([]*types.Transaction{tx})
}
pending, queued := pool.Stats()
if pending+queued != 4 {
t.Fatalf("transaction count mismatch: have %d, want %d, pending %d, queued %d, OverflowPool %d", pending+queued, 5, pending, queued, pool.localBufferPool.Size())
if pending+queued != 20 {
t.Fatalf("transaction count mismatch: have %d, want %d", pending+queued, 10)
}
}

@ -2245,50 +2231,6 @@ func TestReplacement(t *testing.T) {
}
}

func TestTransferTransactions(t *testing.T) {
t.Parallel()
testTxPoolConfig.OverflowPoolSlots = 1
pool, _ := setupPoolWithConfig(eip1559Config)
defer pool.Close()

pool.config.GlobalSlots = 1
pool.config.GlobalQueue = 2

// Create a number of test accounts and fund them
keys := make([]*ecdsa.PrivateKey, 5)
for i := 0; i < len(keys); i++ {
keys[i], _ = crypto.GenerateKey()
testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000))
}

tx := dynamicFeeTx(0, 100000, big.NewInt(3), big.NewInt(2), keys[0])
from, _ := types.Sender(pool.signer, tx)
pool.addToOverflowPool([]*types.Transaction{tx}, true)
pending, queue := pool.Stats()

assert.Equal(t, 0, pending, "pending transactions mismatched")
assert.Equal(t, 0, queue, "queued transactions mismatched")
assert.Equal(t, 1, pool.statsOverflowPool(), "OverflowPool size unexpected")

tx2 := dynamicFeeTx(0, 100000, big.NewInt(3), big.NewInt(2), keys[1])
pool.addToOverflowPool([]*types.Transaction{tx2}, true)
assert.Equal(t, 1, pool.statsOverflowPool(), "OverflowPool size unexpected")
<-pool.requestPromoteExecutables(newAccountSet(pool.signer, from))
pending, queue = pool.Stats()

assert.Equal(t, 0, pending, "pending transactions mismatched")
assert.Equal(t, 1, queue, "queued transactions mismatched")
assert.Equal(t, 0, pool.statsOverflowPool(), "OverflowPool size unexpected")

tx3 := dynamicFeeTx(0, 100000, big.NewInt(3), big.NewInt(2), keys[2])
pool.addToOverflowPool([]*types.Transaction{tx3}, true)
pending, queue = pool.Stats()

assert.Equal(t, 1, pending, "pending transactions mismatched")
assert.Equal(t, 0, queue, "queued transactions mismatched")
assert.Equal(t, 1, pool.statsOverflowPool(), "OverflowPool size unexpected")
}

// Tests that the pool rejects replacement dynamic fee transactions that don't
// meet the minimum price bump required.
func TestReplacementDynamicFee(t *testing.T) {

@ -1,171 +0,0 @@
package legacypool

import (
"container/heap"
"fmt"
"sync"
"time"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
)

// txHeapItem implements the Interface interface (https://pkg.go.dev/container/heap#Interface) of heap so that it can be heapified
type txHeapItem struct {
tx *types.Transaction
timestamp int64 // Unix timestamp (nanoseconds) of when the transaction was added
index int
}

type txHeap []*txHeapItem

func (h txHeap) Len() int { return len(h) }
func (h txHeap) Less(i, j int) bool {
return h[i].timestamp < h[j].timestamp
}
func (h txHeap) Swap(i, j int) {
if i < 0 || j < 0 || i >= len(h) || j >= len(h) {
return // Silently fail if indices are out of bounds
}
h[i], h[j] = h[j], h[i]
if h[i] != nil {
h[i].index = i
}
if h[j] != nil {
h[j].index = j
}
}

func (h *txHeap) Push(x interface{}) {
item, ok := x.(*txHeapItem)
if !ok {
return
}
n := len(*h)
item.index = n
*h = append(*h, item)
}

func (h *txHeap) Pop() interface{} {
old := *h
n := len(old)
if n == 0 {
return nil // Return nil if the heap is empty
}
item := old[n-1]
old[n-1] = nil // avoid memory leak
*h = old[0 : n-1]
if item != nil {
item.index = -1 // for safety
}
return item
}

type TxOverflowPool struct {
txHeap txHeap
index map[common.Hash]*txHeapItem
mu sync.RWMutex
maxSize uint64
totalSize int
}

func NewTxOverflowPoolHeap(estimatedMaxSize uint64) *TxOverflowPool {
return &TxOverflowPool{
txHeap: make(txHeap, 0, estimatedMaxSize),
index: make(map[common.Hash]*txHeapItem, estimatedMaxSize),
maxSize: estimatedMaxSize,
}
}

func (tp *TxOverflowPool) Add(tx *types.Transaction) {
tp.mu.Lock()
defer tp.mu.Unlock()

if _, exists := tp.index[tx.Hash()]; exists {
// Transaction already in pool, ignore
return
}

if uint64(len(tp.txHeap)) >= tp.maxSize {
// Remove the oldest transaction to make space
oldestItem, ok := heap.Pop(&tp.txHeap).(*txHeapItem)
if !ok || oldestItem == nil {
return
}
delete(tp.index, oldestItem.tx.Hash())
tp.totalSize -= numSlots(oldestItem.tx)
OverflowPoolGauge.Dec(1)
}

item := &txHeapItem{
tx: tx,
timestamp: time.Now().UnixNano(),
}
heap.Push(&tp.txHeap, item)
tp.index[tx.Hash()] = item
tp.totalSize += numSlots(tx)
OverflowPoolGauge.Inc(1)
}

func (tp *TxOverflowPool) Get(hash common.Hash) (*types.Transaction, bool) {
tp.mu.RLock()
defer tp.mu.RUnlock()
if item, ok := tp.index[hash]; ok {
return item.tx, true
}
return nil, false
}

func (tp *TxOverflowPool) Remove(hash common.Hash) {
tp.mu.Lock()
defer tp.mu.Unlock()
if item, ok := tp.index[hash]; ok {
heap.Remove(&tp.txHeap, item.index)
delete(tp.index, hash)
tp.totalSize -= numSlots(item.tx)
OverflowPoolGauge.Dec(1)
}
}

func (tp *TxOverflowPool) Flush(n int) []*types.Transaction {
tp.mu.Lock()
defer tp.mu.Unlock()
if n > tp.txHeap.Len() {
n = tp.txHeap.Len()
}
txs := make([]*types.Transaction, n)
for i := 0; i < n; i++ {
item, ok := heap.Pop(&tp.txHeap).(*txHeapItem)
if !ok || item == nil {
continue
}
txs[i] = item.tx
delete(tp.index, item.tx.Hash())
tp.totalSize -= numSlots(item.tx)
}

OverflowPoolGauge.Dec(int64(n))
return txs
}

func (tp *TxOverflowPool) Len() int {
tp.mu.RLock()
defer tp.mu.RUnlock()
return tp.txHeap.Len()
}

func (tp *TxOverflowPool) Size() int {
tp.mu.RLock()
defer tp.mu.RUnlock()
return tp.totalSize
}

func (tp *TxOverflowPool) PrintTxStats() {
tp.mu.RLock()
defer tp.mu.RUnlock()
for _, item := range tp.txHeap {
tx := item.tx
fmt.Printf("Hash: %s, Timestamp: %d, GasFeeCap: %s, GasTipCap: %s\n",
tx.Hash().String(), item.timestamp, tx.GasFeeCap().String(), tx.GasTipCap().String())
}
}
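TxOverflowPool is a FIFO buffer: the heap orders entries by insertion timestamp, Add evicts the oldest entry once maxSize transactions are held, and Flush pops in arrival order. A short in-package usage sketch (the type is unexported from legacypool, so this only compiles alongside it, e.g. in a test file reusing the package's imports; transaction values are arbitrary):

func ExampleTxOverflowPool() {
	pool := NewTxOverflowPoolHeap(2) // room for two transactions

	to := common.HexToAddress("0x1234567890123456789012345678901234567890")
	tx1 := types.NewTransaction(0, to, big.NewInt(1), 21000, big.NewInt(1), nil)
	tx2 := types.NewTransaction(1, to, big.NewInt(1), 21000, big.NewInt(1), nil)
	tx3 := types.NewTransaction(2, to, big.NewInt(1), 21000, big.NewInt(1), nil)

	pool.Add(tx1)
	time.Sleep(time.Millisecond) // ensure distinct timestamps, as the tests below do
	pool.Add(tx2)
	pool.Add(tx3) // at capacity: the oldest entry (tx1) is evicted first

	_, stillThere := pool.Get(tx1.Hash())
	fmt.Println(stillThere, pool.Len()) // false 2
}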
@ -1,266 +0,0 @@
package legacypool

import (
"math/big"
rand2 "math/rand"
"testing"
"time"

"github.com/cometbft/cometbft/libs/rand"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
)

// Helper function to create a test transaction
func createTestTx(nonce uint64, gasPrice *big.Int) *types.Transaction {
to := common.HexToAddress("0x1234567890123456789012345678901234567890")
return types.NewTransaction(nonce, to, big.NewInt(1000), 21000, gasPrice, nil)
}

func TestNewTxOverflowPoolHeap(t *testing.T) {
pool := NewTxOverflowPoolHeap(0)
if pool == nil {
t.Fatal("NewTxOverflowPoolHeap returned nil")
}
if pool.Len() != 0 {
t.Errorf("New pool should be empty, got length %d", pool.Len())
}
}

func TestTxOverflowPoolHeapAdd(t *testing.T) {
pool := NewTxOverflowPoolHeap(1)
tx := createTestTx(1, big.NewInt(1000))

pool.Add(tx)
if pool.Len() != 1 {
t.Errorf("Pool should have 1 transaction, got %d", pool.Len())
}

// Add the same transaction again
pool.Add(tx)
if pool.Len() != 1 {
t.Errorf("Pool should still have 1 transaction after adding duplicate, got %d", pool.Len())
}
}

func TestTxOverflowPoolHeapGet(t *testing.T) {
pool := NewTxOverflowPoolHeap(1)
tx := createTestTx(1, big.NewInt(1000))
pool.Add(tx)

gotTx, exists := pool.Get(tx.Hash())
if !exists {
t.Fatal("Get returned false for existing transaction")
}
if gotTx.Hash() != tx.Hash() {
t.Errorf("Get returned wrong transaction. Want %v, got %v", tx.Hash(), gotTx.Hash())
}

_, exists = pool.Get(common.Hash{})
if exists {
t.Error("Get returned true for non-existent transaction")
}
}

func TestTxOverflowPoolHeapRemove(t *testing.T) {
pool := NewTxOverflowPoolHeap(1)
tx := createTestTx(1, big.NewInt(1000))
pool.Add(tx)

pool.Remove(tx.Hash())
if pool.Len() != 0 {
t.Errorf("Pool should be empty after removing the only transaction, got length %d", pool.Len())
}

// Try to remove non-existent transaction
pool.Remove(common.Hash{})
if pool.Len() != 0 {
t.Error("Removing non-existent transaction should not affect pool size")
}
}

func TestTxOverflowPoolHeapPopN(t *testing.T) {
pool := NewTxOverflowPoolHeap(3)
tx1 := createTestTx(1, big.NewInt(1000))
tx2 := createTestTx(2, big.NewInt(2000))
tx3 := createTestTx(3, big.NewInt(3000))

pool.Add(tx1)
time.Sleep(time.Millisecond) // Ensure different timestamps
pool.Add(tx2)
time.Sleep(time.Millisecond)
pool.Add(tx3)

popped := pool.Flush(2)
if len(popped) != 2 {
t.Fatalf("PopN(2) should return 2 transactions, got %d", len(popped))
}
if popped[0].Hash() != tx1.Hash() || popped[1].Hash() != tx2.Hash() {
t.Error("PopN returned transactions in wrong order")
}
if pool.Len() != 1 {
t.Errorf("Pool should have 1 transaction left, got %d", pool.Len())
}

// Pop more than available
popped = pool.Flush(2)
if len(popped) != 1 {
t.Fatalf("PopN(2) should return 1 transaction when only 1 is left, got %d", len(popped))
}
if popped[0].Hash() != tx3.Hash() {
t.Error("PopN returned wrong transaction")
}
if pool.Len() != 0 {
t.Errorf("Pool should be empty, got length %d", pool.Len())
}
}

func TestTxOverflowPoolHeapOrdering(t *testing.T) {
pool := NewTxOverflowPoolHeap(3)
tx1 := createTestTx(1, big.NewInt(1000))
tx2 := createTestTx(2, big.NewInt(2000))
tx3 := createTestTx(3, big.NewInt(3000))

pool.Add(tx2)
time.Sleep(time.Millisecond) // Ensure different timestamps
pool.Add(tx1)
pool.Add(tx3) // Added immediately after tx1, should have same timestamp but higher sequence

popped := pool.Flush(3)
if len(popped) != 3 {
t.Fatalf("PopN(3) should return 3 transactions, got %d", len(popped))
}
if popped[0].Hash() != tx2.Hash() || popped[1].Hash() != tx1.Hash() || popped[2].Hash() != tx3.Hash() {
t.Error("Transactions not popped in correct order (earliest timestamp first, then by sequence)")
}
}

func TestTxOverflowPoolHeapLen(t *testing.T) {
pool := NewTxOverflowPoolHeap(2)
if pool.Len() != 0 {
t.Errorf("New pool should have length 0, got %d", pool.Len())
}

pool.Add(createTestTx(1, big.NewInt(1000)))
if pool.Len() != 1 {
t.Errorf("Pool should have length 1 after adding a transaction, got %d", pool.Len())
}

pool.Add(createTestTx(2, big.NewInt(2000)))
if pool.Len() != 2 {
t.Errorf("Pool should have length 2 after adding another transaction, got %d", pool.Len())
}

pool.Flush(1)
if pool.Len() != 1 {
t.Errorf("Pool should have length 1 after popping a transaction, got %d", pool.Len())
}
}

// Helper function to create a random test transaction
func createRandomTestTx() *types.Transaction {
nonce := uint64(rand.Intn(1000000))
to := common.BytesToAddress(rand.Bytes(20))
amount := new(big.Int).Rand(rand2.New(rand2.NewSource(rand.Int63())), big.NewInt(1e18))
gasLimit := uint64(21000)
gasPrice := new(big.Int).Rand(rand2.New(rand2.NewSource(rand.Int63())), big.NewInt(1e9))
data := rand.Bytes(100)
return types.NewTransaction(nonce, to, amount, gasLimit, gasPrice, data)
}

func createRandomTestTxs(n int) []*types.Transaction {
txs := make([]*types.Transaction, n)
for i := 0; i < n; i++ {
txs[i] = createRandomTestTx()
}
return txs
}

// goos: darwin
// goarch: arm64
// pkg: github.com/ethereum/go-ethereum/core/txpool/legacypool
// BenchmarkTxOverflowPoolHeapAdd-8 813326 2858 ns/op
func BenchmarkTxOverflowPoolHeapAdd(b *testing.B) {
pool := NewTxOverflowPoolHeap(uint64(b.N))
txs := createRandomTestTxs(b.N)
b.ResetTimer()
for i := 0; i < b.N; i++ {
pool.Add(txs[i])
}
}

// BenchmarkTxOverflowPoolHeapGet-8 32613938 35.63 ns/op
func BenchmarkTxOverflowPoolHeapGet(b *testing.B) {
pool := NewTxOverflowPoolHeap(1000)
txs := createRandomTestTxs(1000)
for _, tx := range txs {
pool.Add(tx)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
pool.Get(txs[i%1000].Hash())
}
}

// BenchmarkTxOverflowPoolHeapRemove-8 3020841 417.8 ns/op
func BenchmarkTxOverflowPoolHeapRemove(b *testing.B) {
pool := NewTxOverflowPoolHeap(uint64(b.N))
txs := createRandomTestTxs(b.N)
for _, tx := range txs {
pool.Add(tx)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
pool.Remove(txs[i].Hash())
}
}

// BenchmarkTxOverflowPoolHeapFlush-8 42963656 29.90 ns/op
func BenchmarkTxOverflowPoolHeapFlush(b *testing.B) {
pool := NewTxOverflowPoolHeap(1000)
txs := createRandomTestTxs(1000)
for _, tx := range txs {
pool.Add(tx)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
pool.Flush(10)
}
}

// BenchmarkTxOverflowPoolHeapLen-8 79147188 20.07 ns/op
func BenchmarkTxOverflowPoolHeapLen(b *testing.B) {
pool := NewTxOverflowPoolHeap(1000)
txs := createRandomTestTxs(1000)
for _, tx := range txs {
pool.Add(tx)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
pool.Len()
}
}

// BenchmarkTxOverflowPoolHeapAddRemove-8 902896 1546 ns/op
func BenchmarkTxOverflowPoolHeapAddRemove(b *testing.B) {
pool := NewTxOverflowPoolHeap(uint64(b.N))
txs := createRandomTestTxs(b.N)
b.ResetTimer()
for i := 0; i < b.N; i++ {
pool.Add(txs[i])
pool.Remove(txs[i].Hash())
}
}

// BenchmarkTxOverflowPoolHeapAddFlush-8 84417 14899 ns/op
func BenchmarkTxOverflowPoolHeapAddFlush(b *testing.B) {
pool := NewTxOverflowPoolHeap(uint64(b.N * 10))
txs := createRandomTestTxs(b.N * 10)
b.ResetTimer()
for i := 0; i < b.N; i++ {
for j := 0; j < 10; j++ {
pool.Add(txs[i*10+j])
}
pool.Flush(10)
}
}
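For anyone re-running the figures in the benchmark comments above, they were presumably produced with the standard tooling, something like `go test -run=NONE -bench=TxOverflowPoolHeap ./core/txpool/legacypool`; exact numbers will vary by machine (the ones shown are from an arm64 darwin host).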
@ -1,43 +0,0 @@
package types

import (
"encoding/json"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
)

type AccountStorage struct {
StorageRoot *common.Hash
StorageSlots map[common.Hash]common.Hash
}

func (a *AccountStorage) UnmarshalJSON(data []byte) error {
var hash common.Hash
if err := json.Unmarshal(data, &hash); err == nil {
a.StorageRoot = &hash
return nil
}
return json.Unmarshal(data, &a.StorageSlots)
}

func (a AccountStorage) MarshalJSON() ([]byte, error) {
if a.StorageRoot != nil {
return json.Marshal(*a.StorageRoot)
}
return json.Marshal(a.StorageSlots)
}

type KnownAccounts map[common.Address]AccountStorage

// It is known that marshaling is broken
// https://github.com/golang/go/issues/55890

//go:generate go run github.com/fjl/gencodec -type TransactionOpts -out gen_tx_opts_json.go
type TransactionOpts struct {
KnownAccounts KnownAccounts `json:"knownAccounts"`
BlockNumberMin *hexutil.Uint64 `json:"blockNumberMin,omitempty"`
BlockNumberMax *hexutil.Uint64 `json:"blockNumberMax,omitempty"`
TimestampMin *hexutil.Uint64 `json:"timestampMin,omitempty"`
TimestampMax *hexutil.Uint64 `json:"timestampMax,omitempty"`
}
@ -17,7 +17,7 @@
# - A constraint describing the requirements of the law, called "require"
# * Implementations are transliterated into functions that operate as well on
# algebraic input points, and are called once per combination of branches
# executed. Each execution returns:
# exectured. Each execution returns:
# - A constraint describing the assumptions this implementation requires
# (such as Z1=1), called "assumeFormula"
# - A constraint describing the assumptions this specific branch requires,

@ -193,16 +193,11 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
chainConfig.CancunTime = config.OverridePassedForkTime
chainConfig.HaberTime = config.OverridePassedForkTime
chainConfig.HaberFixTime = config.OverridePassedForkTime
chainConfig.BohrTime = config.OverridePassedForkTime
overrides.OverridePassedForkTime = config.OverridePassedForkTime
}
if config.OverridePascal != nil {
chainConfig.PascalTime = config.OverridePascal
overrides.OverridePascal = config.OverridePascal
}
if config.OverridePrague != nil {
chainConfig.PragueTime = config.OverridePrague
overrides.OverridePrague = config.OverridePrague
if config.OverrideBohr != nil {
chainConfig.BohrTime = config.OverrideBohr
overrides.OverrideBohr = config.OverrideBohr
}
if config.OverrideVerkle != nil {
chainConfig.VerkleTime = config.OverrideVerkle
@ -296,6 +291,9 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
}
)
bcOps := make([]core.BlockChainOption, 0)
if config.PipeCommit {
bcOps = append(bcOps, core.EnablePipelineCommit)
}
if config.PersistDiff {
bcOps = append(bcOps, core.EnablePersistDiff(config.DiffBlock))
}

@ -107,6 +107,7 @@ type Config struct {
DirectBroadcast bool
DisableSnapProtocol bool // Whether disable snap protocol
EnableTrustProtocol bool // Whether enable trust protocol
PipeCommit bool
RangeLimit bool

// Deprecated, use 'TransactionHistory' instead.
@ -190,11 +191,8 @@ type Config struct {
// OverridePassedForkTime
OverridePassedForkTime *uint64 `toml:",omitempty"`

// OverridePascal (TODO: remove after the fork)
OverridePascal *uint64 `toml:",omitempty"`

// OverridePrague (TODO: remove after the fork)
OverridePrague *uint64 `toml:",omitempty"`
// OverrideBohr (TODO: remove after the fork)
OverrideBohr *uint64 `toml:",omitempty"`

// OverrideVerkle (TODO: remove after the fork)
OverrideVerkle *uint64 `toml:",omitempty"`

@ -30,6 +30,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
DirectBroadcast bool
DisableSnapProtocol bool
EnableTrustProtocol bool
PipeCommit bool
RangeLimit bool
TxLookupLimit uint64 `toml:",omitempty"`
TransactionHistory uint64 `toml:",omitempty"`
@ -70,8 +71,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
RPCEVMTimeout time.Duration
RPCTxFeeCap float64
OverridePassedForkTime *uint64 `toml:",omitempty"`
OverridePascal *uint64 `toml:",omitempty"`
OverridePrague *uint64 `toml:",omitempty"`
OverrideBohr *uint64 `toml:",omitempty"`
OverrideVerkle *uint64 `toml:",omitempty"`
BlobExtraReserve uint64
}
@ -89,6 +89,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.DirectBroadcast = c.DirectBroadcast
enc.DisableSnapProtocol = c.DisableSnapProtocol
enc.EnableTrustProtocol = c.EnableTrustProtocol
enc.PipeCommit = c.PipeCommit
enc.RangeLimit = c.RangeLimit
enc.TxLookupLimit = c.TxLookupLimit
enc.TransactionHistory = c.TransactionHistory
@ -129,8 +130,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.RPCEVMTimeout = c.RPCEVMTimeout
enc.RPCTxFeeCap = c.RPCTxFeeCap
enc.OverridePassedForkTime = c.OverridePassedForkTime
enc.OverridePascal = c.OverridePascal
enc.OverridePrague = c.OverridePrague
enc.OverrideBohr = c.OverrideBohr
enc.OverrideVerkle = c.OverrideVerkle
enc.BlobExtraReserve = c.BlobExtraReserve
return &enc, nil
@ -152,6 +152,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
DirectBroadcast *bool
DisableSnapProtocol *bool
EnableTrustProtocol *bool
PipeCommit *bool
RangeLimit *bool
TxLookupLimit *uint64 `toml:",omitempty"`
TransactionHistory *uint64 `toml:",omitempty"`
@ -192,8 +193,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
RPCEVMTimeout *time.Duration
RPCTxFeeCap *float64
OverridePassedForkTime *uint64 `toml:",omitempty"`
OverridePascal *uint64 `toml:",omitempty"`
OverridePrague *uint64 `toml:",omitempty"`
OverrideBohr *uint64 `toml:",omitempty"`
OverrideVerkle *uint64 `toml:",omitempty"`
BlobExtraReserve *uint64
}
@ -240,6 +240,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.EnableTrustProtocol != nil {
c.EnableTrustProtocol = *dec.EnableTrustProtocol
}
if dec.PipeCommit != nil {
c.PipeCommit = *dec.PipeCommit
}
if dec.RangeLimit != nil {
c.RangeLimit = *dec.RangeLimit
}
@ -360,11 +363,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.OverridePassedForkTime != nil {
c.OverridePassedForkTime = dec.OverridePassedForkTime
}
if dec.OverridePascal != nil {
c.OverridePascal = dec.OverridePascal
}
if dec.OverridePrague != nil {
c.OverridePrague = dec.OverridePrague
if dec.OverrideBohr != nil {
c.OverrideBohr = dec.OverrideBohr
}
if dec.OverrideVerkle != nil {
c.OverrideVerkle = dec.OverrideVerkle

@ -29,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/rpc"
)

@ -736,7 +737,7 @@ func (ec *Client) SendTransaction(ctx context.Context, tx *types.Transaction) er
//
// If the transaction was a contract creation use the TransactionReceipt method to get the
// contract address after the transaction has been mined.
func (ec *Client) SendTransactionConditional(ctx context.Context, tx *types.Transaction, opts types.TransactionOpts) error {
func (ec *Client) SendTransactionConditional(ctx context.Context, tx *types.Transaction, opts ethapi.TransactionOpts) error {
data, err := tx.MarshalBinary()
if err != nil {
return err

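A usage sketch for the client-side call above, patterned on the sendTransactionConditional test in the next hunk (imports assumed: context, common, core/types, ethclient, internal/ethapi; the address and root hash are borrowed from the tests in this diff). Note the caveat this change introduces: the opts type now lives in internal/ethapi, so the method is only callable from code built inside the go-ethereum module, since external importers cannot reference internal packages.

func sendConditionally(ec *ethclient.Client, tx *types.Transaction) error {
	// Pin the target account's entire storage: the node only admits the
	// transaction if the storage root still matches at inclusion time.
	root := common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
	return ec.SendTransactionConditional(context.Background(), tx, ethapi.TransactionOpts{
		KnownAccounts: ethapi.KnownAccounts{
			common.HexToAddress("0x6b3A8798E5Fb9fC5603F3aB5eA2e8136694e55d0"): {StorageRoot: &root},
		},
	})
}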
@ -34,6 +34,7 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
@ -769,9 +770,9 @@ func sendTransactionConditional(ec *Client) error {
}

root := common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
return ec.SendTransactionConditional(context.Background(), tx, types.TransactionOpts{
KnownAccounts: map[common.Address]types.AccountStorage{
testAddr: types.AccountStorage{
return ec.SendTransactionConditional(context.Background(), tx, ethapi.TransactionOpts{
KnownAccounts: map[common.Address]ethapi.AccountStorage{
testAddr: ethapi.AccountStorage{
StorageRoot: &root,
},
},

@ -30,6 +30,7 @@ import (
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/eth/filters"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/params"
@ -38,7 +39,7 @@ import (

// TransactionConditionalSender injects the conditional transaction into the pending pool for execution after verification.
type TransactionConditionalSender interface {
SendTransactionConditional(ctx context.Context, tx *types.Transaction, opts types.TransactionOpts) error
SendTransactionConditional(ctx context.Context, tx *types.Transaction, opts ethapi.TransactionOpts) error
}

// Client exposes the methods provided by the Ethereum RPC client.

@ -2314,7 +2314,7 @@ func (s *TransactionAPI) SendRawTransaction(ctx context.Context, input hexutil.B

// SendRawTransactionConditional will add the signed transaction to the transaction pool.
// The sender/bundler is responsible for signing the transaction
func (s *TransactionAPI) SendRawTransactionConditional(ctx context.Context, input hexutil.Bytes, opts types.TransactionOpts) (common.Hash, error) {
func (s *TransactionAPI) SendRawTransactionConditional(ctx context.Context, input hexutil.Bytes, opts TransactionOpts) (common.Hash, error) {
tx := new(types.Transaction)
if err := tx.UnmarshalBinary(input); err != nil {
return common.Hash{}, err
@ -2324,7 +2324,7 @@ func (s *TransactionAPI) SendRawTransactionConditional(ctx context.Context, inpu
if state == nil || err != nil {
return common.Hash{}, err
}
if err := TxOptsCheck(opts, header.Number.Uint64(), header.Time, state); err != nil {
if err := opts.Check(header.Number.Uint64(), header.Time, state); err != nil {
return common.Hash{}, err
}
return SubmitTransaction(ctx, s.b, tx)

@ -1,9 +1,10 @@
// Code generated by github.com/fjl/gencodec. DO NOT EDIT.

package types
package ethapi

import (
"encoding/json"

"github.com/ethereum/go-ethereum/common/hexutil"
)

@ -2,15 +2,52 @@ package ethapi

import (
"bytes"
"encoding/json"
"errors"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
)

type AccountStorage struct {
StorageRoot *common.Hash
StorageSlots map[common.Hash]common.Hash
}

func (a *AccountStorage) UnmarshalJSON(data []byte) error {
var hash common.Hash
if err := json.Unmarshal(data, &hash); err == nil {
a.StorageRoot = &hash
return nil
}
return json.Unmarshal(data, &a.StorageSlots)
}

func (a AccountStorage) MarshalJSON() ([]byte, error) {
if a.StorageRoot != nil {
return json.Marshal(*a.StorageRoot)
}
return json.Marshal(a.StorageSlots)
}

type KnownAccounts map[common.Address]AccountStorage

// It is known that marshaling is broken
// https://github.com/golang/go/issues/55890

//go:generate go run github.com/fjl/gencodec -type TransactionOpts -out gen_tx_opts_json.go
type TransactionOpts struct {
KnownAccounts KnownAccounts `json:"knownAccounts"`
BlockNumberMin *hexutil.Uint64 `json:"blockNumberMin,omitempty"`
BlockNumberMax *hexutil.Uint64 `json:"blockNumberMax,omitempty"`
TimestampMin *hexutil.Uint64 `json:"timestampMin,omitempty"`
TimestampMax *hexutil.Uint64 `json:"timestampMax,omitempty"`
}

const MaxNumberOfEntries = 1000

func TxOptsCheck(o types.TransactionOpts, blockNumber uint64, timeStamp uint64, statedb *state.StateDB) error {
func (o *TransactionOpts) Check(blockNumber uint64, timeStamp uint64, statedb *state.StateDB) error {
if o.BlockNumberMin != nil && blockNumber < uint64(*o.BlockNumberMin) {
return errors.New("BlockNumberMin condition not met")
}
@ -34,10 +71,10 @@ func TxOptsCheck(o types.TransactionOpts, blockNumber uint64, timeStamp uint64,
if counter > MaxNumberOfEntries {
return errors.New("knownAccounts too large")
}
return TxOptsCheckStorage(o, statedb)
return o.CheckStorage(statedb)
}

func TxOptsCheckStorage(o types.TransactionOpts, statedb *state.StateDB) error {
func (o *TransactionOpts) CheckStorage(statedb *state.StateDB) error {
for address, accountStorage := range o.KnownAccounts {
if accountStorage.StorageRoot != nil {
rootHash := statedb.GetRoot(address)

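On the wire, the dual encoding handled by AccountStorage above means each knownAccounts value is either a bare 32-byte hash (a pinned storage root) or an object of slot-to-value pairs. A self-contained sketch of the payload shape; since internal/ethapi is not importable outside go-ethereum, plain maps stand in for the real types here, the first address and both hashes come from the tests in this diff, and the second address is purely illustrative:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Mirrors TransactionOpts' JSON form; interface{} stands in for the
	// root-hash-or-slot-map union that AccountStorage (de)serializes.
	opts := map[string]interface{}{
		"knownAccounts": map[string]interface{}{
			// Bare hash: the account's whole storage root must still match.
			"0x6b3A8798E5Fb9fC5603F3aB5eA2e8136694e55d0": "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563",
			// Object: the listed storage slots must hold these exact values.
			"0x0000000000000000000000000000000000000002": map[string]string{
				"0xc65a7bb8d6351c1cf70c95a316cc6a92839c986682d98bc35f958f4883f9d2a8": "0x0000000000000000000000000000000000000000000000000000000000000000",
			},
		},
		"blockNumberMax": "0x2",
	}
	out, _ := json.MarshalIndent(opts, "", "  ")
	fmt.Println(string(out))
}

Check then enforces the block-number and timestamp bounds against the current header and walks knownAccounts (rejected once the entry count exceeds MaxNumberOfEntries = 1000) before CheckStorage compares the pinned roots and slots against the state database.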
@ -1,4 +1,4 @@
package types
package ethapi_test

import (
	"encoding/json"
@ -7,6 +7,7 @@ import (

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/internal/ethapi"
)

func ptr(hash common.Hash) *common.Hash {
@ -22,15 +23,15 @@ func TestTransactionOptsJSONUnMarshalTrip(t *testing.T) {
		name     string
		input    string
		mustFail bool
		expected TransactionOpts
		expected ethapi.TransactionOpts
	}{
		{
			"StateRoot",
			`{"knownAccounts":{"0x6b3A8798E5Fb9fC5603F3aB5eA2e8136694e55d0":"0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563"}}`,
			false,
			TransactionOpts{
				KnownAccounts: map[common.Address]AccountStorage{
					common.HexToAddress("0x6b3A8798E5Fb9fC5603F3aB5eA2e8136694e55d0"): AccountStorage{
			ethapi.TransactionOpts{
				KnownAccounts: map[common.Address]ethapi.AccountStorage{
					common.HexToAddress("0x6b3A8798E5Fb9fC5603F3aB5eA2e8136694e55d0"): ethapi.AccountStorage{
						StorageRoot: ptr(common.HexToHash("0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")),
					},
				},
@ -40,9 +41,9 @@ func TestTransactionOptsJSONUnMarshalTrip(t *testing.T) {
			"StorageSlots",
			`{"knownAccounts":{"0x6b3A8798E5Fb9fC5603F3aB5eA2e8136694e55d0":{"0xc65a7bb8d6351c1cf70c95a316cc6a92839c986682d98bc35f958f4883f9d2a8":"0x0000000000000000000000000000000000000000000000000000000000000000"}}}`,
			false,
			TransactionOpts{
				KnownAccounts: map[common.Address]AccountStorage{
					common.HexToAddress("0x6b3A8798E5Fb9fC5603F3aB5eA2e8136694e55d0"): AccountStorage{
			ethapi.TransactionOpts{
				KnownAccounts: map[common.Address]ethapi.AccountStorage{
					common.HexToAddress("0x6b3A8798E5Fb9fC5603F3aB5eA2e8136694e55d0"): ethapi.AccountStorage{
						StorageRoot: nil,
						StorageSlots: map[common.Hash]common.Hash{
							common.HexToHash("0xc65a7bb8d6351c1cf70c95a316cc6a92839c986682d98bc35f958f4883f9d2a8"): common.HexToHash("0x"),
@ -55,15 +56,15 @@ func TestTransactionOptsJSONUnMarshalTrip(t *testing.T) {
			"EmptyObject",
			`{"knownAccounts":{}}`,
			false,
			TransactionOpts{
				KnownAccounts: make(map[common.Address]AccountStorage),
			ethapi.TransactionOpts{
				KnownAccounts: make(map[common.Address]ethapi.AccountStorage),
			},
		},
		{
			"EmptyStrings",
			`{"knownAccounts":{"":""}}`,
			true,
			TransactionOpts{
			ethapi.TransactionOpts{
				KnownAccounts: nil,
			},
		},
@ -71,7 +72,7 @@ func TestTransactionOptsJSONUnMarshalTrip(t *testing.T) {
			"BlockNumberMin",
			`{"blockNumberMin":"0x1"}`,
			false,
			TransactionOpts{
			ethapi.TransactionOpts{
				BlockNumberMin: u64Ptr(1),
			},
		},
@ -79,7 +80,7 @@ func TestTransactionOptsJSONUnMarshalTrip(t *testing.T) {
			"BlockNumberMax",
			`{"blockNumberMin":"0x1", "blockNumberMax":"0x2"}`,
			false,
			TransactionOpts{
			ethapi.TransactionOpts{
				BlockNumberMin: u64Ptr(1),
				BlockNumberMax: u64Ptr(2),
			},
@ -88,7 +89,7 @@ func TestTransactionOptsJSONUnMarshalTrip(t *testing.T) {
			"TimestampMin",
			`{"timestampMin":"0xffff"}`,
			false,
			TransactionOpts{
			ethapi.TransactionOpts{
				TimestampMin: u64Ptr(0xffff),
			},
		},
@ -96,7 +97,7 @@ func TestTransactionOptsJSONUnMarshalTrip(t *testing.T) {
			"TimestampMax",
			`{"timestampMax":"0xffffff"}`,
			false,
			TransactionOpts{
			ethapi.TransactionOpts{
				TimestampMax: u64Ptr(0xffffff),
			},
		},
@ -104,7 +105,7 @@ func TestTransactionOptsJSONUnMarshalTrip(t *testing.T) {

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			var opts TransactionOpts
			var opts ethapi.TransactionOpts
			err := json.Unmarshal([]byte(test.input), &opts)
			if test.mustFail && err == nil {
				t.Errorf("Test %s should fail", test.name)
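The round-trip cases above rely on knownAccounts values decoding in two shapes: a bare 32-byte hash pins the whole storage root, while an object pins individual slots. A hedged sketch of the custom decoder that behavior implies — the real implementation in internal/ethapi may differ:

// Sketch only: assumes encoding/json and common are imported, and that
// AccountStorage carries StorageRoot *common.Hash and
// StorageSlots map[common.Hash]common.Hash as the tests show.
func (a *AccountStorage) UnmarshalJSON(data []byte) error {
	// A leading quote means a JSON string, i.e. the single-hash form.
	if len(data) > 0 && data[0] == '"' {
		var root common.Hash
		if err := json.Unmarshal(data, &root); err != nil {
			return err
		}
		a.StorageRoot = &root
		return nil
	}
	// Otherwise expect an object mapping slot hashes to value hashes.
	var slots map[common.Hash]common.Hash
	if err := json.Unmarshal(data, &slots); err != nil {
		return err
	}
	a.StorageSlots = slots
	return nil
}

Either branch rejects the "EmptyStrings" input, since an empty hex string is not a valid common.Hash, which is what the mustFail case checks.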
@ -1430,6 +1430,15 @@ func (w *worker) commit(env *environment, interval func(), update bool, start ti
	if interval != nil {
		interval()
	}
	/*
		err := env.state.WaitPipeVerification()
		if err != nil {
			return err
		}
		env.state.CorrectAccountsRoot(w.chain.CurrentBlock().Root)
	*/

	fees := env.state.GetBalance(consensus.SystemAddress).ToBig()
	feesInEther := new(big.Float).Quo(new(big.Float).SetInt(fees), big.NewFloat(params.Ether))
	// Withdrawals are set to nil here, because this is only called in PoW.
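The fee line above converts an integer wei balance into a float ether value for logging. A self-contained illustration of that conversion (params.Ether is the 1e18 wei constant):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	const ether = 1e18                            // stands in for params.Ether
	fees := big.NewInt(1500000000000000000)       // 1.5 ether, expressed in wei
	feesInEther := new(big.Float).Quo(new(big.Float).SetInt(fees), big.NewFloat(ether))
	fmt.Println(feesInEther.Text('f', 4))         // prints 1.5000
}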
@ -155,9 +155,6 @@ var (
		HaberTime:    newUint64(1718863500), // 2024-06-20 06:05:00 AM UTC
		HaberFixTime: newUint64(1727316120), // 2024-09-26 02:02:00 AM UTC
		BohrTime:     newUint64(1727317200), // 2024-09-26 02:20:00 AM UTC
		// TODO
		PascalTime: nil,
		PragueTime: nil,

		Parlia: &ParliaConfig{
			Period: 3,
@ -199,9 +196,6 @@ var (
		HaberTime:    newUint64(1716962820), // 2024-05-29 06:07:00 AM UTC
		HaberFixTime: newUint64(1719986788), // 2024-07-03 06:06:28 AM UTC
		BohrTime:     newUint64(1724116996), // 2024-08-20 01:23:16 AM UTC
		// TODO
		PascalTime: nil,
		PragueTime: nil,

		Parlia: &ParliaConfig{
			Period: 3,
@ -244,9 +238,6 @@ var (
		HaberTime:    newUint64(0),
		HaberFixTime: newUint64(0),
		BohrTime:     newUint64(0),
		// TODO: set them to `0` when passed on the mainnet
		PascalTime: nil,
		PragueTime: nil,

		Parlia: &ParliaConfig{
			Period: 3,
@ -526,7 +517,6 @@ type ChainConfig struct {
	HaberTime    *uint64 `json:"haberTime,omitempty"`    // Haber switch time (nil = no fork, 0 = already on haber)
	HaberFixTime *uint64 `json:"haberFixTime,omitempty"` // HaberFix switch time (nil = no fork, 0 = already on haberFix)
	BohrTime     *uint64 `json:"bohrTime,omitempty"`     // Bohr switch time (nil = no fork, 0 = already on bohr)
	PascalTime   *uint64 `json:"pascalTime,omitempty"`   // Pascal switch time (nil = no fork, 0 = already on pascal)
	PragueTime   *uint64 `json:"pragueTime,omitempty"`   // Prague switch time (nil = no fork, 0 = already on prague)
	VerkleTime   *uint64 `json:"verkleTime,omitempty"`   // Verkle switch time (nil = no fork, 0 = already on verkle)
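These switch times are pointer-valued so that nil ("no fork") and 0 ("active since genesis") stay distinguishable, and omitempty keeps unscheduled forks out of the serialized config entirely. A minimal standalone illustration of that wire behavior:

package main

import (
	"encoding/json"
	"fmt"
)

type cfg struct {
	BohrTime   *uint64 `json:"bohrTime,omitempty"`
	PragueTime *uint64 `json:"pragueTime,omitempty"`
}

func main() {
	bohr := uint64(1727317200)
	out, _ := json.Marshal(cfg{BohrTime: &bohr}) // PragueTime is nil, so it is omitted
	fmt.Println(string(out))                     // {"bohrTime":1727317200}
}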
@ -647,17 +637,7 @@ func (c *ChainConfig) String() string {
		BohrTime = big.NewInt(0).SetUint64(*c.BohrTime)
	}

	var PascalTime *big.Int
	if c.PascalTime != nil {
		PascalTime = big.NewInt(0).SetUint64(*c.PascalTime)
	}

	var PragueTime *big.Int
	if c.PragueTime != nil {
		PragueTime = big.NewInt(0).SetUint64(*c.PragueTime)
	}

	return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, Ramanujan: %v, Niels: %v, MirrorSync: %v, Bruno: %v, Berlin: %v, YOLO v3: %v, CatalystBlock: %v, London: %v, ArrowGlacier: %v, MergeFork:%v, Euler: %v, Gibbs: %v, Nano: %v, Moran: %v, Planck: %v,Luban: %v, Plato: %v, Hertz: %v, Hertzfix: %v, ShanghaiTime: %v, KeplerTime: %v, FeynmanTime: %v, FeynmanFixTime: %v, CancunTime: %v, HaberTime: %v, HaberFixTime: %v, BohrTime: %v, PascalTime: %v, PragueTime: %v, Engine: %v}",
	return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, Ramanujan: %v, Niels: %v, MirrorSync: %v, Bruno: %v, Berlin: %v, YOLO v3: %v, CatalystBlock: %v, London: %v, ArrowGlacier: %v, MergeFork:%v, Euler: %v, Gibbs: %v, Nano: %v, Moran: %v, Planck: %v,Luban: %v, Plato: %v, Hertz: %v, Hertzfix: %v, ShanghaiTime: %v, KeplerTime: %v, FeynmanTime: %v, FeynmanFixTime: %v, CancunTime: %v, HaberTime: %v, HaberFixTime: %v, BohrTime: %v, Engine: %v}",
		c.ChainID,
		c.HomesteadBlock,
		c.DAOForkBlock,
@ -697,8 +677,6 @@ func (c *ChainConfig) String() string {
		HaberTime,
		HaberFixTime,
		BohrTime,
		PascalTime,
		PragueTime,
		engine,
	)
}
@ -999,20 +977,6 @@ func (c *ChainConfig) IsOnBohr(currentBlockNumber *big.Int, lastBlockTime uint64
	return !c.IsBohr(lastBlockNumber, lastBlockTime) && c.IsBohr(currentBlockNumber, currentBlockTime)
}

// IsPascal returns whether time is either equal to the Pascal fork time or greater.
func (c *ChainConfig) IsPascal(num *big.Int, time uint64) bool {
	return c.IsLondon(num) && isTimestampForked(c.PascalTime, time)
}

// IsOnPascal returns whether currentBlockTime is either equal to the Pascal fork time or greater firstly.
func (c *ChainConfig) IsOnPascal(currentBlockNumber *big.Int, lastBlockTime uint64, currentBlockTime uint64) bool {
	lastBlockNumber := new(big.Int)
	if currentBlockNumber.Cmp(big.NewInt(1)) >= 0 {
		lastBlockNumber.Sub(currentBlockNumber, big.NewInt(1))
	}
	return !c.IsPascal(lastBlockNumber, lastBlockTime) && c.IsPascal(currentBlockNumber, currentBlockTime)
}

// IsPrague returns whether num is either equal to the Prague fork time or greater.
func (c *ChainConfig) IsPrague(num *big.Int, time uint64) bool {
	return c.IsLondon(num) && isTimestampForked(c.PragueTime, time)
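Both predicates above lean on the package's timestamp gate. For reference, a sketch of that helper consistent with upstream go-ethereum, its signature inferred from the calls in this diff:

// isTimestampForked returns whether a fork scheduled at time s is active at
// the given head timestamp; a nil schedule means the fork is not configured.
func isTimestampForked(s *uint64, head uint64) bool {
	if s == nil {
		return false
	}
	return *s <= head
}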
@ -1079,8 +1043,7 @@ func (c *ChainConfig) CheckConfigForkOrder() error {
		{name: "haberTime", timestamp: c.HaberTime},
		{name: "haberFixTime", timestamp: c.HaberFixTime},
		{name: "bohrTime", timestamp: c.BohrTime},
		{name: "pascalTime", timestamp: c.PascalTime},
		{name: "pragueTime", timestamp: c.PragueTime},
		{name: "pragueTime", timestamp: c.PragueTime, optional: true},
		{name: "verkleTime", timestamp: c.VerkleTime, optional: true},
	} {
		if lastFork.name != "" {
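The loop this hunk feeds enforces two rules: scheduled forks must be non-decreasing in time, and a mandatory fork may not stay unscheduled while a later one is set — marking pragueTime optional is what now lets it remain nil. A simplified, hedged sketch of that check (names are illustrative, not the committed code):

type forkTime struct {
	name      string
	timestamp *uint64
	optional  bool
}

func checkOrder(forks []forkTime) error {
	var last forkTime
	for _, cur := range forks {
		if last.name != "" {
			// A mandatory earlier fork cannot be unscheduled while a later one is set.
			if last.timestamp == nil && cur.timestamp != nil {
				return fmt.Errorf("unsupported fork ordering: %v not enabled, but %v enabled", last.name, cur.name)
			}
			// Scheduled forks must not run out of order.
			if last.timestamp != nil && cur.timestamp != nil && *last.timestamp > *cur.timestamp {
				return fmt.Errorf("unsupported fork ordering: %v enabled at %v, but %v enabled at %v",
					last.name, *last.timestamp, cur.name, *cur.timestamp)
			}
		}
		// Optional, unscheduled forks are skipped rather than remembered.
		if cur.timestamp != nil || !cur.optional {
			last = cur
		}
	}
	return nil
}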
@ -1236,9 +1199,6 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, headNumber *big.Int,
	if isForkTimestampIncompatible(c.BohrTime, newcfg.BohrTime, headTimestamp) {
		return newTimestampCompatError("Bohr fork timestamp", c.BohrTime, newcfg.BohrTime)
	}
	if isForkTimestampIncompatible(c.PascalTime, newcfg.PascalTime, headTimestamp) {
		return newTimestampCompatError("Pascal fork timestamp", c.PascalTime, newcfg.PascalTime)
	}
	if isForkTimestampIncompatible(c.PragueTime, newcfg.PragueTime, headTimestamp) {
		return newTimestampCompatError("Prague fork timestamp", c.PragueTime, newcfg.PragueTime)
	}
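For context, the incompatibility test used above only rejects a rescheduled fork once the local head has already passed the original (or new) time. A sketch consistent with upstream go-ethereum, with helper names inferred from the calls here:

// isForkTimestampIncompatible reports whether a fork scheduled at s1 cannot be
// rescheduled to s2 because the head timestamp is already past one of them.
func isForkTimestampIncompatible(s1, s2 *uint64, head uint64) bool {
	return (isTimestampForked(s1, head) || isTimestampForked(s2, head)) && !configTimestampEqual(s1, s2)
}

// configTimestampEqual treats two nil schedules as equal.
func configTimestampEqual(x, y *uint64) bool {
	if x == nil || y == nil {
		return x == nil && y == nil
	}
	return *x == *y
}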
@ -1422,7 +1382,7 @@ type Rules struct {
	IsHertz bool
	IsHertzfix bool
	IsShanghai, IsKepler, IsFeynman, IsCancun, IsHaber bool
	IsBohr, IsPascal, IsPrague, IsVerkle bool
	IsBohr, IsPrague, IsVerkle bool
}

// Rules ensures c's ChainID is not nil.
@ -1459,7 +1419,6 @@ func (c *ChainConfig) Rules(num *big.Int, isMerge bool, timestamp uint64) Rules
		IsCancun: c.IsCancun(num, timestamp),
		IsHaber:  c.IsHaber(num, timestamp),
		IsBohr:   c.IsBohr(num, timestamp),
		IsPascal: c.IsPascal(num, timestamp),
		IsPrague: c.IsPrague(num, timestamp),
		IsVerkle: c.IsVerkle(num, timestamp),
	}
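Callers derive a Rules value once per block and branch on its flags; after this change there is no IsPascal flag left to consult. A short usage sketch (the header variable is assumed, the Rules signature is the one shown in the hunk above):

rules := chainConfig.Rules(header.Number, false /* isMerge */, header.Time)
if rules.IsBohr {
	// Bohr-era behavior.
}
if rules.IsPrague {
	// Prague-era behavior; Pascal gating no longer exists after this commit.
}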
@ -347,6 +347,7 @@ func (db *Database) Cap(limit common.StorageSize) error {

	// Keep committing nodes from the flush-list until we're below allowance
	oldest := db.oldest
	err := func() error {
		for size > limit && oldest != (common.Hash{}) {
			// Fetch the oldest referenced node and push into the batch
			node := db.dirties[oldest]
@ -369,6 +370,11 @@ func (db *Database) Cap(limit common.StorageSize) error {
			}
			oldest = node.flushNext
		}
		return nil
	}()
	if err != nil {
		return err
	}
	// Flush out any remainder data from the last batch
	if err := batch.Write(); err != nil {
		log.Error("Failed to write flush list to disk", "err", err)
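The change wraps the flush loop in an immediately invoked closure so that an early "return err" exits only the loop, leaving the outer function to decide what still runs. A self-contained illustration of the same pattern:

package main

import (
	"errors"
	"fmt"
)

func main() {
	items := []int{1, 2, 3, -1, 4}
	// Same shape as the Cap change above: run the loop inside an immediately
	// invoked closure; "return err" escapes the loop, not the whole function.
	err := func() error {
		for _, it := range items {
			if it < 0 {
				return errors.New("negative item")
			}
			fmt.Println("processed", it)
		}
		return nil
	}()
	if err != nil {
		fmt.Println("aborted:", err)
		return
	}
	fmt.Println("post-loop flush runs only on success")
}

Without the closure, every early exit inside the loop would have to duplicate whatever cleanup or error handling follows it in the outer function.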