Compare commits

10 Commits

Author | SHA1 | Date
---|---|---
 | caa9e7991a |
 | 675449a1d9 |
 | be0eb10f1f |
 | 00a36bb0cc |
 | 3a6dbe4d85 |
 | 72ec06eae7 |
 | 55896bf610 |
 | bff9d252d6 |
 | 27f67a5210 |
 | 8c1acb0b22 |
.github/workflows/pre-release.yml (vendored, 8 changes)

@@ -82,28 +82,28 @@ jobs:
       # ==============================

       - name: Upload Linux Build
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4.3.3
         if: matrix.os == 'ubuntu-latest'
         with:
           name: linux
           path: ./build/bin/geth

       - name: Upload MacOS Build
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4.3.3
         if: matrix.os == 'macos-latest'
         with:
           name: macos
           path: ./build/bin/geth

       - name: Upload Windows Build
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4.3.3
         if: matrix.os == 'windows-latest'
         with:
           name: windows
           path: ./build/bin/geth.exe

       - name: Upload ARM-64 Build
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4.3.3
         if: matrix.os == 'ubuntu-latest'
         with:
           name: arm64
.github/workflows/release.yml (vendored, 8 changes)

@@ -81,28 +81,28 @@ jobs:
       # ==============================

       - name: Upload Linux Build
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4.3.3
         if: matrix.os == 'ubuntu-latest'
         with:
           name: linux
           path: ./build/bin/geth

       - name: Upload MacOS Build
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4.3.3
         if: matrix.os == 'macos-latest'
         with:
           name: macos
           path: ./build/bin/geth

       - name: Upload Windows Build
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4.3.3
         if: matrix.os == 'windows-latest'
         with:
           name: windows
           path: ./build/bin/geth.exe

       - name: Upload ARM-64 Build
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4.3.3
         if: matrix.os == 'ubuntu-latest'
         with:
           name: arm64
@@ -24,7 +24,6 @@ import (
     "github.com/ethereum/go-ethereum"
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/core/types"
-    "github.com/ethereum/go-ethereum/internal/ethapi"
 )

 var (
@@ -101,7 +100,7 @@ type ContractTransactor interface {
     PendingNonceAt(ctx context.Context, account common.Address) (uint64, error)

     // SendTransactionConditional injects the conditional transaction into the pending pool for execution after verification.
-    SendTransactionConditional(ctx context.Context, tx *types.Transaction, opts ethapi.TransactionOpts) error
+    SendTransactionConditional(ctx context.Context, tx *types.Transaction, opts types.TransactionOpts) error
 }

 // DeployBackend wraps the operations needed by WaitMined and WaitDeployed.
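The two hunks above drop this package's dependency on internal/ethapi: SendTransactionConditional now takes types.TransactionOpts instead of ethapi.TransactionOpts. Go's internal/... packages cannot be imported from outside the go-ethereum module, so the old signature was effectively impossible for external code to implement; moving the options type into core/types fixes that. A hedged sketch of an implementer against the new signature (the stub body is illustrative, not code from this changeset):

```go
package example

import (
	"context"
	"errors"

	"github.com/ethereum/go-ethereum/core/types"
)

// stubTransactor satisfies only the revised method; the TransactionOpts
// field set is not shown in this diff and is left untouched here.
type stubTransactor struct{}

func (stubTransactor) SendTransactionConditional(ctx context.Context, tx *types.Transaction, opts types.TransactionOpts) error {
	// A real backend would submit tx together with its validity
	// conditions (opts) to the node here.
	return errors.New("conditional sending not wired up in this sketch")
}
```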
@@ -31,7 +31,6 @@ import (
     "github.com/ethereum/go-ethereum/common/hexutil"
     "github.com/ethereum/go-ethereum/core/types"
     "github.com/ethereum/go-ethereum/crypto"
-    "github.com/ethereum/go-ethereum/internal/ethapi"
     "github.com/ethereum/go-ethereum/rlp"
     "github.com/stretchr/testify/assert"
 )
@@ -76,7 +75,7 @@ func (mt *mockTransactor) SendTransaction(ctx context.Context, tx *types.Transac
     return nil
 }

-func (mt *mockTransactor) SendTransactionConditional(ctx context.Context, tx *types.Transaction, opts ethapi.TransactionOpts) error {
+func (mt *mockTransactor) SendTransactionConditional(ctx context.Context, tx *types.Transaction, opts types.TransactionOpts) error {
     return nil
 }

@@ -63,7 +63,8 @@ var (
     Flags: flags.Merge([]cli.Flag{
         utils.CachePreimagesFlag,
         utils.OverridePassedForkTime,
-        utils.OverrideBohr,
+        utils.OverridePascal,
+        utils.OverridePrague,
         utils.OverrideVerkle,
         utils.MultiDataBaseFlag,
     }, utils.DatabaseFlags),
@@ -258,9 +259,13 @@ func initGenesis(ctx *cli.Context) error {
         v := ctx.Uint64(utils.OverridePassedForkTime.Name)
         overrides.OverridePassedForkTime = &v
     }
-    if ctx.IsSet(utils.OverrideBohr.Name) {
-        v := ctx.Uint64(utils.OverrideBohr.Name)
-        overrides.OverrideBohr = &v
+    if ctx.IsSet(utils.OverridePascal.Name) {
+        v := ctx.Uint64(utils.OverridePascal.Name)
+        overrides.OverridePascal = &v
+    }
+    if ctx.IsSet(utils.OverridePrague.Name) {
+        v := ctx.Uint64(utils.OverridePrague.Name)
+        overrides.OverridePrague = &v
     }
     if ctx.IsSet(utils.OverrideVerkle.Name) {
         v := ctx.Uint64(utils.OverrideVerkle.Name)
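Both hunks repeat one pattern: each override field is a *uint64, where nil means "use the fork time bundled with the release" and a set flag pins an explicit timestamp. A standalone sketch of that convention (both timestamps below are made up):

```go
package main

import "fmt"

// forkTime mirrors the nil-means-default convention used by the
// Override* fields above.
func forkTime(bundled uint64, override *uint64) uint64 {
	if override != nil {
		return *override
	}
	return bundled
}

func main() {
	fmt.Println(forkTime(1718863500, nil)) // no flag: bundled value wins
	v := uint64(1740000000)                // e.g. --override.pascal 1740000000
	fmt.Println(forkTime(1718863500, &v))  // flag set: override wins
}
```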
@@ -191,9 +191,13 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
         v := ctx.Uint64(utils.OverridePassedForkTime.Name)
         cfg.Eth.OverridePassedForkTime = &v
     }
-    if ctx.IsSet(utils.OverrideBohr.Name) {
-        v := ctx.Uint64(utils.OverrideBohr.Name)
-        cfg.Eth.OverrideBohr = &v
+    if ctx.IsSet(utils.OverridePascal.Name) {
+        v := ctx.Uint64(utils.OverridePascal.Name)
+        cfg.Eth.OverridePascal = &v
+    }
+    if ctx.IsSet(utils.OverridePrague.Name) {
+        v := ctx.Uint64(utils.OverridePrague.Name)
+        cfg.Eth.OverridePrague = &v
     }
     if ctx.IsSet(utils.OverrideVerkle.Name) {
         v := ctx.Uint64(utils.OverrideVerkle.Name)
@@ -67,13 +67,13 @@ var (
         utils.DirectBroadcastFlag,
         utils.DisableSnapProtocolFlag,
         utils.EnableTrustProtocolFlag,
-        utils.PipeCommitFlag,
         utils.RangeLimitFlag,
         utils.USBFlag,
         utils.SmartCardDaemonPathFlag,
         utils.RialtoHash,
         utils.OverridePassedForkTime,
-        utils.OverrideBohr,
+        utils.OverridePascal,
+        utils.OverridePrague,
         utils.OverrideVerkle,
         utils.OverrideFullImmutabilityThreshold,
         utils.OverrideMinBlocksForBlobRequests,
@@ -91,6 +91,7 @@ var (
         utils.TxPoolGlobalSlotsFlag,
         utils.TxPoolAccountQueueFlag,
         utils.TxPoolGlobalQueueFlag,
+        utils.TxPoolOverflowPoolSlotsFlag,
         utils.TxPoolLifetimeFlag,
         utils.TxPoolReannounceTimeFlag,
         utils.BlobPoolDataDirFlag,
@@ -117,11 +117,6 @@ var (
         Usage:    "Enable trust protocol",
         Category: flags.FastNodeCategory,
     }
-    PipeCommitFlag = &cli.BoolFlag{
-        Name:     "pipecommit",
-        Usage:    "Enable MPT pipeline commit, it will improve syncing performance. It is an experimental feature(default is false)",
-        Category: flags.DeprecatedCategory,
-    }
     RangeLimitFlag = &cli.BoolFlag{
         Name:     "rangelimit",
         Usage:    "Enable 5000 blocks limit for range query",
@@ -310,12 +305,17 @@ var (
     }
     OverridePassedForkTime = &cli.Uint64Flag{
         Name:     "override.passedforktime",
-        Usage:    "Manually specify the hard fork timestamp except the last one, overriding the bundled setting",
+        Usage:    "Manually specify the hard fork timestamps which have passed on the mainnet, overriding the bundled setting",
         Category: flags.EthCategory,
     }
-    OverrideBohr = &cli.Uint64Flag{
-        Name:     "override.bohr",
-        Usage:    "Manually specify the Bohr fork timestamp, overriding the bundled setting",
+    OverridePascal = &cli.Uint64Flag{
+        Name:     "override.pascal",
+        Usage:    "Manually specify the Pascal fork timestamp, overriding the bundled setting",
+        Category: flags.EthCategory,
+    }
+    OverridePrague = &cli.Uint64Flag{
+        Name:     "override.prague",
+        Usage:    "Manually specify the Prague fork timestamp, overriding the bundled setting",
         Category: flags.EthCategory,
     }
     OverrideVerkle = &cli.Uint64Flag{
@@ -453,6 +453,12 @@ var (
         Value:    ethconfig.Defaults.TxPool.GlobalQueue,
         Category: flags.TxPoolCategory,
     }
+    TxPoolOverflowPoolSlotsFlag = &cli.Uint64Flag{
+        Name:     "txpool.overflowpoolslots",
+        Usage:    "Maximum number of transaction slots in overflow pool",
+        Value:    ethconfig.Defaults.TxPool.OverflowPoolSlots,
+        Category: flags.TxPoolCategory,
+    }
     TxPoolLifetimeFlag = &cli.DurationFlag{
         Name:     "txpool.lifetime",
         Usage:    "Maximum amount of time non-executable transaction are queued",
@@ -1784,6 +1790,9 @@ func setTxPool(ctx *cli.Context, cfg *legacypool.Config) {
     if ctx.IsSet(TxPoolGlobalQueueFlag.Name) {
         cfg.GlobalQueue = ctx.Uint64(TxPoolGlobalQueueFlag.Name)
     }
+    if ctx.IsSet(TxPoolOverflowPoolSlotsFlag.Name) {
+        cfg.OverflowPoolSlots = ctx.Uint64(TxPoolOverflowPoolSlotsFlag.Name)
+    }
     if ctx.IsSet(TxPoolLifetimeFlag.Name) {
         cfg.Lifetime = ctx.Duration(TxPoolLifetimeFlag.Name)
     }
@@ -1968,9 +1977,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
     if ctx.IsSet(EnableTrustProtocolFlag.Name) {
         cfg.EnableTrustProtocol = ctx.IsSet(EnableTrustProtocolFlag.Name)
     }
-    if ctx.IsSet(PipeCommitFlag.Name) {
-        log.Warn("The --pipecommit flag is deprecated and could be removed in the future!")
-    }
     if ctx.IsSet(RangeLimitFlag.Name) {
         cfg.RangeLimit = ctx.Bool(RangeLimitFlag.Name)
     }
@@ -2305,16 +2311,17 @@ func EnableNodeInfo(poolConfig *legacypool.Config, nodeInfo *p2p.NodeInfo) Setup
     return func() {
         // register node info into metrics
         metrics.NewRegisteredLabel("node-info", nil).Mark(map[string]interface{}{
             "Enode":             nodeInfo.Enode,
             "ENR":               nodeInfo.ENR,
             "ID":                nodeInfo.ID,
             "PriceLimit":        poolConfig.PriceLimit,
             "PriceBump":         poolConfig.PriceBump,
             "AccountSlots":      poolConfig.AccountSlots,
             "GlobalSlots":       poolConfig.GlobalSlots,
             "AccountQueue":      poolConfig.AccountQueue,
             "GlobalQueue":       poolConfig.GlobalQueue,
-            "Lifetime":          poolConfig.Lifetime,
+            "OverflowPoolSlots": poolConfig.OverflowPoolSlots,
+            "Lifetime":          poolConfig.Lifetime,
         })
     }
 }
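Taken together, the new txpool.overflowpoolslots flag, its setTxPool plumbing, and the extra OverflowPoolSlots node-info label size a secondary transaction buffer. Judging by the flag's usage string, it caps how many transaction slots can be parked in an overflow pool once the primary pool is full; the sketch below is a toy illustration of that admission idea, not bsc's actual pool code:

```go
package main

import "fmt"

// pool is a toy: a fixed-size primary pool plus an overflow buffer sized
// like --txpool.overflowpoolslots.
type pool struct {
	primaryCap, overflowCap int
	primary, overflow       []string
}

func (p *pool) add(tx string) {
	switch {
	case len(p.primary) < p.primaryCap:
		p.primary = append(p.primary, tx)
	case len(p.overflow) < p.overflowCap:
		p.overflow = append(p.overflow, tx) // parked instead of dropped
	default:
		// dropped once both buffers are full
	}
}

func main() {
	p := &pool{primaryCap: 2, overflowCap: 1}
	for _, tx := range []string{"a", "b", "c", "d"} {
		p.add(tx)
	}
	fmt.Println(p.primary, p.overflow) // [a b] [c]
}
```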
@@ -1806,27 +1806,30 @@ func (p *Parlia) getCurrentValidators(blockHash common.Hash, blockNum *big.Int)
 func (p *Parlia) distributeIncoming(val common.Address, state *state.StateDB, header *types.Header, chain core.ChainContext,
     txs *[]*types.Transaction, receipts *[]*types.Receipt, receivedTxs *[]*types.Transaction, usedGas *uint64, mining bool) error {
     coinbase := header.Coinbase

+    doDistributeSysReward := !p.chainConfig.IsKepler(header.Number, header.Time) &&
+        state.GetBalance(common.HexToAddress(systemcontracts.SystemRewardContract)).Cmp(maxSystemBalance) < 0
+    if doDistributeSysReward {
+        balance := state.GetBalance(consensus.SystemAddress)
+        rewards := new(uint256.Int)
+        rewards = rewards.Rsh(balance, systemRewardPercent)
+        if rewards.Cmp(common.U2560) > 0 {
+            state.SetBalance(consensus.SystemAddress, balance.Sub(balance, rewards))
+            state.AddBalance(coinbase, rewards)
+            err := p.distributeToSystem(rewards.ToBig(), state, header, chain, txs, receipts, receivedTxs, usedGas, mining)
+            if err != nil {
+                return err
+            }
+            log.Trace("distribute to system reward pool", "block hash", header.Hash(), "amount", rewards)
+        }
+    }

     balance := state.GetBalance(consensus.SystemAddress)
     if balance.Cmp(common.U2560) <= 0 {
         return nil
     }
     state.SetBalance(consensus.SystemAddress, common.U2560)
     state.AddBalance(coinbase, balance)

-    doDistributeSysReward := !p.chainConfig.IsKepler(header.Number, header.Time) &&
-        state.GetBalance(common.HexToAddress(systemcontracts.SystemRewardContract)).Cmp(maxSystemBalance) < 0
-    if doDistributeSysReward {
-        rewards := new(uint256.Int)
-        rewards = rewards.Rsh(balance, systemRewardPercent)
-        if rewards.Cmp(common.U2560) > 0 {
-            err := p.distributeToSystem(rewards.ToBig(), state, header, chain, txs, receipts, receivedTxs, usedGas, mining)
-            if err != nil {
-                return err
-            }
-            log.Trace("distribute to system reward pool", "block hash", header.Hash(), "amount", rewards)
-            balance = balance.Sub(balance, rewards)
-        }
-    }
     log.Trace("distribute to validator contract", "block hash", header.Hash(), "amount", balance)
     return p.distributeToValidator(balance.ToBig(), val, state, header, chain, txs, receipts, receivedTxs, usedGas, mining)
 }
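Both the old and new distributeIncoming compute the system-reward share with a right shift, rewards = balance >> systemRewardPercent; the refactor changes only the ordering, taking the cut from the SystemAddress balance up front instead of clawing it back after the full sweep to the coinbase. A standalone check of the split arithmetic (the shift amount 4, i.e. 1/16, is an assumption; the diff does not show the constant):

```go
package main

import (
	"fmt"

	"github.com/holiman/uint256"
)

func main() {
	const systemRewardPercent = 4 // assumed: 1/2^4 = 1/16 of the balance
	balance := uint256.NewInt(1_600_000)
	rewards := new(uint256.Int).Rsh(balance, systemRewardPercent)
	toValidator := new(uint256.Int).Sub(balance, rewards)
	fmt.Println(rewards, toValidator) // 100000 1500000
}
```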
@@ -19,9 +19,7 @@ package core
 import (
     "errors"
     "fmt"
-    "time"

-    "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/consensus"
     "github.com/ethereum/go-ethereum/core/state"
     "github.com/ethereum/go-ethereum/core/types"
@@ -29,8 +27,6 @@ import (
     "github.com/ethereum/go-ethereum/trie"
 )

-const badBlockCacheExpire = 30 * time.Second
-
 type BlockValidatorOption func(*BlockValidator) *BlockValidator

 func EnableRemoteVerifyManager(remoteValidator *remoteVerifyManager) BlockValidatorOption {
@@ -74,9 +70,6 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
     if v.bc.HasBlockAndState(block.Hash(), block.NumberU64()) {
         return ErrKnownBlock
     }
-    if v.bc.isCachedBadBlock(block) {
-        return ErrKnownBadBlock
-    }
     // Header validity is known at this point. Here we verify that uncles, transactions
     // and withdrawals given in the block body match the header.
     header := block.Header()
@@ -192,23 +185,12 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD
             return nil
         },
     }
-    if statedb.IsPipeCommit() {
-        validateFuns = append(validateFuns, func() error {
-            if err := statedb.WaitPipeVerification(); err != nil {
-                return err
-            }
-            statedb.CorrectAccountsRoot(common.Hash{})
-            statedb.Finalise(v.config.IsEIP158(header.Number))
-            return nil
-        })
-    } else {
-        validateFuns = append(validateFuns, func() error {
-            if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root {
-                return fmt.Errorf("invalid merkle root (remote: %x local: %x) dberr: %w", header.Root, root, statedb.Error())
-            }
-            return nil
-        })
-    }
+    validateFuns = append(validateFuns, func() error {
+        if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root {
+            return fmt.Errorf("invalid merkle root (remote: %x local: %x) dberr: %w", header.Root, root, statedb.Error())
+        }
+        return nil
+    })
     validateRes := make(chan error, len(validateFuns))
     for _, f := range validateFuns {
         tmpFunc := f
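The context lines at the end of the last hunk (validateRes, tmpFunc := f) show how ValidateState runs its checks: each closure in validateFuns is presumably launched on its own goroutine, with errors drained from a buffered channel (the loop body is truncated in this view). A self-contained sketch of that fan-out pattern, with invented checks:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	// Two invented checks standing in for the real usedGas/receipt/root
	// validations.
	validateFuns := []func() error{
		func() error { return nil },
		func() error { return errors.New("invalid merkle root") },
	}
	validateRes := make(chan error, len(validateFuns))
	for _, f := range validateFuns {
		tmpFunc := f // capture the loop variable, as the diff's context lines do
		go func() { validateRes <- tmpFunc() }()
	}
	for range validateFuns {
		if err := <-validateRes; err != nil {
			fmt.Println("block rejected:", err)
		}
	}
}
```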
@@ -102,11 +102,10 @@ var (

     blockRecvTimeDiffGauge = metrics.NewRegisteredGauge("chain/block/recvtimediff", nil)

-    errStateRootVerificationFailed = errors.New("state root verification failed")
     errInsertionInterrupted = errors.New("insertion is interrupted")
     errChainStopped         = errors.New("blockchain is stopped")
     errInvalidOldChain      = errors.New("invalid old chain")
     errInvalidNewChain      = errors.New("invalid new chain")
 )

 const (
@@ -116,7 +115,6 @@ const (
     receiptsCacheLimit  = 10000
     sidecarsCacheLimit  = 1024
     txLookupCacheLimit  = 1024
-    maxBadBlockLimit    = 16
     maxFutureBlocks     = 256
     maxTimeFutureBlocks = 30
     TriesInMemory       = 128
@@ -126,8 +124,6 @@ const (
     diffLayerFreezerRecheckInterval = 3 * time.Second
     maxDiffForkDist = 11 // Maximum allowed backward distance from the chain head

-    rewindBadBlockInterval = 1 * time.Second
-
     // BlockChainVersion ensures that an incompatible database forces a resync from scratch.
     //
     // Changelog:
@@ -294,8 +290,6 @@ type BlockChain struct {

     // future blocks are blocks added for later processing
     futureBlocks *lru.Cache[common.Hash, *types.Block]
-    // Cache for the blocks that failed to pass MPT root verification
-    badBlockCache *lru.Cache[common.Hash, time.Time]

     // trusted diff layers
     diffLayerCache *exlru.Cache // Cache for the diffLayers
@@ -316,7 +310,6 @@ type BlockChain struct {
     processor  Processor // Block transaction processor interface
     forker     *ForkChoice
     vmConfig   vm.Config
-    pipeCommit bool

     // monitor
     doubleSignMonitor *monitor.DoubleSignMonitor
@@ -378,7 +371,6 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
         blockCache:         lru.NewCache[common.Hash, *types.Block](blockCacheLimit),
         txLookupCache:      lru.NewCache[common.Hash, txLookup](txLookupCacheLimit),
         futureBlocks:       lru.NewCache[common.Hash, *types.Block](maxFutureBlocks),
-        badBlockCache:      lru.NewCache[common.Hash, time.Time](maxBadBlockLimit),
         diffLayerCache:     diffLayerCache,
         diffLayerChanCache: diffLayerChanCache,
         engine:             engine,
@@ -559,11 +551,6 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
         bc.wg.Add(1)
         go bc.trustedDiffLayerLoop()
     }
-    if bc.pipeCommit {
-        // check current block and rewind invalid one
-        bc.wg.Add(1)
-        go bc.rewindInvalidHeaderBlockLoop()
-    }

     if bc.doubleSignMonitor != nil {
         bc.wg.Add(1)
@@ -817,26 +804,6 @@ func (bc *BlockChain) SetHeadWithTimestamp(timestamp uint64) error {
     return nil
 }

-func (bc *BlockChain) tryRewindBadBlocks() {
-    if !bc.chainmu.TryLock() {
-        return
-    }
-    defer bc.chainmu.Unlock()
-    block := bc.CurrentBlock()
-    snaps := bc.snaps
-    // Verified and Result is false
-    if snaps != nil && snaps.Snapshot(block.Root) != nil &&
-        snaps.Snapshot(block.Root).Verified() && !snaps.Snapshot(block.Root).WaitAndGetVerifyRes() {
-        // Rewind by one block
-        log.Warn("current block verified failed, rewind to its parent", "height", block.Number.Uint64(), "hash", block.Hash())
-        bc.futureBlocks.Remove(block.Hash())
-        bc.badBlockCache.Add(block.Hash(), time.Now())
-        bc.diffLayerCache.Remove(block.Hash())
-        bc.reportBlock(bc.GetBlockByHash(block.Hash()), nil, errStateRootVerificationFailed)
-        bc.setHeadBeyondRoot(block.Number.Uint64()-1, 0, common.Hash{}, false)
-    }
-}
-
 // rewindHashHead implements the logic of rewindHead in the context of hash scheme.
 func (bc *BlockChain) rewindHashHead(head *types.Header, root common.Hash) (*types.Header, uint64) {
     var (
@@ -1893,7 +1860,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
         return nil
     }
     // Commit all cached state changes into underlying memory database.
-    _, diffLayer, err := state.Commit(block.NumberU64(), bc.tryRewindBadBlocks, tryCommitTrieDB)
+    _, diffLayer, err := state.Commit(block.NumberU64(), tryCommitTrieDB)
     if err != nil {
         return err
     }
@@ -2269,9 +2236,6 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
         }

         // Process block using the parent state as reference point
-        if bc.pipeCommit {
-            statedb.EnablePipeCommit()
-        }
         statedb.SetExpectedStateRoot(block.Root())
         pstart := time.Now()
         statedb, receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig)
@@ -2889,22 +2853,6 @@ func (bc *BlockChain) updateFutureBlocks() {
     }
 }

-func (bc *BlockChain) rewindInvalidHeaderBlockLoop() {
-    recheck := time.NewTicker(rewindBadBlockInterval)
-    defer func() {
-        recheck.Stop()
-        bc.wg.Done()
-    }()
-    for {
-        select {
-        case <-recheck.C:
-            bc.tryRewindBadBlocks()
-        case <-bc.quit:
-            return
-        }
-    }
-}
-
 func (bc *BlockChain) trustedDiffLayerLoop() {
     recheck := time.NewTicker(diffLayerFreezerRecheckInterval)
     defer func() {
@@ -3042,17 +2990,6 @@ func (bc *BlockChain) skipBlock(err error, it *insertIterator) bool {
     return false
 }

-func (bc *BlockChain) isCachedBadBlock(block *types.Block) bool {
-    if timeAt, exist := bc.badBlockCache.Get(block.Hash()); exist {
-        if time.Since(timeAt) >= badBlockCacheExpire {
-            bc.badBlockCache.Remove(block.Hash())
-            return false
-        }
-        return true
-    }
-    return false
-}
-
 // reportBlock logs a bad block error.
 // bad block need not save receipts & sidecars.
 func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
@@ -3114,11 +3051,6 @@ func (bc *BlockChain) InsertHeaderChain(chain []*types.Header) (int, error) {

 func (bc *BlockChain) TriesInMemory() uint64 { return bc.triesInMemory }

-func EnablePipelineCommit(bc *BlockChain) (*BlockChain, error) {
-    bc.pipeCommit = false
-    return bc, nil
-}
-
 func EnablePersistDiff(limit uint64) BlockChainOption {
     return func(chain *BlockChain) (*BlockChain, error) {
         chain.diffLayerFreezerBlockLimit = limit
@@ -237,7 +237,7 @@ func TestFreezeDiffLayer(t *testing.T) {
         // Wait for the buffer to be zero.
     }
     // Minus one empty block.
-    if fullBackend.chain.diffQueue.Size() > blockNum-1 && fullBackend.chain.diffQueue.Size() < blockNum-2 {
+    if fullBackend.chain.diffQueue.Size() != blockNum-1 {
         t.Errorf("size of diff queue is wrong, expected: %d, get: %d", blockNum-1, fullBackend.chain.diffQueue.Size())
     }

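The old assertion was vacuous: no value is simultaneously greater than blockNum-1 and less than blockNum-2, so the Errorf branch could never fire; the != form makes the size check real. A quick demonstration:

```go
package main

import "fmt"

func main() {
	blockNum := 10
	hits := 0
	for size := -5; size <= 25; size++ {
		if size > blockNum-1 && size < blockNum-2 { // the old guard
			hits++
		}
	}
	fmt.Println(hits) // 0 — the condition is unsatisfiable
}
```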
@@ -351,12 +351,6 @@ func (bc *BlockChain) HasState(hash common.Hash) bool {
     if bc.NoTries() {
         return bc.snaps != nil && bc.snaps.Snapshot(hash) != nil
     }
-    if bc.pipeCommit && bc.snaps != nil {
-        // If parent snap is pending on verification, treat it as state exist
-        if s := bc.snaps.Snapshot(hash); s != nil && !s.Verified() {
-            return true
-        }
-    }
     _, err := bc.stateCache.OpenTrie(hash)
     return err == nil
 }
@@ -51,8 +51,7 @@ import (
 // So we can deterministically seed different blockchains
 var (
     canonicalSeed = 1
-    forkSeed1     = 2
-    forkSeed2     = 3
+    forkSeed      = 2

     TestTriesInMemory = 128
 )
@@ -61,19 +60,15 @@ var (
 // chain. Depending on the full flag, it creates either a full block chain or a
 // header only chain. The database and genesis specification for block generation
 // are also returned in case more test blocks are needed later.
-func newCanonical(engine consensus.Engine, n int, full bool, scheme string, pipeline bool) (ethdb.Database, *Genesis, *BlockChain, error) {
+func newCanonical(engine consensus.Engine, n int, full bool, scheme string) (ethdb.Database, *Genesis, *BlockChain, error) {
     var (
         genesis = &Genesis{
             BaseFee: big.NewInt(params.InitialBaseFee),
             Config:  params.AllEthashProtocolChanges,
         }
     )

     // Initialize a fresh chain with only a genesis block
     var ops []BlockChainOption
-    if pipeline {
-        ops = append(ops, EnablePipelineCommit)
-    }
     blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil, ops...)
     // Create and inject the requested chain
     if n == 0 {
@@ -96,53 +91,9 @@ func newGwei(n int64) *big.Int {
 }

 // Test fork of length N starting from block i
-func testInvalidStateRootBlockImport(t *testing.T, blockchain *BlockChain, i, n int, pipeline bool) {
+func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, comparator func(td1, td2 *big.Int), scheme string) {
     // Copy old chain up to #i into a new db
-    db, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, true, rawdb.HashScheme, pipeline)
-    if err != nil {
-        t.Fatal("could not make new canonical in testFork", err)
-    }
-    defer blockchain2.Stop()
-
-    // Assert the chains have the same header/block at #i
-    hash1 := blockchain.GetBlockByNumber(uint64(i)).Hash()
-    hash2 := blockchain2.GetBlockByNumber(uint64(i)).Hash()
-    if hash1 != hash2 {
-        t.Errorf("chain content mismatch at %d: have hash %v, want hash %v", i, hash2, hash1)
-    }
-    // Extend the newly created chain
-    blockChainB := makeBlockChain(blockchain2.chainConfig, blockchain2.GetBlockByHash(blockchain2.CurrentBlock().Hash()), n, ethash.NewFaker(), db, forkSeed1)
-    for idx, block := range blockChainB {
-        block.SetRoot(common.Hash{0: byte(forkSeed1), 19: byte(idx)})
-    }
-    previousBlock := blockchain.CurrentBlock()
-    // Sanity check that the forked chain can be imported into the original
-    if _, err := blockchain.InsertChain(blockChainB); err == nil {
-        t.Fatalf("failed to report insert error")
-    }
-
-    time.Sleep(2 * rewindBadBlockInterval)
-    latestBlock := blockchain.CurrentBlock()
-    if latestBlock.Hash() != previousBlock.Hash() || latestBlock.Number.Uint64() != previousBlock.Number.Uint64() {
-        t.Fatalf("rewind do not take effect")
-    }
-    db, _, blockchain3, err := newCanonical(ethash.NewFaker(), i, true, rawdb.HashScheme, pipeline)
-    if err != nil {
-        t.Fatal("could not make new canonical in testFork", err)
-    }
-    defer blockchain3.Stop()
-
-    blockChainC := makeBlockChain(blockchain3.chainConfig, blockchain3.GetBlockByHash(blockchain3.CurrentBlock().Hash()), n, ethash.NewFaker(), db, forkSeed2)
-
-    if _, err := blockchain.InsertChain(blockChainC); err != nil {
-        t.Fatalf("failed to insert forking chain: %v", err)
-    }
-}
-
-// Test fork of length N starting from block i
-func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, comparator func(td1, td2 *big.Int), scheme string, pipeline bool) {
-    // Copy old chain up to #i into a new db
-    genDb, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, full, scheme, pipeline)
+    genDb, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, full, scheme)
     if err != nil {
         t.Fatal("could not make new canonical in testFork", err)
     }
@@ -166,12 +117,12 @@ func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, compara
         headerChainB []*types.Header
     )
     if full {
-        blockChainB = makeBlockChain(blockchain2.chainConfig, blockchain2.GetBlockByHash(blockchain2.CurrentBlock().Hash()), n, ethash.NewFaker(), genDb, forkSeed1)
+        blockChainB = makeBlockChain(blockchain2.chainConfig, blockchain2.GetBlockByHash(blockchain2.CurrentBlock().Hash()), n, ethash.NewFaker(), genDb, forkSeed)
         if _, err := blockchain2.InsertChain(blockChainB); err != nil {
             t.Fatalf("failed to insert forking chain: %v", err)
         }
     } else {
-        headerChainB = makeHeaderChain(blockchain2.chainConfig, blockchain2.CurrentHeader(), n, ethash.NewFaker(), genDb, forkSeed1)
+        headerChainB = makeHeaderChain(blockchain2.chainConfig, blockchain2.CurrentHeader(), n, ethash.NewFaker(), genDb, forkSeed)
         if _, err := blockchain2.InsertHeaderChain(headerChainB); err != nil {
             t.Fatalf("failed to insert forking chain: %v", err)
         }
@@ -182,7 +133,7 @@ func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, compara
     if full {
         cur := blockchain.CurrentBlock()
         tdPre = blockchain.GetTd(cur.Hash(), cur.Number.Uint64())
-        if err := testBlockChainImport(blockChainB, pipeline, blockchain); err != nil {
+        if err := testBlockChainImport(blockChainB, blockchain); err != nil {
             t.Fatalf("failed to import forked block chain: %v", err)
         }
         last := blockChainB[len(blockChainB)-1]
@@ -202,7 +153,7 @@ func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, compara

 // testBlockChainImport tries to process a chain of blocks, writing them into
 // the database if successful.
-func testBlockChainImport(chain types.Blocks, pipelineCommit bool, blockchain *BlockChain) error {
+func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
     for _, block := range chain {
         // Try and process the block
         err := blockchain.engine.VerifyHeader(blockchain, block.Header())
@@ -220,9 +171,6 @@ func testBlockChainImport(chain types.Blocks, pipelineCommit bool, blockchain *B
             return err
         }
         statedb.SetExpectedStateRoot(block.Root())
-        if pipelineCommit {
-            statedb.EnablePipeCommit()
-        }
         statedb, receipts, _, usedGas, err := blockchain.processor.Process(block, statedb, vm.Config{})
         if err != nil {
             blockchain.reportBlock(block, receipts, err)
@@ -262,26 +210,13 @@ func testHeaderChainImport(chain []*types.Header, blockchain *BlockChain) error
     return nil
 }

-func TestBlockImportVerification(t *testing.T) {
-    length := 5
-
-    // Make first chain starting from genesis
-    _, _, processor, err := newCanonical(ethash.NewFaker(), length, true, rawdb.HashScheme, true)
-    if err != nil {
-        t.Fatalf("failed to make new canonical chain: %v", err)
-    }
-    defer processor.Stop()
-    // Start fork from current height
-    processor, _ = EnablePipelineCommit(processor)
-    testInvalidStateRootBlockImport(t, processor, length, 10, true)
-}
 func TestLastBlock(t *testing.T) {
     testLastBlock(t, rawdb.HashScheme)
     testLastBlock(t, rawdb.PathScheme)
 }

 func testLastBlock(t *testing.T, scheme string) {
-    genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, true, scheme, false)
+    genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, true, scheme)
     if err != nil {
         t.Fatalf("failed to create pristine chain: %v", err)
     }
@@ -300,7 +235,7 @@ func testLastBlock(t *testing.T, scheme string) {
 // The chain is reorged to whatever specified.
 func testInsertAfterMerge(t *testing.T, blockchain *BlockChain, i, n int, full bool, scheme string) {
     // Copy old chain up to #i into a new db
-    genDb, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, full, scheme, false)
+    genDb, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, full, scheme)
     if err != nil {
         t.Fatal("could not make new canonical in testFork", err)
     }
@@ -321,7 +256,7 @@ func testInsertAfterMerge(t *testing.T, blockchain *BlockChain, i, n int, full b

     // Extend the newly created chain
     if full {
-        blockChainB := makeBlockChain(blockchain2.chainConfig, blockchain2.GetBlockByHash(blockchain2.CurrentBlock().Hash()), n, ethash.NewFaker(), genDb, forkSeed1)
+        blockChainB := makeBlockChain(blockchain2.chainConfig, blockchain2.GetBlockByHash(blockchain2.CurrentBlock().Hash()), n, ethash.NewFaker(), genDb, forkSeed)
         if _, err := blockchain2.InsertChain(blockChainB); err != nil {
             t.Fatalf("failed to insert forking chain: %v", err)
         }
@@ -332,7 +267,7 @@ func testInsertAfterMerge(t *testing.T, blockchain *BlockChain, i, n int, full b
             t.Fatalf("failed to reorg to the given chain")
         }
     } else {
-        headerChainB := makeHeaderChain(blockchain2.chainConfig, blockchain2.CurrentHeader(), n, ethash.NewFaker(), genDb, forkSeed1)
+        headerChainB := makeHeaderChain(blockchain2.chainConfig, blockchain2.CurrentHeader(), n, ethash.NewFaker(), genDb, forkSeed)
         if _, err := blockchain2.InsertHeaderChain(headerChainB); err != nil {
             t.Fatalf("failed to insert forking chain: %v", err)
         }
@@ -348,21 +283,20 @@ func testInsertAfterMerge(t *testing.T, blockchain *BlockChain, i, n int, full b
 // Tests that given a starting canonical chain of a given size, it can be extended
 // with various length chains.
 func TestExtendCanonicalHeaders(t *testing.T) {
-    testExtendCanonical(t, false, rawdb.HashScheme, false)
-    testExtendCanonical(t, false, rawdb.PathScheme, false)
+    testExtendCanonical(t, false, rawdb.HashScheme)
+    testExtendCanonical(t, false, rawdb.PathScheme)
 }

 func TestExtendCanonicalBlocks(t *testing.T) {
-    testExtendCanonical(t, true, rawdb.HashScheme, false)
-    testExtendCanonical(t, true, rawdb.PathScheme, false)
-    testExtendCanonical(t, true, rawdb.HashScheme, true)
+    testExtendCanonical(t, true, rawdb.HashScheme)
+    testExtendCanonical(t, true, rawdb.PathScheme)
 }

-func testExtendCanonical(t *testing.T, full bool, scheme string, pipeline bool) {
+func testExtendCanonical(t *testing.T, full bool, scheme string) {
     length := 5

     // Make first chain starting from genesis
-    _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, pipeline)
+    _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme)
     if err != nil {
         t.Fatalf("failed to make new canonical chain: %v", err)
     }
@@ -375,10 +309,10 @@ func testExtendCanonical(t *testing.T, full bool, scheme string, pipeline bool)
         }
     }
     // Start fork from current height
-    testFork(t, processor, length, 1, full, better, scheme, pipeline)
-    testFork(t, processor, length, 2, full, better, scheme, pipeline)
-    testFork(t, processor, length, 5, full, better, scheme, pipeline)
-    testFork(t, processor, length, 10, full, better, scheme, pipeline)
+    testFork(t, processor, length, 1, full, better, scheme)
+    testFork(t, processor, length, 2, full, better, scheme)
+    testFork(t, processor, length, 5, full, better, scheme)
+    testFork(t, processor, length, 10, full, better, scheme)
 }

 // Tests that given a starting canonical chain of a given size, it can be extended
@@ -396,7 +330,7 @@ func testExtendCanonicalAfterMerge(t *testing.T, full bool, scheme string) {
     length := 5

     // Make first chain starting from genesis
-    _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, false)
+    _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme)
     if err != nil {
         t.Fatalf("failed to make new canonical chain: %v", err)
     }
@@ -409,20 +343,19 @@ func testExtendCanonicalAfterMerge(t *testing.T, full bool, scheme string) {
 // Tests that given a starting canonical chain of a given size, creating shorter
 // forks do not take canonical ownership.
 func TestShorterForkHeaders(t *testing.T) {
-    testShorterFork(t, false, rawdb.HashScheme, false)
-    testShorterFork(t, false, rawdb.PathScheme, false)
+    testShorterFork(t, false, rawdb.HashScheme)
+    testShorterFork(t, false, rawdb.PathScheme)
 }
 func TestShorterForkBlocks(t *testing.T) {
-    testShorterFork(t, true, rawdb.HashScheme, false)
-    testShorterFork(t, true, rawdb.PathScheme, false)
-    testShorterFork(t, true, rawdb.HashScheme, true)
+    testShorterFork(t, true, rawdb.HashScheme)
+    testShorterFork(t, true, rawdb.PathScheme)
 }

-func testShorterFork(t *testing.T, full bool, scheme string, pipeline bool) {
+func testShorterFork(t *testing.T, full bool, scheme string) {
     length := 10

     // Make first chain starting from genesis
-    _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, pipeline)
+    _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme)
     if err != nil {
         t.Fatalf("failed to make new canonical chain: %v", err)
     }
@@ -435,12 +368,12 @@ func testShorterFork(t *testing.T, full bool, scheme string, pipeline bool) {
         }
     }
     // Sum of numbers must be less than `length` for this to be a shorter fork
-    testFork(t, processor, 0, 3, full, worse, scheme, pipeline)
-    testFork(t, processor, 0, 7, full, worse, scheme, pipeline)
-    testFork(t, processor, 1, 1, full, worse, scheme, pipeline)
-    testFork(t, processor, 1, 7, full, worse, scheme, pipeline)
-    testFork(t, processor, 5, 3, full, worse, scheme, pipeline)
-    testFork(t, processor, 5, 4, full, worse, scheme, pipeline)
+    testFork(t, processor, 0, 3, full, worse, scheme)
+    testFork(t, processor, 0, 7, full, worse, scheme)
+    testFork(t, processor, 1, 1, full, worse, scheme)
+    testFork(t, processor, 1, 7, full, worse, scheme)
+    testFork(t, processor, 5, 3, full, worse, scheme)
+    testFork(t, processor, 5, 4, full, worse, scheme)
 }

 // Tests that given a starting canonical chain of a given size, creating shorter
@@ -458,7 +391,7 @@ func testShorterForkAfterMerge(t *testing.T, full bool, scheme string) {
     length := 10

     // Make first chain starting from genesis
-    _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, false)
+    _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme)
     if err != nil {
         t.Fatalf("failed to make new canonical chain: %v", err)
     }
@@ -475,20 +408,19 @@ func testShorterForkAfterMerge(t *testing.T, full bool, scheme string) {
 // Tests that given a starting canonical chain of a given size, creating longer
 // forks do take canonical ownership.
 func TestLongerForkHeaders(t *testing.T) {
-    testLongerFork(t, false, rawdb.HashScheme, false)
-    testLongerFork(t, false, rawdb.PathScheme, false)
+    testLongerFork(t, false, rawdb.HashScheme)
+    testLongerFork(t, false, rawdb.PathScheme)
 }
 func TestLongerForkBlocks(t *testing.T) {
-    testLongerFork(t, true, rawdb.HashScheme, false)
-    testLongerFork(t, true, rawdb.PathScheme, false)
-    testLongerFork(t, true, rawdb.HashScheme, true)
+    testLongerFork(t, true, rawdb.HashScheme)
+    testLongerFork(t, true, rawdb.PathScheme)
 }

-func testLongerFork(t *testing.T, full bool, scheme string, pipeline bool) {
+func testLongerFork(t *testing.T, full bool, scheme string) {
     length := 10

     // Make first chain starting from genesis
-    _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, pipeline)
+    _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme)
     if err != nil {
         t.Fatalf("failed to make new canonical chain: %v", err)
     }
@@ -517,7 +449,7 @@ func testLongerForkAfterMerge(t *testing.T, full bool, scheme string) {
     length := 10

     // Make first chain starting from genesis
-    _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, false)
+    _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme)
     if err != nil {
         t.Fatalf("failed to make new canonical chain: %v", err)
     }
@@ -534,20 +466,19 @@ func testLongerForkAfterMerge(t *testing.T, full bool, scheme string) {
 // Tests that given a starting canonical chain of a given size, creating equal
 // forks do take canonical ownership.
 func TestEqualForkHeaders(t *testing.T) {
-    testEqualFork(t, false, rawdb.HashScheme, false)
-    testEqualFork(t, false, rawdb.PathScheme, false)
+    testEqualFork(t, false, rawdb.HashScheme)
+    testEqualFork(t, false, rawdb.PathScheme)
 }
 func TestEqualForkBlocks(t *testing.T) {
-    testEqualFork(t, true, rawdb.HashScheme, false)
-    testEqualFork(t, true, rawdb.PathScheme, false)
-    testEqualFork(t, true, rawdb.HashScheme, true)
+    testEqualFork(t, true, rawdb.HashScheme)
+    testEqualFork(t, true, rawdb.PathScheme)
 }

-func testEqualFork(t *testing.T, full bool, scheme string, pipeline bool) {
+func testEqualFork(t *testing.T, full bool, scheme string) {
     length := 10

     // Make first chain starting from genesis
-    _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, pipeline)
+    _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme)
     if err != nil {
         t.Fatalf("failed to make new canonical chain: %v", err)
     }
@@ -560,12 +491,12 @@ func testEqualFork(t *testing.T, full bool, scheme string, pipeline bool) {
         }
     }
     // Sum of numbers must be equal to `length` for this to be an equal fork
-    testFork(t, processor, 0, 10, full, equal, scheme, pipeline)
-    testFork(t, processor, 1, 9, full, equal, scheme, pipeline)
-    testFork(t, processor, 2, 8, full, equal, scheme, pipeline)
-    testFork(t, processor, 5, 5, full, equal, scheme, pipeline)
-    testFork(t, processor, 6, 4, full, equal, scheme, pipeline)
-    testFork(t, processor, 9, 1, full, equal, scheme, pipeline)
+    testFork(t, processor, 0, 10, full, equal, scheme)
+    testFork(t, processor, 1, 9, full, equal, scheme)
+    testFork(t, processor, 2, 8, full, equal, scheme)
+    testFork(t, processor, 5, 5, full, equal, scheme)
+    testFork(t, processor, 6, 4, full, equal, scheme)
+    testFork(t, processor, 9, 1, full, equal, scheme)
 }

 // Tests that given a starting canonical chain of a given size, creating equal
@@ -583,7 +514,7 @@ func testEqualForkAfterMerge(t *testing.T, full bool, scheme string) {
     length := 10

     // Make first chain starting from genesis
-    _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, false)
+    _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme)
     if err != nil {
         t.Fatalf("failed to make new canonical chain: %v", err)
     }
@@ -599,18 +530,17 @@ func testEqualForkAfterMerge(t *testing.T, full bool, scheme string) {

 // Tests that chains missing links do not get accepted by the processor.
 func TestBrokenHeaderChain(t *testing.T) {
-    testBrokenChain(t, false, rawdb.HashScheme, false)
-    testBrokenChain(t, false, rawdb.PathScheme, false)
+    testBrokenChain(t, false, rawdb.HashScheme)
+    testBrokenChain(t, false, rawdb.PathScheme)
 }
 func TestBrokenBlockChain(t *testing.T) {
-    testBrokenChain(t, true, rawdb.HashScheme, false)
-    testBrokenChain(t, true, rawdb.PathScheme, false)
-    testBrokenChain(t, true, rawdb.HashScheme, true)
+    testBrokenChain(t, true, rawdb.HashScheme)
+    testBrokenChain(t, true, rawdb.PathScheme)
 }

-func testBrokenChain(t *testing.T, full bool, scheme string, pipeline bool) {
+func testBrokenChain(t *testing.T, full bool, scheme string) {
     // Make chain starting from genesis
-    genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 10, full, scheme, pipeline)
+    genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 10, full, scheme)
     if err != nil {
         t.Fatalf("failed to make new canonical chain: %v", err)
     }
@@ -618,12 +548,12 @@ func testBrokenChain(t *testing.T, full bool, scheme string, pipeline bool) {

     // Create a forked chain, and try to insert with a missing link
     if full {
-        chain := makeBlockChain(blockchain.chainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), 5, ethash.NewFaker(), genDb, forkSeed1)[1:]
+        chain := makeBlockChain(blockchain.chainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), 5, ethash.NewFaker(), genDb, forkSeed)[1:]
chain := makeBlockChain(blockchain.chainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), 5, ethash.NewFaker(), genDb, forkSeed)[1:]
|
||||||
if err := testBlockChainImport(chain, pipeline, blockchain); err == nil {
|
if err := testBlockChainImport(chain, blockchain); err == nil {
|
||||||
t.Errorf("broken block chain not reported")
|
t.Errorf("broken block chain not reported")
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
chain := makeHeaderChain(blockchain.chainConfig, blockchain.CurrentHeader(), 5, ethash.NewFaker(), genDb, forkSeed1)[1:]
|
chain := makeHeaderChain(blockchain.chainConfig, blockchain.CurrentHeader(), 5, ethash.NewFaker(), genDb, forkSeed)[1:]
|
||||||
if err := testHeaderChainImport(chain, blockchain); err == nil {
|
if err := testHeaderChainImport(chain, blockchain); err == nil {
|
||||||
t.Errorf("broken header chain not reported")
|
t.Errorf("broken header chain not reported")
|
||||||
}
|
}
|
||||||
@ -633,32 +563,30 @@ func testBrokenChain(t *testing.T, full bool, scheme string, pipeline bool) {
|
|||||||
// Tests that reorganising a long difficult chain after a short easy one
|
// Tests that reorganising a long difficult chain after a short easy one
|
||||||
// overwrites the canonical numbers and links in the database.
|
// overwrites the canonical numbers and links in the database.
|
||||||
func TestReorgLongHeaders(t *testing.T) {
|
func TestReorgLongHeaders(t *testing.T) {
|
||||||
testReorgLong(t, false, rawdb.HashScheme, false)
|
testReorgLong(t, false, rawdb.HashScheme)
|
||||||
testReorgLong(t, false, rawdb.PathScheme, false)
|
testReorgLong(t, false, rawdb.PathScheme)
|
||||||
}
|
}
|
||||||
func TestReorgLongBlocks(t *testing.T) {
|
func TestReorgLongBlocks(t *testing.T) {
|
||||||
testReorgLong(t, true, rawdb.HashScheme, false)
|
testReorgLong(t, true, rawdb.HashScheme)
|
||||||
testReorgLong(t, true, rawdb.PathScheme, false)
|
testReorgLong(t, true, rawdb.PathScheme)
|
||||||
testReorgLong(t, true, rawdb.HashScheme, true)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func testReorgLong(t *testing.T, full bool, scheme string, pipeline bool) {
|
func testReorgLong(t *testing.T, full bool, scheme string) {
|
||||||
testReorg(t, []int64{0, 0, -9}, []int64{0, 0, 0, -9}, 393280+params.GenesisDifficulty.Int64(), full, scheme, pipeline)
|
testReorg(t, []int64{0, 0, -9}, []int64{0, 0, 0, -9}, 393280+params.GenesisDifficulty.Int64(), full, scheme)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Tests that reorganising a short difficult chain after a long easy one
|
// Tests that reorganising a short difficult chain after a long easy one
|
||||||
// overwrites the canonical numbers and links in the database.
|
// overwrites the canonical numbers and links in the database.
|
||||||
func TestReorgShortHeaders(t *testing.T) {
|
func TestReorgShortHeaders(t *testing.T) {
|
||||||
testReorgShort(t, false, rawdb.HashScheme, false)
|
testReorgShort(t, false, rawdb.HashScheme)
|
||||||
testReorgShort(t, false, rawdb.PathScheme, false)
|
testReorgShort(t, false, rawdb.PathScheme)
|
||||||
}
|
}
|
||||||
func TestReorgShortBlocks(t *testing.T) {
|
func TestReorgShortBlocks(t *testing.T) {
|
||||||
testReorgShort(t, true, rawdb.HashScheme, false)
|
testReorgShort(t, true, rawdb.HashScheme)
|
||||||
testReorgShort(t, true, rawdb.PathScheme, false)
|
testReorgShort(t, true, rawdb.PathScheme)
|
||||||
testReorgShort(t, true, rawdb.HashScheme, true)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func testReorgShort(t *testing.T, full bool, scheme string, pipeline bool) {
|
func testReorgShort(t *testing.T, full bool, scheme string) {
|
||||||
// Create a long easy chain vs. a short heavy one. Due to difficulty adjustment
|
// Create a long easy chain vs. a short heavy one. Due to difficulty adjustment
|
||||||
// we need a fairly long chain of blocks with different difficulties for a short
|
// we need a fairly long chain of blocks with different difficulties for a short
|
||||||
// one to become heavier than a long one. The 96 is an empirical value.
|
// one to become heavier than a long one. The 96 is an empirical value.
|
||||||
@ -670,12 +598,12 @@ func testReorgShort(t *testing.T, full bool, scheme string, pipeline bool) {
|
|||||||
for i := 0; i < len(diff); i++ {
|
for i := 0; i < len(diff); i++ {
|
||||||
diff[i] = -9
|
diff[i] = -9
|
||||||
}
|
}
|
||||||
testReorg(t, easy, diff, 12615120+params.GenesisDifficulty.Int64(), full, scheme, pipeline)
|
testReorg(t, easy, diff, 12615120+params.GenesisDifficulty.Int64(), full, scheme)
|
||||||
}
|
}
|
||||||
|
|
||||||
func testReorg(t *testing.T, first, second []int64, td int64, full bool, scheme string, pipeline bool) {
|
func testReorg(t *testing.T, first, second []int64, td int64, full bool, scheme string) {
|
||||||
// Create a pristine chain and database
|
// Create a pristine chain and database
|
||||||
genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme, pipeline)
|
genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("failed to create pristine chain: %v", err)
|
t.Fatalf("failed to create pristine chain: %v", err)
|
||||||
}
|
}
|
||||||
@ -744,19 +672,18 @@ func testReorg(t *testing.T, first, second []int64, td int64, full bool, scheme
|
|||||||
|
|
||||||
// Tests that the insertion functions detect banned hashes.
|
// Tests that the insertion functions detect banned hashes.
|
||||||
func TestBadHeaderHashes(t *testing.T) {
|
func TestBadHeaderHashes(t *testing.T) {
|
||||||
testBadHashes(t, false, rawdb.HashScheme, false)
|
testBadHashes(t, false, rawdb.HashScheme)
|
||||||
testBadHashes(t, false, rawdb.PathScheme, false)
|
testBadHashes(t, false, rawdb.PathScheme)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBadBlockHashes(t *testing.T) {
|
func TestBadBlockHashes(t *testing.T) {
|
||||||
testBadHashes(t, true, rawdb.HashScheme, false)
|
testBadHashes(t, true, rawdb.HashScheme)
|
||||||
testBadHashes(t, true, rawdb.HashScheme, true)
|
testBadHashes(t, true, rawdb.PathScheme)
|
||||||
testBadHashes(t, true, rawdb.PathScheme, false)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func testBadHashes(t *testing.T, full bool, scheme string, pipeline bool) {
|
func testBadHashes(t *testing.T, full bool, scheme string) {
|
||||||
// Create a pristine chain and database
|
// Create a pristine chain and database
|
||||||
genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme, pipeline)
|
genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("failed to create pristine chain: %v", err)
|
t.Fatalf("failed to create pristine chain: %v", err)
|
||||||
}
|
}
|
||||||
@ -786,18 +713,17 @@ func testBadHashes(t *testing.T, full bool, scheme string, pipeline bool) {
|
|||||||
// Tests that bad hashes are detected on boot, and the chain rolled back to a
|
// Tests that bad hashes are detected on boot, and the chain rolled back to a
|
||||||
// good state prior to the bad hash.
|
// good state prior to the bad hash.
|
||||||
func TestReorgBadHeaderHashes(t *testing.T) {
|
func TestReorgBadHeaderHashes(t *testing.T) {
|
||||||
testReorgBadHashes(t, false, rawdb.HashScheme, false)
|
testReorgBadHashes(t, false, rawdb.HashScheme)
|
||||||
testReorgBadHashes(t, false, rawdb.PathScheme, false)
|
testReorgBadHashes(t, false, rawdb.PathScheme)
|
||||||
}
|
}
|
||||||
func TestReorgBadBlockHashes(t *testing.T) {
|
func TestReorgBadBlockHashes(t *testing.T) {
|
||||||
testReorgBadHashes(t, true, rawdb.HashScheme, false)
|
testReorgBadHashes(t, true, rawdb.HashScheme)
|
||||||
testReorgBadHashes(t, true, rawdb.HashScheme, true)
|
testReorgBadHashes(t, true, rawdb.PathScheme)
|
||||||
testReorgBadHashes(t, true, rawdb.PathScheme, false)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func testReorgBadHashes(t *testing.T, full bool, scheme string, pipeline bool) {
|
func testReorgBadHashes(t *testing.T, full bool, scheme string) {
|
||||||
// Create a pristine chain and database
|
// Create a pristine chain and database
|
||||||
genDb, gspec, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme, pipeline)
|
genDb, gspec, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("failed to create pristine chain: %v", err)
|
t.Fatalf("failed to create pristine chain: %v", err)
|
||||||
}
|
}
|
||||||
@ -848,19 +774,18 @@ func testReorgBadHashes(t *testing.T, full bool, scheme string, pipeline bool) {
|
|||||||
|
|
||||||
// Tests chain insertions in the face of one entity containing an invalid nonce.
|
// Tests chain insertions in the face of one entity containing an invalid nonce.
|
||||||
func TestHeadersInsertNonceError(t *testing.T) {
|
func TestHeadersInsertNonceError(t *testing.T) {
|
||||||
testInsertNonceError(t, false, rawdb.HashScheme, false)
|
testInsertNonceError(t, false, rawdb.HashScheme)
|
||||||
testInsertNonceError(t, false, rawdb.PathScheme, false)
|
testInsertNonceError(t, false, rawdb.PathScheme)
|
||||||
}
|
}
|
||||||
func TestBlocksInsertNonceError(t *testing.T) {
|
func TestBlocksInsertNonceError(t *testing.T) {
|
||||||
testInsertNonceError(t, true, rawdb.HashScheme, false)
|
testInsertNonceError(t, true, rawdb.HashScheme)
|
||||||
testInsertNonceError(t, true, rawdb.HashScheme, true)
|
testInsertNonceError(t, true, rawdb.PathScheme)
|
||||||
testInsertNonceError(t, true, rawdb.PathScheme, false)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func testInsertNonceError(t *testing.T, full bool, scheme string, pipeline bool) {
|
func testInsertNonceError(t *testing.T, full bool, scheme string) {
|
||||||
doTest := func(i int) {
|
doTest := func(i int) {
|
||||||
// Create a pristine chain and database
|
// Create a pristine chain and database
|
||||||
genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme, pipeline)
|
genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("failed to create pristine chain: %v", err)
|
t.Fatalf("failed to create pristine chain: %v", err)
|
||||||
}
|
}
|
||||||
@ -1611,7 +1536,7 @@ func TestCanonicalBlockRetrieval(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func testCanonicalBlockRetrieval(t *testing.T, scheme string) {
|
func testCanonicalBlockRetrieval(t *testing.T, scheme string) {
|
||||||
_, gspec, blockchain, err := newCanonical(ethash.NewFaker(), 0, true, scheme, false)
|
_, gspec, blockchain, err := newCanonical(ethash.NewFaker(), 0, true, scheme)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("failed to create pristine chain: %v", err)
|
t.Fatalf("failed to create pristine chain: %v", err)
|
||||||
}
|
}
|
||||||
|
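Note on the hunks above: dropping the trailing `pipeline bool` leaves the state scheme as the only axis these fork/reorg helpers vary over. A minimal, hypothetical sketch of the same coverage written table-driven (only `rawdb.HashScheme`/`rawdb.PathScheme` and the slimmed helper from this diff are assumed; the table-driven wrapper itself is illustrative, not part of the change):

	// Hypothetical equivalent of TestLongerForkBlocks after this change:
	// the scheme is the only remaining test dimension, so a simple loop
	// covers the whole matrix.
	func TestLongerForkBlocksTableDriven(t *testing.T) {
		for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} {
			testLongerFork(t, true, scheme)
		}
	}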
@ -39,9 +39,6 @@ var (

 	// ErrCurrentBlockNotFound is returned when current block not found.
 	ErrCurrentBlockNotFound = errors.New("current block not found")

-	// ErrKnownBadBlock is return when the block is a known bad block
-	ErrKnownBadBlock = errors.New("already known bad block")
 )

 // List of evm-call-message pre-checking errors. All state transition messages will
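The var block keeps its remaining sentinel errors; callers are expected to match them with `errors.Is` rather than by message string. A hedged sketch of such a call site (`loadHead` is a hypothetical function, not part of this diff):

	// Hypothetical caller matching the surviving sentinel error.
	if err := loadHead(); errors.Is(err, ErrCurrentBlockNotFound) {
		// No current block yet; treat the chain as not ready and retry.
	}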
@ -217,7 +217,8 @@ func (e *GenesisMismatchError) Error() string {
 // Typically, these modifications involve hardforks that are not enabled on the BSC mainnet, intended for testing purposes.
 type ChainOverrides struct {
 	OverridePassedForkTime *uint64
-	OverrideBohr           *uint64
+	OverridePascal         *uint64
+	OverridePrague         *uint64
 	OverrideVerkle         *uint64
 }

@ -252,9 +253,13 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *triedb.Database, g
 		config.CancunTime = overrides.OverridePassedForkTime
 		config.HaberTime = overrides.OverridePassedForkTime
 		config.HaberFixTime = overrides.OverridePassedForkTime
+		config.BohrTime = overrides.OverridePassedForkTime
 	}
-	if overrides != nil && overrides.OverrideBohr != nil {
-		config.BohrTime = overrides.OverrideBohr
+	if overrides != nil && overrides.OverridePascal != nil {
+		config.PascalTime = overrides.OverridePascal
+	}
+	if overrides != nil && overrides.OverridePrague != nil {
+		config.PragueTime = overrides.OverridePrague
 	}
 	if overrides != nil && overrides.OverrideVerkle != nil {
 		config.VerkleTime = overrides.OverrideVerkle
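Each override above follows the same shape: a nil-checked `*uint64` copied onto the chain config. A sketch of a caller requesting only the Pascal activation time (the timestamp and the surrounding `db`/`triedb`/`genesis` values are placeholders assumed to be in scope; the config/hash/error return shape is an assumption about the unchanged part of the function):

	// Hypothetical caller: override PascalTime only, leave every other
	// fork at its built-in default.
	pascalAt := uint64(1_700_000_000) // placeholder unix timestamp
	overrides := &ChainOverrides{OverridePascal: &pascalAt}
	config, hash, err := SetupGenesisBlockWithOverride(db, triedb, genesis, overrides)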
@ -119,9 +119,6 @@ type diffLayer struct {
 	storageList map[common.Hash][]common.Hash          // List of storage slots for iterated retrievals, one per account. Any existing lists are sorted if non-nil
 	storageData map[common.Hash]map[common.Hash][]byte // Keyed storage slots for direct retrieval. one per account (nil means deleted)

-	verifiedCh chan struct{} // the difflayer is verified when verifiedCh is nil or closed
-	valid      bool          // mark the difflayer is valid or not.
-
 	diffed *bloomfilter.Filter // Bloom filter tracking all the diffed items up to the disk layer

 	lock sync.RWMutex
@ -145,7 +142,7 @@ func storageBloomHash(h0, h1 common.Hash) uint64 {

 // newDiffLayer creates a new diff on top of an existing snapshot, whether that's a low
 // level persistent database or a hierarchical diff already.
-func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte, verified chan struct{}) *diffLayer {
+func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
 	// Create the new layer with some pre-allocated data segments
 	dl := &diffLayer{
 		parent:      parent,
@ -154,7 +151,6 @@ func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]s
 		accountData: accounts,
 		storageData: storage,
 		storageList: make(map[common.Hash][]common.Hash),
-		verifiedCh:  verified,
 	}

 	switch parent := parent.(type) {
@ -236,39 +232,6 @@ func (dl *diffLayer) Root() common.Hash {
 	return dl.root
 }

-// WaitAndGetVerifyRes will wait until the diff layer been verified and return the verification result
-func (dl *diffLayer) WaitAndGetVerifyRes() bool {
-	if dl.verifiedCh == nil {
-		return true
-	}
-	<-dl.verifiedCh
-	return dl.valid
-}
-
-func (dl *diffLayer) MarkValid() {
-	dl.valid = true
-}
-
-// Represent whether the difflayer is been verified, does not means it is a valid or invalid difflayer
-func (dl *diffLayer) Verified() bool {
-	if dl.verifiedCh == nil {
-		return true
-	}
-	select {
-	case <-dl.verifiedCh:
-		return true
-	default:
-		return false
-	}
-}
-
-func (dl *diffLayer) CorrectAccounts(accounts map[common.Hash][]byte) {
-	dl.lock.Lock()
-	defer dl.lock.Unlock()
-
-	dl.accountData = accounts
-}
-
 // Parent returns the subsequent layer of a diff layer.
 func (dl *diffLayer) Parent() snapshot {
 	dl.lock.RLock()
@ -467,8 +430,8 @@ func (dl *diffLayer) storage(accountHash, storageHash common.Hash, depth int) ([

 // Update creates a new layer on top of the existing snapshot diff tree with
 // the specified data items.
-func (dl *diffLayer) Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte, verified chan struct{}) *diffLayer {
-	return newDiffLayer(dl, blockRoot, destructs, accounts, storage, verified)
+func (dl *diffLayer) Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
+	return newDiffLayer(dl, blockRoot, destructs, accounts, storage)
 }

 // flatten pushes all data from this point downwards, flattening everything into
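With the verification channel gone, a diff layer is fully described by its parent, root and the three data maps. A minimal sketch of the resulting layering flow using the signatures from this diff (`emptyLayer` is the package's own test helper; `destructs`, `accounts` and `storage` are assumed in scope, and are retained by the layer rather than copied):

	// Build one diff layer on a bare base, then chain a second via Update.
	layer := newDiffLayer(emptyLayer(), common.HexToHash("0x01"), destructs, accounts, storage)
	layer = layer.Update(common.HexToHash("0x02"), destructs, accounts, storage)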
@ -80,11 +80,11 @@ func TestMergeBasics(t *testing.T) {
 		}
 	}
 	// Add some (identical) layers on top
-	parent := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage), nil)
-	child := newDiffLayer(parent, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage), nil)
-	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage), nil)
-	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage), nil)
-	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage), nil)
+	parent := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
+	child := newDiffLayer(parent, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
+	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
+	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
+	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
 	// And flatten
 	merged := (child.flatten()).(*diffLayer)

@ -152,13 +152,13 @@ func TestMergeDelete(t *testing.T) {
 		}
 	}
 	// Add some flipAccs-flopping layers on top
-	parent := newDiffLayer(emptyLayer(), common.Hash{}, flipDrops(), flipAccs(), storage, nil)
-	child := parent.Update(common.Hash{}, flopDrops(), flopAccs(), storage, nil)
-	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage, nil)
-	child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage, nil)
-	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage, nil)
-	child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage, nil)
-	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage, nil)
+	parent := newDiffLayer(emptyLayer(), common.Hash{}, flipDrops(), flipAccs(), storage)
+	child := parent.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
+	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
+	child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
+	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
+	child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
+	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)

 	if data, _ := child.Account(h1); data == nil {
 		t.Errorf("last diff layer: expected %x account to be non-nil", h1)
@ -210,7 +210,7 @@ func TestInsertAndMerge(t *testing.T) {
 			accounts = make(map[common.Hash][]byte)
 			storage  = make(map[common.Hash]map[common.Hash][]byte)
 		)
-		parent = newDiffLayer(emptyLayer(), common.Hash{}, destructs, accounts, storage, nil)
+		parent = newDiffLayer(emptyLayer(), common.Hash{}, destructs, accounts, storage)
 	}
 	{
 		var (
@ -221,7 +221,7 @@ func TestInsertAndMerge(t *testing.T) {
 		accounts[acc] = randomAccount()
 		storage[acc] = make(map[common.Hash][]byte)
 		storage[acc][slot] = []byte{0x01}
-		child = newDiffLayer(parent, common.Hash{}, destructs, accounts, storage, nil)
+		child = newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
 	}
 	// And flatten
 	merged := (child.flatten()).(*diffLayer)
@ -257,7 +257,7 @@ func BenchmarkSearch(b *testing.B) {
 		for i := 0; i < 10000; i++ {
 			accounts[randomHash()] = randomAccount()
 		}
-		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage, nil)
+		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
 	}
 	var layer snapshot
 	layer = emptyLayer()
@ -299,7 +299,7 @@ func BenchmarkSearchSlot(b *testing.B) {
 			accStorage[randomHash()] = value
 			storage[accountKey] = accStorage
 		}
-		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage, nil)
+		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
 	}
 	var layer snapshot
 	layer = emptyLayer()
@ -336,7 +336,7 @@ func BenchmarkFlatten(b *testing.B) {
 			}
 			storage[accountKey] = accStorage
 		}
-		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage, nil)
+		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
 	}
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
@ -385,7 +385,7 @@ func BenchmarkJournal(b *testing.B) {
 			}
 			storage[accountKey] = accStorage
 		}
-		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage, nil)
+		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
 	}
 	layer := snapshot(emptyLayer())
 	for i := 1; i < 128; i++ {
@ -60,19 +60,6 @@ func (dl *diskLayer) Root() common.Hash {
 	return dl.root
 }

-func (dl *diskLayer) WaitAndGetVerifyRes() bool {
-	return true
-}
-
-func (dl *diskLayer) MarkValid() {}
-
-func (dl *diskLayer) Verified() bool {
-	return true
-}
-
-func (dl *diskLayer) CorrectAccounts(map[common.Hash][]byte) {
-}
-
 // Parent always returns nil as there's no layer below the disk.
 func (dl *diskLayer) Parent() snapshot {
 	return nil
@ -191,6 +178,6 @@ func (dl *diskLayer) Storage(accountHash, storageHash common.Hash) ([]byte, erro
 // Update creates a new layer on top of the existing snapshot diff tree with
 // the specified data items. Note, the maps are retained by the method to avoid
 // copying everything.
-func (dl *diskLayer) Update(blockHash common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte, verified chan struct{}) *diffLayer {
-	return newDiffLayer(dl, blockHash, destructs, accounts, storage, verified)
+func (dl *diskLayer) Update(blockHash common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
+	return newDiffLayer(dl, blockHash, destructs, accounts, storage)
 }
@ -130,7 +130,7 @@ func TestDiskMerge(t *testing.T) {
 		conModCache:   {conModCacheSlot: reverse(conModCacheSlot[:])},
 		conDelNoCache: {conDelNoCacheSlot: nil},
 		conDelCache:   {conDelCacheSlot: nil},
-	}, nil); err != nil {
+	}); err != nil {
 		t.Fatalf("failed to update snapshot tree: %v", err)
 	}
 	if err := snaps.Cap(diffRoot, 0); err != nil {
@ -353,7 +353,7 @@ func TestDiskPartialMerge(t *testing.T) {
 		conModCache:   {conModCacheSlot: reverse(conModCacheSlot[:])},
 		conDelNoCache: {conDelNoCacheSlot: nil},
 		conDelCache:   {conDelCacheSlot: nil},
-	}, nil); err != nil {
+	}); err != nil {
 		t.Fatalf("test %d: failed to update snapshot tree: %v", i, err)
 	}
 	if err := snaps.Cap(diffRoot, 0); err != nil {
@ -464,7 +464,7 @@ func TestDiskGeneratorPersistence(t *testing.T) {
 	// Modify or delete some accounts, flatten everything onto disk
 	if err := snaps.Update(diffRoot, baseRoot, nil, map[common.Hash][]byte{
 		accTwo: accTwo[:],
-	}, nil, nil); err != nil {
+	}, nil); err != nil {
 		t.Fatalf("failed to update snapshot tree: %v", err)
 	}
 	if err := snaps.Cap(diffRoot, 0); err != nil {
@ -484,7 +484,7 @@ func TestDiskGeneratorPersistence(t *testing.T) {
 		accThree: accThree.Bytes(),
 	}, map[common.Hash]map[common.Hash][]byte{
 		accThree: {accThreeSlot: accThreeSlot.Bytes()},
-	}, nil); err != nil {
+	}); err != nil {
 		t.Fatalf("failed to update snapshot tree: %v", err)
 	}
 	diskLayer := snaps.layers[snaps.diskRoot()].(*diskLayer)
@ -54,7 +54,7 @@ func TestAccountIteratorBasics(t *testing.T) {
 		}
 	}
 	// Add some (identical) layers on top
-	diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage), nil)
+	diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
 	it := diffLayer.AccountIterator(common.Hash{})
 	verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator

@ -92,7 +92,7 @@ func TestStorageIteratorBasics(t *testing.T) {
 		nilStorage[h] = nilstorage
 	}
 	// Add some (identical) layers on top
-	diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, nil, copyAccounts(accounts), copyStorage(storage), nil)
+	diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, nil, copyAccounts(accounts), copyStorage(storage))
 	for account := range accounts {
 		it, _ := diffLayer.StorageIterator(account, common.Hash{})
 		verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator
@ -223,13 +223,13 @@ func TestAccountIteratorTraversal(t *testing.T) {
 	}
 	// Stack three diff layers on top with various overlaps
 	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
-		randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil)
+		randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil)

 	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
-		randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil)
+		randomAccountSet("0xbb", "0xdd", "0xf0"), nil)

 	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
-		randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil)
+		randomAccountSet("0xcc", "0xf0", "0xff"), nil)

 	// Verify the single and multi-layer iterators
 	head := snaps.Snapshot(common.HexToHash("0x04"))
@ -270,13 +270,13 @@ func TestStorageIteratorTraversal(t *testing.T) {
 	}
 	// Stack three diff layers on top with various overlaps
 	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
-		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil), nil)
+		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil))

 	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
-		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04", "0x05", "0x06"}}, nil), nil)
+		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04", "0x05", "0x06"}}, nil))

 	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
-		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil), nil)
+		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil))

 	// Verify the single and multi-layer iterators
 	head := snaps.Snapshot(common.HexToHash("0x04"))
@ -354,14 +354,14 @@ func TestAccountIteratorTraversalValues(t *testing.T) {
 		}
 	}
 	// Assemble a stack of snapshots from the account layers
-	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, a, nil, nil)
-	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, b, nil, nil)
-	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, c, nil, nil)
-	snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, d, nil, nil)
-	snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), nil, e, nil, nil)
-	snaps.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), nil, f, nil, nil)
-	snaps.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), nil, g, nil, nil)
-	snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), nil, h, nil, nil)
+	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, a, nil)
+	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, b, nil)
+	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, c, nil)
+	snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, d, nil)
+	snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), nil, e, nil)
+	snaps.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), nil, f, nil)
+	snaps.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), nil, g, nil)
+	snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), nil, h, nil)

 	it, _ := snaps.AccountIterator(common.HexToHash("0x09"), common.Hash{})
 	head := snaps.Snapshot(common.HexToHash("0x09"))
@ -453,14 +453,14 @@ func TestStorageIteratorTraversalValues(t *testing.T) {
 		}
 	}
 	// Assemble a stack of snapshots from the account layers
-	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, randomAccountSet("0xaa"), wrapStorage(a), nil)
-	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, randomAccountSet("0xaa"), wrapStorage(b), nil)
-	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, randomAccountSet("0xaa"), wrapStorage(c), nil)
-	snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, randomAccountSet("0xaa"), wrapStorage(d), nil)
-	snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), nil, randomAccountSet("0xaa"), wrapStorage(e), nil)
-	snaps.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), nil, randomAccountSet("0xaa"), wrapStorage(e), nil)
-	snaps.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), nil, randomAccountSet("0xaa"), wrapStorage(g), nil)
-	snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), nil, randomAccountSet("0xaa"), wrapStorage(h), nil)
+	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, randomAccountSet("0xaa"), wrapStorage(a))
+	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, randomAccountSet("0xaa"), wrapStorage(b))
+	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, randomAccountSet("0xaa"), wrapStorage(c))
+	snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, randomAccountSet("0xaa"), wrapStorage(d))
+	snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), nil, randomAccountSet("0xaa"), wrapStorage(e))
+	snaps.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), nil, randomAccountSet("0xaa"), wrapStorage(e))
+	snaps.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), nil, randomAccountSet("0xaa"), wrapStorage(g))
+	snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), nil, randomAccountSet("0xaa"), wrapStorage(h))

 	it, _ := snaps.StorageIterator(common.HexToHash("0x09"), common.HexToHash("0xaa"), common.Hash{})
 	head := snaps.Snapshot(common.HexToHash("0x09"))
@ -523,7 +523,7 @@ func TestAccountIteratorLargeTraversal(t *testing.T) {
 		},
 	}
 	for i := 1; i < 128; i++ {
-		snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil, nil)
+		snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil)
 	}
 	// Iterate the entire stack and ensure everything is hit only once
 	head := snaps.Snapshot(common.HexToHash("0x80"))
@ -568,13 +568,13 @@ func TestAccountIteratorFlattening(t *testing.T) {
 	}
 	// Create a stack of diffs on top
 	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
-		randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil)
+		randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil)

 	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
-		randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil)
+		randomAccountSet("0xbb", "0xdd", "0xf0"), nil)

 	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
-		randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil)
+		randomAccountSet("0xcc", "0xf0", "0xff"), nil)

 	// Create an iterator and flatten the data from underneath it
 	it, _ := snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{})
@ -599,13 +599,13 @@ func TestAccountIteratorSeek(t *testing.T) {
 		},
 	}
 	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
-		randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil)
+		randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil)

 	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
-		randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil)
+		randomAccountSet("0xbb", "0xdd", "0xf0"), nil)

 	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
-		randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil)
+		randomAccountSet("0xcc", "0xf0", "0xff"), nil)

 	// Account set is now
 	// 02: aa, ee, f0, ff
@ -663,13 +663,13 @@ func TestStorageIteratorSeek(t *testing.T) {
 	}
 	// Stack three diff layers on top with various overlaps
 	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
-		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil), nil)
+		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil))

 	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
-		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x05", "0x06"}}, nil), nil)
+		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x05", "0x06"}}, nil))

 	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
-		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x05", "0x08"}}, nil), nil)
+		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x05", "0x08"}}, nil))

 	// Account set is now
 	// 02: 01, 03, 05
@ -726,17 +726,17 @@ func TestAccountIteratorDeletions(t *testing.T) {
 	}
 	// Stack three diff layers on top with various overlaps
 	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"),
-		nil, randomAccountSet("0x11", "0x22", "0x33"), nil, nil)
+		nil, randomAccountSet("0x11", "0x22", "0x33"), nil)

 	deleted := common.HexToHash("0x22")
 	destructed := map[common.Hash]struct{}{
 		deleted: {},
 	}
 	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"),
-		destructed, randomAccountSet("0x11", "0x33"), nil, nil)
+		destructed, randomAccountSet("0x11", "0x33"), nil)

 	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"),
-		nil, randomAccountSet("0x33", "0x44", "0x55"), nil, nil)
+		nil, randomAccountSet("0x33", "0x44", "0x55"), nil)

 	// The output should be 11,33,44,55
 	it, _ := snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{})
@ -772,10 +772,10 @@ func TestStorageIteratorDeletions(t *testing.T) {
 	}
 	// Stack three diff layers on top with various overlaps
 	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
-		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil), nil)
+		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil))

 	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
-		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x04", "0x06"}}, [][]string{{"0x01", "0x03"}}), nil)
+		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x04", "0x06"}}, [][]string{{"0x01", "0x03"}}))

 	// The output should be 02,04,05,06
 	it, _ := snaps.StorageIterator(common.HexToHash("0x03"), common.HexToHash("0xaa"), common.Hash{})
@ -791,7 +791,7 @@ func TestStorageIteratorDeletions(t *testing.T) {
 	destructed := map[common.Hash]struct{}{
 		common.HexToHash("0xaa"): {},
 	}
-	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), destructed, nil, nil, nil)
+	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), destructed, nil, nil)

 	it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{})
 	verifyIterator(t, 0, it, verifyStorage)
@ -799,7 +799,7 @@ func TestStorageIteratorDeletions(t *testing.T) {

 	// Re-insert the slots of the same account
 	snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil,
-		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x07", "0x08", "0x09"}}, nil), nil)
+		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x07", "0x08", "0x09"}}, nil))

 	// The output should be 07,08,09
 	it, _ = snaps.StorageIterator(common.HexToHash("0x05"), common.HexToHash("0xaa"), common.Hash{})
@ -807,7 +807,7 @@ func TestStorageIteratorDeletions(t *testing.T) {
 	it.Release()

 	// Destruct the whole storage but re-create the account in the same layer
-	snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), destructed, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x11", "0x12"}}, nil), nil)
+	snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), destructed, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x11", "0x12"}}, nil))
 	it, _ = snaps.StorageIterator(common.HexToHash("0x06"), common.HexToHash("0xaa"), common.Hash{})
 	verifyIterator(t, 2, it, verifyStorage) // The output should be 11,12
 	it.Release()
@ -849,7 +849,7 @@ func BenchmarkAccountIteratorTraversal(b *testing.B) {
 		},
 	}
 	for i := 1; i <= 100; i++ {
-		snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil, nil)
+		snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil)
 	}
 	// We call this once before the benchmark, so the creation of
 	// sorted accountlists are not included in the results.
@ -944,9 +944,9 @@ func BenchmarkAccountIteratorLargeBaselayer(b *testing.B) {
 			base.root: base,
 		},
 	}
-	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, makeAccounts(2000), nil, nil)
+	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, makeAccounts(2000), nil)
 	for i := 2; i <= 100; i++ {
-		snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(20), nil, nil)
+		snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(20), nil)
 	}
 	// We call this once before the benchmark, so the creation of
 	// sorted accountlists are not included in the results.
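All fixtures above now build their stacks with the five-argument `Tree.Update`. The recurring pattern in isolation (helpers such as `randomAccountSet` are the package's own test utilities; `snaps` is assumed to be a `*Tree` with a base layer at root 0x01, and the comment states the expected rather than guaranteed visiting behaviour):

	// Stack two overlapping diff layers and walk the merged account view;
	// each key should be visited exactly once, in sorted hash order.
	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
		randomAccountSet("0xaa", "0xee"), nil)
	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
		randomAccountSet("0xbb", "0xee"), nil)
	it, _ := snaps.AccountIterator(common.HexToHash("0x03"), common.Hash{})
	defer it.Release()
	for it.Next() {
		_ = it.Hash()
	}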
@ -110,7 +110,7 @@ func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, jou
 	// etc.), we just discard all diffs and try to recover them later.
 	var current snapshot = base
 	err := iterateJournal(db, func(parent common.Hash, root common.Hash, destructSet map[common.Hash]struct{}, accountData map[common.Hash][]byte, storageData map[common.Hash]map[common.Hash][]byte) error {
-		current = newDiffLayer(current, root, destructSet, accountData, storageData, nil)
+		current = newDiffLayer(current, root, destructSet, accountData, storageData)
 		return nil
 	})
 	if err != nil {
@ -100,18 +100,6 @@ type Snapshot interface {
|
|||||||
// Root returns the root hash for which this snapshot was made.
|
// Root returns the root hash for which this snapshot was made.
|
||||||
Root() common.Hash
|
Root() common.Hash
|
||||||
|
|
||||||
// WaitAndGetVerifyRes will wait until the snapshot been verified and return verification result
|
|
||||||
WaitAndGetVerifyRes() bool
|
|
||||||
|
|
||||||
// Verified returns whether the snapshot is verified
|
|
||||||
Verified() bool
|
|
||||||
|
|
||||||
// MarkValid stores the verification result
|
|
||||||
MarkValid()
|
|
||||||
|
|
||||||
// CorrectAccounts updates account data for storing the correct data during pipecommit
|
|
||||||
CorrectAccounts(map[common.Hash][]byte)
|
|
||||||
|
|
||||||
// Account directly retrieves the account associated with a particular hash in
|
// Account directly retrieves the account associated with a particular hash in
|
||||||
// the snapshot slim data format.
|
// the snapshot slim data format.
|
||||||
Account(hash common.Hash) (*types.SlimAccount, error)
|
Account(hash common.Hash) (*types.SlimAccount, error)
|
||||||
@ -142,7 +130,7 @@ type snapshot interface {
|
|||||||
// the specified data items.
|
// the specified data items.
|
||||||
//
|
//
|
||||||
// Note, the maps are retained by the method to avoid copying everything.
|
// Note, the maps are retained by the method to avoid copying everything.
|
||||||
Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte, verified chan struct{}) *diffLayer
|
Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer
|
||||||
|
|
||||||
// Journal commits an entire diff hierarchy to disk into a single journal entry.
|
// Journal commits an entire diff hierarchy to disk into a single journal entry.
|
||||||
// This is meant to be used during shutdown to persist the snapshot without
|
// This is meant to be used during shutdown to persist the snapshot without
|
||||||
@ -367,7 +355,7 @@ func (t *Tree) Snapshots(root common.Hash, limits int, nodisk bool) []Snapshot {
 
 // Update adds a new snapshot into the tree, if that can be linked to an existing
 // old parent. It is disallowed to insert a disk layer (the origin of all).
-func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte, verified chan struct{}) error {
+func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
 	// Reject noop updates to avoid self-loops in the snapshot tree. This is a
 	// special case that can only happen for Clique networks where empty blocks
 	// don't modify the state (0 block subsidy).
@ -382,7 +370,7 @@ func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs m
 	if parent == nil {
 		return fmt.Errorf("parent [%#x] snapshot missing", parentRoot)
 	}
-	snap := parent.(snapshot).Update(blockRoot, destructs, accounts, storage, verified)
+	snap := parent.(snapshot).Update(blockRoot, destructs, accounts, storage)
 
 	// Save the new snapshot for later
 	t.lock.Lock()
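Every call site now drops the trailing channel argument, and the test hunks that follow exercise exactly that. A hedged sketch of linking a new diff layer with the five-argument Update (tree construction and payloads assumed to exist; destructs and storage may be nil, as in the tests):

package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state/snapshot"
)

// link adds one child layer on top of parentRoot via the new signature.
func link(snaps *snapshot.Tree, blockRoot, parentRoot common.Hash, accounts map[common.Hash][]byte) error {
	return snaps.Update(blockRoot, parentRoot, nil, accounts, nil)
}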
@ -708,11 +696,6 @@ func (t *Tree) Journal(root common.Hash) (common.Hash, error) {
 	if snap == nil {
 		return common.Hash{}, fmt.Errorf("snapshot [%#x] missing", root)
 	}
-	// Wait the snapshot(difflayer) is verified, it means the account data also been refreshed with the correct data
-	if !snap.WaitAndGetVerifyRes() {
-		return common.Hash{}, ErrSnapshotStale
-	}
-
 	// Run the journaling
 	t.lock.Lock()
 	defer t.lock.Unlock()
@ -107,7 +107,7 @@ func TestDiskLayerExternalInvalidationFullFlatten(t *testing.T) {
 	accounts := map[common.Hash][]byte{
 		common.HexToHash("0xa1"): randomAccount(),
 	}
-	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil, nil); err != nil {
+	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
 		t.Fatalf("failed to create a diff layer: %v", err)
 	}
 	if n := len(snaps.layers); n != 2 {
@ -151,10 +151,10 @@ func TestDiskLayerExternalInvalidationPartialFlatten(t *testing.T) {
 	accounts := map[common.Hash][]byte{
 		common.HexToHash("0xa1"): randomAccount(),
 	}
-	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil, nil); err != nil {
+	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
 		t.Fatalf("failed to create a diff layer: %v", err)
 	}
-	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil, nil); err != nil {
+	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil {
 		t.Fatalf("failed to create a diff layer: %v", err)
 	}
 	if n := len(snaps.layers); n != 3 {
@ -203,13 +203,13 @@ func TestDiffLayerExternalInvalidationPartialFlatten(t *testing.T) {
 	accounts := map[common.Hash][]byte{
 		common.HexToHash("0xa1"): randomAccount(),
 	}
-	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil, nil); err != nil {
+	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
 		t.Fatalf("failed to create a diff layer: %v", err)
 	}
-	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil, nil); err != nil {
+	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil {
 		t.Fatalf("failed to create a diff layer: %v", err)
 	}
-	if err := snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, accounts, nil, nil); err != nil {
+	if err := snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, accounts, nil); err != nil {
 		t.Fatalf("failed to create a diff layer: %v", err)
 	}
 	if n := len(snaps.layers); n != 4 {
@ -263,12 +263,12 @@ func TestPostCapBasicDataAccess(t *testing.T) {
 		},
 	}
 	// The lowest difflayer
-	snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), nil, setAccount("0xa1"), nil, nil)
-	snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), nil, setAccount("0xa2"), nil, nil)
-	snaps.Update(common.HexToHash("0xb2"), common.HexToHash("0xa1"), nil, setAccount("0xb2"), nil, nil)
+	snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), nil, setAccount("0xa1"), nil)
+	snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), nil, setAccount("0xa2"), nil)
+	snaps.Update(common.HexToHash("0xb2"), common.HexToHash("0xa1"), nil, setAccount("0xb2"), nil)
 
-	snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil, nil)
-	snaps.Update(common.HexToHash("0xb3"), common.HexToHash("0xb2"), nil, setAccount("0xb3"), nil, nil)
+	snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil)
+	snaps.Update(common.HexToHash("0xb3"), common.HexToHash("0xb2"), nil, setAccount("0xb3"), nil)
 
 	// checkExist verifies if an account exists in a snapshot
 	checkExist := func(layer *diffLayer, key string) error {
@ -363,7 +363,7 @@ func TestSnaphots(t *testing.T) {
 	)
 	for i := 0; i < 129; i++ {
 		head = makeRoot(uint64(i + 2))
-		snaps.Update(head, last, nil, setAccount(fmt.Sprintf("%d", i+2)), nil, nil)
+		snaps.Update(head, last, nil, setAccount(fmt.Sprintf("%d", i+2)), nil)
 		last = head
 		snaps.Cap(head, 128) // 130 layers (128 diffs + 1 accumulator + 1 disk)
 	}
@ -456,9 +456,9 @@ func TestReadStateDuringFlattening(t *testing.T) {
 		},
 	}
 	// 4 layers in total, 3 diff layers and 1 disk layers
-	snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), nil, setAccount("0xa1"), nil, nil)
-	snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), nil, setAccount("0xa2"), nil, nil)
-	snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil, nil)
+	snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), nil, setAccount("0xa1"), nil)
+	snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), nil, setAccount("0xa2"), nil)
+	snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil)
 
 	// Obtain the topmost snapshot handler for state accessing
 	snap := snaps.Snapshot(common.HexToHash("0xa3"))
@ -35,7 +35,6 @@ import (
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/params"
-	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/trie"
 	"github.com/ethereum/go-ethereum/trie/trienode"
 	"github.com/ethereum/go-ethereum/trie/triestate"
@ -82,7 +81,6 @@ type StateDB struct {
 	stateRoot common.Hash // The calculation result of IntermediateRoot
 
 	fullProcessed bool
-	pipeCommit    bool
 
 	// These maps hold the state changes (including the corresponding
 	// original value) that occurred in this **block**.
@ -197,8 +195,7 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error)
 	}
 
 	tr, err := db.OpenTrie(root)
-	// return error when 1. failed to open trie and 2. the snap is nil or the snap is not nil and done verification
-	if err != nil && (sdb.snap == nil || sdb.snap.Verified()) {
+	if err != nil {
 		return nil, err
 	}
 	_, sdb.noTrie = tr.(*trie.EmptyTrie)
@ -300,20 +297,6 @@ func (s *StateDB) SetExpectedStateRoot(root common.Hash) {
 	s.expectedRoot = root
 }
 
-// Enable the pipeline commit function of statedb
-func (s *StateDB) EnablePipeCommit() {
-	if s.snap != nil && s.snaps.Layers() > 1 {
-		// after big merge, disable pipeCommit for now,
-		// because `s.db.TrieDB().Update` should be called after `s.trie.Commit(true)`
-		s.pipeCommit = false
-	}
-}
-
-// IsPipeCommit checks whether pipecommit is enabled on the statedb or not
-func (s *StateDB) IsPipeCommit() bool {
-	return s.pipeCommit
-}
-
 // Mark that the block is full processed
 func (s *StateDB) MarkFullProcessed() {
 	s.fullProcessed = true
@ -335,22 +318,6 @@ func (s *StateDB) Error() error {
 	return s.dbErr
 }
 
-// Not thread safe
-func (s *StateDB) Trie() (Trie, error) {
-	if s.trie == nil {
-		err := s.WaitPipeVerification()
-		if err != nil {
-			return nil, err
-		}
-		tr, err := s.db.OpenTrie(s.originalRoot)
-		if err != nil {
-			return nil, err
-		}
-		s.trie = tr
-	}
-	return s.trie, nil
-}
-
 func (s *StateDB) AddLog(log *types.Log) {
 	s.journal.append(addLogChange{txhash: s.thash})
 
@ -867,8 +834,7 @@ func (s *StateDB) copyInternal(doPrefetch bool) *StateDB {
 		// expectedRoot:  s.expectedRoot,
 		// stateRoot:     s.stateRoot,
 		originalRoot: s.originalRoot,
 		// fullProcessed: s.fullProcessed,
-		// pipeCommit:    s.pipeCommit,
 		accounts:       make(map[common.Hash][]byte),
 		storages:       make(map[common.Hash]map[common.Hash][]byte),
 		accountsOrigin: make(map[common.Address][]byte),
@ -999,17 +965,6 @@ func (s *StateDB) GetRefund() uint64 {
 	return s.refund
 }
 
-// WaitPipeVerification waits until the snapshot been verified
-func (s *StateDB) WaitPipeVerification() error {
-	// Need to wait for the parent trie to commit
-	if s.snap != nil {
-		if valid := s.snap.WaitAndGetVerifyRes(); !valid {
-			return errors.New("verification on parent snap failed")
-		}
-	}
-	return nil
-}
-
 // Finalise finalises the state by removing the destructed objects and clears
 // the journal as well as the refunds. Finalise, however, will not push any updates
 // into the tries just yet. Only IntermediateRoot or Commit will do that.
@ -1056,11 +1011,7 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
 	}
 	prefetcher := s.prefetcher
 	if prefetcher != nil && len(addressesToPrefetch) > 0 {
-		if s.snap.Verified() {
-			prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, addressesToPrefetch)
-		} else if prefetcher.rootParent != (common.Hash{}) {
-			prefetcher.prefetch(common.Hash{}, prefetcher.rootParent, common.Address{}, addressesToPrefetch)
-		}
+		prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, addressesToPrefetch)
 	}
 	// Invalidate journal because reverting across transactions is not allowed.
 	s.clearJournalAndRefund()
@ -1076,76 +1027,6 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
 	return s.StateIntermediateRoot()
 }
 
-// CorrectAccountsRoot will fix account roots in pipecommit mode
-func (s *StateDB) CorrectAccountsRoot(blockRoot common.Hash) {
-	var snapshot snapshot.Snapshot
-	if blockRoot == (common.Hash{}) {
-		snapshot = s.snap
-	} else if s.snaps != nil {
-		snapshot = s.snaps.Snapshot(blockRoot)
-	}
-
-	if snapshot == nil {
-		return
-	}
-	if accounts, err := snapshot.Accounts(); err == nil && accounts != nil {
-		for _, obj := range s.stateObjects {
-			if !obj.deleted {
-				if account, exist := accounts[crypto.Keccak256Hash(obj.address[:])]; exist {
-					if len(account.Root) == 0 {
-						obj.data.Root = types.EmptyRootHash
-					} else {
-						obj.data.Root = common.BytesToHash(account.Root)
-					}
-				}
-			}
-		}
-	}
-}
-
-// PopulateSnapAccountAndStorage tries to populate required accounts and storages for pipecommit
-func (s *StateDB) PopulateSnapAccountAndStorage() {
-	for addr := range s.stateObjectsPending {
-		if obj := s.stateObjects[addr]; !obj.deleted {
-			if s.snap != nil {
-				s.populateSnapStorage(obj)
-				s.accounts[obj.addrHash] = types.SlimAccountRLP(obj.data)
-			}
-		}
-	}
-}
-
-// populateSnapStorage tries to populate required storages for pipecommit, and returns a flag to indicate whether the storage root changed or not
-func (s *StateDB) populateSnapStorage(obj *stateObject) bool {
-	for key, value := range obj.dirtyStorage {
-		obj.pendingStorage[key] = value
-	}
-	if len(obj.pendingStorage) == 0 {
-		return false
-	}
-	hasher := crypto.NewKeccakState()
-	var storage map[common.Hash][]byte
-	for key, value := range obj.pendingStorage {
-		var v []byte
-		if (value != common.Hash{}) {
-			// Encoding []byte cannot fail, ok to ignore the error.
-			v, _ = rlp.EncodeToBytes(common.TrimLeftZeroes(value[:]))
-		}
-		// If state snapshotting is active, cache the data til commit
-		if obj.db.snap != nil {
-			if storage == nil {
-				// Retrieve the old storage map, if available, create a new one otherwise
-				if storage = obj.db.storages[obj.addrHash]; storage == nil {
-					storage = make(map[common.Hash][]byte)
-					obj.db.storages[obj.addrHash] = storage
-				}
-			}
-			storage[crypto.HashData(hasher, key[:])] = v // v will be nil if value is 0x00
-		}
-	}
-	return true
-}
-
 func (s *StateDB) AccountsIntermediateRoot() {
 	tasks := make(chan func())
 	finishCh := make(chan struct{})
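The three helpers deleted above pre-filled snapshot data so the verification pipeline could run ahead of the trie commit. The slim encoding they produced is still how account data reaches a diff layer, just on the synchronous path now. For reference, the encoding step in isolation (the helper and its parameters are this sketch's own; the field names mirror the surrounding file):

package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// cacheSlimAccount stages one account for the snapshot layer: keyed by
// hashed address, RLP-encoded in the compact "slim" format.
func cacheSlimAccount(accounts map[common.Hash][]byte, addrHash common.Hash, data types.StateAccount) {
	accounts[addrHash] = types.SlimAccountRLP(data)
}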
@ -1482,7 +1363,7 @@ func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) (map[common.A
 //
 // The associated block number of the state transition is also provided
 // for more chain context.
-func (s *StateDB) Commit(block uint64, failPostCommitFunc func(), postCommitFuncs ...func() error) (common.Hash, *types.DiffLayer, error) {
+func (s *StateDB) Commit(block uint64, postCommitFunc func() error) (common.Hash, *types.DiffLayer, error) {
 	// Short circuit in case any database failure occurred earlier.
 	if s.dbErr != nil {
 		s.StopPrefetcher()
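The separate failure callback and the variadic hook list collapse into one optional post-commit function. A hypothetical caller under the new signature (the wrapper name and arguments are assumptions; only the Commit call shape comes from this hunk):

package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
)

// commitBlock runs the post-commit work inline; pass nil when there is
// nothing to do after the trie commit succeeds.
func commitBlock(statedb *state.StateDB, number uint64) (common.Hash, *types.DiffLayer, error) {
	return statedb.Commit(number, func() error {
		// e.g. persist the generated diff layer, flush caches, ...
		return nil
	})
}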
@ -1490,38 +1371,17 @@ func (s *StateDB) Commit(block uint64, failPostCommitFunc func(), postCommitFunc
 	}
 	// Finalize any pending changes and merge everything into the tries
 	var (
 		diffLayer  *types.DiffLayer
-		verified    chan struct{}
-		snapUpdated chan struct{}
 		incomplete map[common.Address]struct{}
 		nodes      = trienode.NewMergedNodeSet()
 	)
 
 	if s.snap != nil {
 		diffLayer = &types.DiffLayer{}
 	}
-	if s.pipeCommit {
-		// async commit the MPT
-		verified = make(chan struct{})
-		snapUpdated = make(chan struct{})
-	}
 
 	commmitTrie := func() error {
 		commitErr := func() error {
-			if s.pipeCommit {
-				<-snapUpdated
-				// Due to state verification pipeline, the accounts roots are not updated, leading to the data in the difflayer is not correct, capture the correct data here
-				s.AccountsIntermediateRoot()
-				if parent := s.snap.Root(); parent != s.expectedRoot {
-					accountData := make(map[common.Hash][]byte)
-					for k, v := range s.accounts {
-						accountData[crypto.Keccak256Hash(k[:])] = v
-					}
-					s.snaps.Snapshot(s.expectedRoot).CorrectAccounts(accountData)
-				}
-				s.snap = nil
-			}
-
 			if s.stateRoot = s.StateIntermediateRoot(); s.fullProcessed && s.expectedRoot != s.stateRoot {
 				log.Error("Invalid merkle root", "remote", s.expectedRoot, "local", s.stateRoot)
 				return fmt.Errorf("invalid merkle root (remote: %x local: %x)", s.expectedRoot, s.stateRoot)
@ -1629,8 +1489,8 @@ func (s *StateDB) Commit(block uint64, failPostCommitFunc func(), postCommitFunc
 			}
 		}
 
-		for _, postFunc := range postCommitFuncs {
-			err := postFunc()
+		if postCommitFunc != nil {
+			err := postCommitFunc()
 			if err != nil {
 				return err
 			}
@ -1639,19 +1499,6 @@ func (s *StateDB) Commit(block uint64, failPostCommitFunc func(), postCommitFunc
 			return nil
 		}()
 
-		if s.pipeCommit {
-			if commitErr == nil {
-				s.snaps.Snapshot(s.stateRoot).MarkValid()
-				close(verified)
-			} else {
-				// The blockchain will do the further rewind if write block not finish yet
-				close(verified)
-				if failPostCommitFunc != nil {
-					failPostCommitFunc()
-				}
-				log.Error("state verification failed", "err", commitErr)
-			}
-		}
 		return commitErr
 	}
 
@ -1693,15 +1540,10 @@ func (s *StateDB) Commit(block uint64, failPostCommitFunc func(), postCommitFunc
 		if metrics.EnabledExpensive {
 			defer func(start time.Time) { s.SnapshotCommits += time.Since(start) }(time.Now())
 		}
-		if s.pipeCommit {
-			defer close(snapUpdated)
-			// State verification pipeline - accounts root are not calculated here, just populate needed fields for process
-			s.PopulateSnapAccountAndStorage()
-		}
 		diffLayer.Destructs, diffLayer.Accounts, diffLayer.Storages = s.SnapToDiffLayer()
 		// Only update if there's a state transition (skip empty Clique blocks)
 		if parent := s.snap.Root(); parent != s.expectedRoot {
-			err := s.snaps.Update(s.expectedRoot, parent, s.convertAccountSet(s.stateObjectsDestruct), s.accounts, s.storages, verified)
+			err := s.snaps.Update(s.expectedRoot, parent, s.convertAccountSet(s.stateObjectsDestruct), s.accounts, s.storages)
 			if err != nil {
 				log.Warn("Failed to update snapshot tree", "from", parent, "to", s.expectedRoot, "err", err)
@ -1721,12 +1563,9 @@ func (s *StateDB) Commit(block uint64, failPostCommitFunc func(), postCommitFunc
 			return nil
 		},
 	}
-	if s.pipeCommit {
-		go commmitTrie()
-	} else {
-		defer s.StopPrefetcher()
-		commitFuncs = append(commitFuncs, commmitTrie)
-	}
+	defer s.StopPrefetcher()
+	commitFuncs = append(commitFuncs, commmitTrie)
 	commitRes := make(chan error, len(commitFuncs))
 	for _, f := range commitFuncs {
 		// commitFuncs[0] and commitFuncs[1] both read map `stateObjects`, but no conflicts
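With pipecommit gone, commmitTrie is always appended to commitFuncs rather than spawned on its own goroutine, so every commit follows the same fan-out/fan-in shape. Reduced to its essentials as a standalone sketch (this is not the code above, just the pattern it uses):

package example

// runCommits mirrors the commitFuncs/commitRes pattern: launch every
// commit function concurrently, then collect the first error, if any.
func runCommits(commitFuncs []func() error) error {
	commitRes := make(chan error, len(commitFuncs))
	for _, f := range commitFuncs {
		f := f // capture the loop variable (pre-Go 1.22 semantics)
		go func() { commitRes <- f() }()
	}
	var firstErr error
	for range commitFuncs {
		if err := <-commitRes; err != nil && firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}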
@ -1743,11 +1582,7 @@ func (s *StateDB) Commit(block uint64, failPostCommitFunc func(), postCommitFunc
 	}
 
 	root := s.stateRoot
-	if s.pipeCommit {
-		root = s.expectedRoot
-	} else {
-		s.snap = nil
-	}
+	s.snap = nil
 	if root == (common.Hash{}) {
 		root = types.EmptyRootHash
 	}
File diff suppressed because one or more lines are too long
@ -17,11 +17,3 @@ var (
 	//go:embed chapel/StakeHubContract
 	ChapelStakeHubContract string
 )
-
-// contract codes for Rialto upgrade
-var (
-	//go:embed rialto/ValidatorContract
-	RialtoValidatorContract string
-	//go:embed rialto/StakeHubContract
-	RialtoStakeHubContract string
-)
51 new files, 1 line each (file diffs suppressed because one or more lines are too long): the same 17 contract blobs under each of core/systemcontracts/pascal/chapel/, core/systemcontracts/pascal/mainnet/ and core/systemcontracts/pascal/rialto/:

CrossChainContract
GovHubContract
GovTokenContract
GovernorContract
LightClientContract
RelayerHubContract
RelayerIncentivizeContract
SlashContract
StakeCreditContract
StakeHubContract
StakingContract
SystemRewardContract
TimelockContract
TokenHubContract
TokenManagerContract
TokenRecoverPortalContract
ValidatorContract

168	core/systemcontracts/pascal/types.go	Normal file
@ -0,0 +1,168 @@
+package pascal
+
+import _ "embed"
+
+// contract codes for Mainnet upgrade
+var (
+
+	//go:embed mainnet/ValidatorContract
+	MainnetValidatorContract string
+
+	//go:embed mainnet/SlashContract
+	MainnetSlashContract string
+
+	//go:embed mainnet/SystemRewardContract
+	MainnetSystemRewardContract string
+
+	//go:embed mainnet/LightClientContract
+	MainnetLightClientContract string
+
+	//go:embed mainnet/TokenHubContract
+	MainnetTokenHubContract string
+
+	//go:embed mainnet/RelayerIncentivizeContract
+	MainnetRelayerIncentivizeContract string
+
+	//go:embed mainnet/RelayerHubContract
+	MainnetRelayerHubContract string
+
+	//go:embed mainnet/GovHubContract
+	MainnetGovHubContract string
+
+	//go:embed mainnet/TokenManagerContract
+	MainnetTokenManagerContract string
+
+	//go:embed mainnet/CrossChainContract
+	MainnetCrossChainContract string
+
+	//go:embed mainnet/StakingContract
+	MainnetStakingContract string
+
+	//go:embed mainnet/StakeHubContract
+	MainnetStakeHubContract string
+
+	//go:embed mainnet/StakeCreditContract
+	MainnetStakeCreditContract string
+
+	//go:embed mainnet/GovernorContract
+	MainnetGovernorContract string
+
+	//go:embed mainnet/GovTokenContract
+	MainnetGovTokenContract string
+
+	//go:embed mainnet/TimelockContract
+	MainnetTimelockContract string
+
+	//go:embed mainnet/TokenRecoverPortalContract
+	MainnetTokenRecoverPortalContract string
+)
+
+// contract codes for Chapel upgrade
+var (
+
+	//go:embed chapel/ValidatorContract
+	ChapelValidatorContract string
+
+	//go:embed chapel/SlashContract
+	ChapelSlashContract string
+
+	//go:embed chapel/SystemRewardContract
+	ChapelSystemRewardContract string
+
+	//go:embed chapel/LightClientContract
+	ChapelLightClientContract string
+
+	//go:embed chapel/TokenHubContract
+	ChapelTokenHubContract string
+
+	//go:embed chapel/RelayerIncentivizeContract
+	ChapelRelayerIncentivizeContract string
+
+	//go:embed chapel/RelayerHubContract
+	ChapelRelayerHubContract string
+
+	//go:embed chapel/GovHubContract
+	ChapelGovHubContract string
+
+	//go:embed chapel/TokenManagerContract
+	ChapelTokenManagerContract string
+
+	//go:embed chapel/CrossChainContract
+	ChapelCrossChainContract string
+
+	//go:embed chapel/StakingContract
+	ChapelStakingContract string
+
+	//go:embed chapel/StakeHubContract
+	ChapelStakeHubContract string
+
+	//go:embed chapel/StakeCreditContract
+	ChapelStakeCreditContract string
+
+	//go:embed chapel/GovernorContract
+	ChapelGovernorContract string
+
+	//go:embed chapel/GovTokenContract
+	ChapelGovTokenContract string
+
+	//go:embed chapel/TimelockContract
+	ChapelTimelockContract string
+
+	//go:embed chapel/TokenRecoverPortalContract
+	ChapelTokenRecoverPortalContract string
+)
+
+// contract codes for Rialto upgrade
+var (
+
+	//go:embed rialto/ValidatorContract
+	RialtoValidatorContract string
+
+	//go:embed rialto/SlashContract
+	RialtoSlashContract string
+
+	//go:embed rialto/SystemRewardContract
+	RialtoSystemRewardContract string
+
+	//go:embed rialto/LightClientContract
+	RialtoLightClientContract string
+
+	//go:embed rialto/TokenHubContract
+	RialtoTokenHubContract string
+
+	//go:embed rialto/RelayerIncentivizeContract
+	RialtoRelayerIncentivizeContract string
+
+	//go:embed rialto/RelayerHubContract
+	RialtoRelayerHubContract string
+
+	//go:embed rialto/GovHubContract
+	RialtoGovHubContract string
+
+	//go:embed rialto/TokenManagerContract
+	RialtoTokenManagerContract string
+
+	//go:embed rialto/CrossChainContract
+	RialtoCrossChainContract string
+
+	//go:embed rialto/StakingContract
+	RialtoStakingContract string
+
+	//go:embed rialto/StakeHubContract
+	RialtoStakeHubContract string
+
+	//go:embed rialto/StakeCreditContract
+	RialtoStakeCreditContract string
+
+	//go:embed rialto/GovernorContract
+	RialtoGovernorContract string
+
+	//go:embed rialto/GovTokenContract
+	RialtoGovTokenContract string
+
+	//go:embed rialto/TimelockContract
+	RialtoTimelockContract string
+
+	//go:embed rialto/TokenRecoverPortalContract
+	RialtoTokenRecoverPortalContract string
+)
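types.go only embeds the compiled bytecode as strings; the apply step lives in applySystemContractUpgrade, whose body this diff does not show. A sketch of the assumed decode-and-set shape (the helper name, the 0x-prefix handling and the SetCode-only interface are all assumptions of this sketch):

package example

import (
	"encoding/hex"
	"strings"

	"github.com/ethereum/go-ethereum/common"
)

// codeSetter is the minimal state surface this sketch needs; the real
// statedb satisfies it.
type codeSetter interface {
	SetCode(common.Address, []byte)
}

// applyCode decodes one embedded contract blob and installs it at addr.
func applyCode(db codeSetter, addr common.Address, blob string) error {
	code, err := hex.DecodeString(strings.TrimPrefix(blob, "0x"))
	if err != nil {
		return err
	}
	db.SetCode(addr, code)
	return nil
}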
@ -20,6 +20,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/systemcontracts/mirror"
 	"github.com/ethereum/go-ethereum/core/systemcontracts/moran"
 	"github.com/ethereum/go-ethereum/core/systemcontracts/niels"
+	"github.com/ethereum/go-ethereum/core/systemcontracts/pascal"
 	"github.com/ethereum/go-ethereum/core/systemcontracts/planck"
 	"github.com/ethereum/go-ethereum/core/systemcontracts/plato"
 	"github.com/ethereum/go-ethereum/core/systemcontracts/ramanujan"
@ -82,6 +83,8 @@ var (
 	haberFixUpgrade = make(map[string]*Upgrade)
 
 	bohrUpgrade = make(map[string]*Upgrade)
+
+	pascalUpgrade = make(map[string]*Upgrade)
 )
 
 func init() {
@ -773,18 +776,275 @@ func init() {
 		},
 	}
 
-	bohrUpgrade[rialtoNet] = &Upgrade{
-		UpgradeName: "bohr",
+	pascalUpgrade[mainNet] = &Upgrade{
+		UpgradeName: "pascal",
 		Configs: []*UpgradeConfig{
 			{
 				ContractAddr: common.HexToAddress(ValidatorContract),
-				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/398c9364aad5261c1ecd90ac3ab2df89b65c45e3",
-				Code:         bohr.RialtoValidatorContract,
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.MainnetValidatorContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(SlashContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.MainnetSlashContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(SystemRewardContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.MainnetSystemRewardContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(LightClientContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.MainnetLightClientContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(TokenHubContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.MainnetTokenHubContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(RelayerIncentivizeContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.MainnetRelayerIncentivizeContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(RelayerHubContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.MainnetRelayerHubContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(GovHubContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.MainnetGovHubContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(TokenManagerContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.MainnetTokenManagerContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(CrossChainContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.MainnetCrossChainContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(StakingContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.MainnetStakingContract,
 			},
 			{
 				ContractAddr: common.HexToAddress(StakeHubContract),
-				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/398c9364aad5261c1ecd90ac3ab2df89b65c45e3",
-				Code:         bohr.RialtoStakeHubContract,
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.MainnetStakeHubContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(StakeCreditContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.MainnetStakeCreditContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(GovernorContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.MainnetGovernorContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(GovTokenContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.MainnetGovTokenContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(TimelockContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.MainnetTimelockContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(TokenRecoverPortalContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.MainnetTokenRecoverPortalContract,
+			},
+		},
+	}
+
+	pascalUpgrade[chapelNet] = &Upgrade{
+		UpgradeName: "pascal",
+		Configs: []*UpgradeConfig{
+			{
+				ContractAddr: common.HexToAddress(ValidatorContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.ChapelValidatorContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(SlashContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.ChapelSlashContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(SystemRewardContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.ChapelSystemRewardContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(LightClientContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.ChapelLightClientContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(TokenHubContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.ChapelTokenHubContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(RelayerIncentivizeContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.ChapelRelayerIncentivizeContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(RelayerHubContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.ChapelRelayerHubContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(GovHubContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.ChapelGovHubContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(TokenManagerContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.ChapelTokenManagerContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(CrossChainContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.ChapelCrossChainContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(StakingContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.ChapelStakingContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(StakeHubContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.ChapelStakeHubContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(StakeCreditContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.ChapelStakeCreditContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(GovernorContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.ChapelGovernorContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(GovTokenContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.ChapelGovTokenContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(TimelockContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.ChapelTimelockContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(TokenRecoverPortalContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.ChapelTokenRecoverPortalContract,
+			},
+		},
+	}
+
+	pascalUpgrade[rialtoNet] = &Upgrade{
+		UpgradeName: "pascal",
+		Configs: []*UpgradeConfig{
+			{
+				ContractAddr: common.HexToAddress(ValidatorContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.RialtoValidatorContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(SlashContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.RialtoSlashContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(SystemRewardContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.RialtoSystemRewardContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(LightClientContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.RialtoLightClientContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(TokenHubContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.RialtoTokenHubContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(RelayerIncentivizeContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.RialtoRelayerIncentivizeContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(RelayerHubContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.RialtoRelayerHubContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(GovHubContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.RialtoGovHubContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(TokenManagerContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.RialtoTokenManagerContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(CrossChainContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.RialtoCrossChainContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(StakingContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.RialtoStakingContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(StakeHubContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.RialtoStakeHubContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(StakeCreditContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.RialtoStakeCreditContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(GovernorContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.RialtoGovernorContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(GovTokenContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.RialtoGovTokenContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(TimelockContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.RialtoTimelockContract,
+			},
+			{
+				ContractAddr: common.HexToAddress(TokenRecoverPortalContract),
+				CommitUrl:    "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa",
+				Code:         pascal.RialtoTokenRecoverPortalContract,
 			},
 		},
 	}
@@ -873,6 +1133,10 @@ func UpgradeBuildInSystemContract(config *params.ChainConfig, blockNumber *big.I
 		applySystemContractUpgrade(bohrUpgrade[network], blockNumber, statedb, logger)
 	}

+	if config.IsOnPascal(blockNumber, lastBlockTime, blockTime) {
+		applySystemContractUpgrade(pascalUpgrade[network], blockNumber, statedb, logger)
+	}
+
 	/*
 		apply other upgrades
 	*/
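The new IsOnPascal branch follows the once-only gating convention of the earlier time-based forks: the upgrade must fire exactly on the first block whose timestamp crosses the fork time, and never again. A minimal sketch of that boundary test, with a hypothetical forkTime parameter standing in for the chain config's Pascal timestamp:

// isOnTimeFork reports whether the current block is the first one at or past
// the fork time: the parent block is still pre-fork, this block is post-fork.
func isOnTimeFork(forkTime *uint64, lastBlockTime, blockTime uint64) bool {
	if forkTime == nil {
		return false // fork not scheduled on this network
	}
	return lastBlockTime < *forkTime && *forkTime <= blockTime
}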
@@ -19,6 +19,7 @@ package legacypool

 import (
 	"errors"
+	"fmt"
 	"math"
 	"math/big"
 	"sort"

@@ -99,10 +100,11 @@ var (
 	// that this number is pretty low, since txpool reorgs happen very frequently.
 	dropBetweenReorgHistogram = metrics.NewRegisteredHistogram("txpool/dropbetweenreorg", nil, metrics.NewExpDecaySample(1028, 0.015))

 	pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil)
 	queuedGauge  = metrics.NewRegisteredGauge("txpool/queued", nil)
 	localGauge   = metrics.NewRegisteredGauge("txpool/local", nil)
 	slotsGauge   = metrics.NewRegisteredGauge("txpool/slots", nil)
+	OverflowPoolGauge = metrics.NewRegisteredGauge("txpool/overflowpool", nil)

 	reheapTimer = metrics.NewRegisteredTimer("txpool/reheap", nil)
 )

@@ -133,10 +135,11 @@ type Config struct {
 	PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool
 	PriceBump  uint64 // Minimum price bump percentage to replace an already existing transaction (nonce)

 	AccountSlots uint64 // Number of executable transaction slots guaranteed per account
 	GlobalSlots  uint64 // Maximum number of executable transaction slots for all accounts
 	AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account
 	GlobalQueue  uint64 // Maximum number of non-executable transaction slots for all accounts
+	OverflowPoolSlots uint64 // Maximum number of transaction slots in overflow pool

 	Lifetime       time.Duration // Maximum amount of time non-executable transaction are queued
 	ReannounceTime time.Duration // Duration for announcing local pending transactions again

@@ -150,10 +153,11 @@ var DefaultConfig = Config{
 	PriceLimit: 1,
 	PriceBump:  10,

 	AccountSlots: 16,
 	GlobalSlots:  4096 + 1024, // urgent + floating queue capacity with 4:1 ratio
 	AccountQueue: 64,
 	GlobalQueue:  1024,
+	OverflowPoolSlots: 0,

 	Lifetime:       3 * time.Hour,
 	ReannounceTime: 10 * 365 * 24 * time.Hour,
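With a default of 0 the overflow buffer is disabled, so the feature is strictly opt-in. A minimal sketch of turning it on when embedding the pool; the Config and New identifiers are the ones introduced in this diff, while the flag wiring from the CLI is assumed and not shown:

cfg := legacypool.DefaultConfig
cfg.OverflowPoolSlots = 4096 // opt in: keep up to 4096 slots of evicted transactions around
pool := legacypool.New(cfg, chain) // chain is any legacypool.BlockChain implementation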
@@ -235,6 +239,8 @@ type LegacyPool struct {
 	all    *lookup     // All transactions to allow lookups
 	priced *pricedList // All transactions sorted by price

+	localBufferPool *TxOverflowPool // Local buffer transactions
+
 	reqResetCh     chan *txpoolResetRequest
 	reqPromoteCh   chan *accountSet
 	queueTxEventCh chan *types.Transaction

@@ -272,6 +278,7 @@ func New(config Config, chain BlockChain) *LegacyPool {
 		reorgDoneCh:     make(chan chan struct{}),
 		reorgShutdownCh: make(chan struct{}),
 		initDoneCh:      make(chan struct{}),
+		localBufferPool: NewTxOverflowPoolHeap(config.OverflowPoolSlots),
 	}
 	pool.locals = newAccountSet(pool.signer)
 	for _, addr := range config.Locals {

@@ -408,7 +415,6 @@ func (pool *LegacyPool) loop() {
 			if !pool.locals.contains(addr) {
 				continue
 			}
-
 			for _, tx := range list.Flatten() {
 				// Default ReannounceTime is 10 years, won't announce by default.
 				if time.Since(tx.Time()) < pool.config.ReannounceTime {

@@ -517,6 +523,17 @@ func (pool *LegacyPool) Stats() (int, int) {
 	return pool.stats()
 }

+func (pool *LegacyPool) statsOverflowPool() int {
+	pool.mu.RLock()
+	defer pool.mu.RUnlock()
+
+	if pool.localBufferPool == nil {
+		return 0
+	}
+
+	return pool.localBufferPool.Size()
+}
+
 // stats retrieves the current pool stats, namely the number of pending and the
 // number of queued (non-executable) transactions.
 func (pool *LegacyPool) stats() (int, int) {

@@ -831,6 +848,8 @@ func (pool *LegacyPool) add(tx *types.Transaction, local bool) (replaced bool, e
 		}
 	}

+	pool.addToOverflowPool(drop, isLocal)
+
 	// Kick out the underpriced remote transactions.
 	for _, tx := range drop {
 		log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())

@@ -887,6 +906,29 @@ func (pool *LegacyPool) add(tx *types.Transaction, local bool) (replaced bool, e
 	return replaced, nil
 }

+func (pool *LegacyPool) addToOverflowPool(drop types.Transactions, isLocal bool) {
+	// calculate total number of slots in drop. Accordingly add them to OverflowPool (if there is space)
+	availableSlotsOverflowPool := pool.availableSlotsOverflowPool()
+	if availableSlotsOverflowPool > 0 {
+		// transfer availableSlotsOverflowPool number of transactions slots from drop to OverflowPool
+		currentSlotsUsed := 0
+		for i, tx := range drop {
+			txSlots := numSlots(tx)
+			if currentSlotsUsed+txSlots <= availableSlotsOverflowPool {
+				from, _ := types.Sender(pool.signer, tx)
+				pool.localBufferPool.Add(tx)
+				log.Debug("adding to OverflowPool", "transaction", tx.Hash().String(), "from", from.String())
+				currentSlotsUsed += txSlots
+			} else {
+				log.Debug("not all got added to OverflowPool", "totalAdded", i+1)
+				return
+			}
+		}
+	} else {
+		log.Debug("adding to OverflowPool unsuccessful", "availableSlotsOverflowPool", availableSlotsOverflowPool)
+	}
+}
+
 // isGapped reports whether the given transaction is immediately executable.
 func (pool *LegacyPool) isGapped(from common.Address, tx *types.Transaction) bool {
 	// Short circuit if transaction falls within the scope of the pending list
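Note that the budget in addToOverflowPool is counted in slots, not transactions, so a single oversized transaction can exhaust the remaining space. A worked example of the accounting, assuming go-ethereum's usual 32 KiB txSlotSize (an assumption; the constant is defined elsewhere in this package):

// numSlots is a ceiling division: numSlots(tx) = ceil(tx.Size() / txSlotSize).
//   tx.Size() =  500 B  -> 1 slot
//   tx.Size() = 40 KiB  -> 2 slots (40960 / 32768, rounded up)
// With availableSlotsOverflowPool = 3 and drop = [1-slot, 2-slot, 1-slot],
// the loop buffers the first two transactions (3 slots used) and returns at
// the third, logging how many were admitted.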
@@ -1333,7 +1375,6 @@ func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest,
 		reorgDurationTimer.Update(time.Since(t0))
 	}(time.Now())
 	defer close(done)
-
 	var promoteAddrs []common.Address
 	if dirtyAccounts != nil && reset == nil {
 		// Only dirty accounts need to be promoted, unless we're resetting.

@@ -1391,6 +1432,9 @@ func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest,
 	pool.changesSinceReorg = 0 // Reset change counter
 	pool.mu.Unlock()

+	// Transfer transactions from OverflowPool to MainPool for new block import
+	pool.transferTransactions()
+
 	// Notify subsystems for newly added transactions
 	for _, tx := range promoted {
 		addr, _ := types.Sender(pool.signer, tx)

@@ -2038,3 +2082,50 @@ func (t *lookup) RemotesBelowTip(threshold *big.Int) types.Transactions {
 func numSlots(tx *types.Transaction) int {
 	return int((tx.Size() + txSlotSize - 1) / txSlotSize)
 }

+// transferTransactions moves transactions from OverflowPool to MainPool
+func (pool *LegacyPool) transferTransactions() {
+	// Fail fast if the overflow pool is empty
+	if pool.localBufferPool.Size() == 0 {
+		return
+	}
+
+	maxMainPoolSize := int(pool.config.GlobalSlots + pool.config.GlobalQueue)
+	// Use pool.all.Slots() to get the total slots used by all transactions
+	currentMainPoolSize := pool.all.Slots()
+	if currentMainPoolSize >= maxMainPoolSize {
+		return
+	}
+
+	extraSlots := maxMainPoolSize - currentMainPoolSize
+	extraTransactions := (extraSlots + 3) / 4 // Since a transaction can take up to 4 slots
+	log.Debug("Will attempt to transfer from OverflowPool to MainPool", "transactions", extraTransactions)
+	txs := pool.localBufferPool.Flush(extraTransactions)
+	if len(txs) == 0 {
+		return
+	}
+
+	pool.Add(txs, true, false)
+}
+
+func (pool *LegacyPool) availableSlotsOverflowPool() int {
+	maxOverflowPoolSize := int(pool.config.OverflowPoolSlots)
+	availableSlots := maxOverflowPoolSize - pool.localBufferPool.Size()
+	if availableSlots > 0 {
+		return availableSlots
+	}
+	return 0
+}
+
+func (pool *LegacyPool) PrintTxStats() {
+	for _, l := range pool.pending {
+		for _, transaction := range l.txs.items {
+			from, _ := types.Sender(pool.signer, transaction)
+			fmt.Println("from: ", from, " Pending:", transaction.Hash().String(), transaction.GasFeeCap(), transaction.GasTipCap())
+		}
+	}
+
+	pool.localBufferPool.PrintTxStats()
+	fmt.Println("length of all: ", pool.all.Slots())
+	fmt.Println("----------------------------------------------------")
+}
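The (extraSlots + 3) / 4 in transferTransactions is integer ceiling division: it sizes the refill batch as the smallest transaction count that could cover the free slots if every flushed transaction were maximal (4 slots each). Smaller transactions simply under-fill the pool until the next reorg triggers another transfer. A quick check of the arithmetic:

// ceil(extraSlots / 4) via (extraSlots + 3) / 4:
//   extraSlots = 1..4 -> 1 transaction
//   extraSlots = 5..8 -> 2 transactions
//   extraSlots = 9    -> 3 transactions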
@@ -40,6 +40,7 @@ import (
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/trie"
 	"github.com/holiman/uint256"
+	"github.com/stretchr/testify/assert"
 )

 var (

@@ -1739,6 +1740,7 @@ func TestRepricingKeepsLocals(t *testing.T) {
 // Note, local transactions are never allowed to be dropped.
 func TestUnderpricing(t *testing.T) {
 	t.Parallel()
+	testTxPoolConfig.OverflowPoolSlots = 5

 	// Create the pool to test the pricing enforcement with
 	statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)

@@ -1931,6 +1933,8 @@ func TestUnderpricingDynamicFee(t *testing.T) {
 	pool.config.GlobalSlots = 2
 	pool.config.GlobalQueue = 2

+	pool.config.OverflowPoolSlots = 0
+
 	// Keep track of transaction events to ensure all executables get announced
 	events := make(chan core.NewTxsEvent, 32)
 	sub := pool.txFeed.Subscribe(events)

@@ -1955,7 +1959,6 @@ func TestUnderpricingDynamicFee(t *testing.T) {
 	// Import the batch and that both pending and queued transactions match up
 	pool.addRemotes(txs) // Pend K0:0, K0:1; Que K1:1
 	pool.addLocal(ltx)   // +K2:0 => Pend K0:0, K0:1, K2:0; Que K1:1

 	pending, queued := pool.Stats()
 	if pending != 3 {
 		t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3)

@@ -1995,9 +1998,9 @@ func TestUnderpricingDynamicFee(t *testing.T) {
 		t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
 	}
 	if queued != 2 {
-		t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2)
+		t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1)
 	}
-	if err := validateEvents(events, 2); err != nil {
+	if err := validateEvents(events, 2); err != nil { // todo make it 4...After this validateEvents the pending becomes 3?!
 		t.Fatalf("additional event firing failed: %v", err)
 	}
 	if err := validatePoolInternals(pool); err != nil {

@@ -2012,11 +2015,12 @@ func TestUnderpricingDynamicFee(t *testing.T) {
 	if err := pool.addLocal(ltx); err != nil {
 		t.Fatalf("failed to add new underpriced local transaction: %v", err)
 	}

 	pending, queued = pool.Stats()
-	if pending != 3 {
+	if pending != 3 { // 3
 		t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3)
 	}
-	if queued != 1 {
+	if queued != 1 { // 1
 		t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1)
 	}
 	if err := validateEvents(events, 2); err != nil {

@@ -2032,41 +2036,51 @@ func TestUnderpricingDynamicFee(t *testing.T) {
 func TestDualHeapEviction(t *testing.T) {
 	t.Parallel()

+	testTxPoolConfig.OverflowPoolSlots = 1
 	pool, _ := setupPoolWithConfig(eip1559Config)
 	defer pool.Close()

-	pool.config.GlobalSlots = 10
-	pool.config.GlobalQueue = 10
+	pool.config.GlobalSlots = 2
+	pool.config.GlobalQueue = 2
+	pool.config.OverflowPoolSlots = 1

 	var (
 		highTip, highCap *types.Transaction
 		baseFee          int
+		highCapValue     int64
+		highTipValue     int64
 	)

 	check := func(tx *types.Transaction, name string) {
 		if pool.all.GetRemote(tx.Hash()) == nil {
-			t.Fatalf("highest %s transaction evicted from the pool", name)
+			t.Fatalf("highest %s transaction evicted from the pool, gasTip: %s, gasFeeCap: %s, hash: %s", name, highTip.GasTipCap().String(), highCap.GasFeeCap().String(), tx.Hash().String())
 		}
 	}

 	add := func(urgent bool) {
-		for i := 0; i < 20; i++ {
+		for i := 0; i < 4; i++ {
 			var tx *types.Transaction
 			// Create a test accounts and fund it
 			key, _ := crypto.GenerateKey()
 			testAddBalance(pool, crypto.PubkeyToAddress(key.PublicKey), big.NewInt(1000000000000))
 			if urgent {
 				tx = dynamicFeeTx(0, 100000, big.NewInt(int64(baseFee+1+i)), big.NewInt(int64(1+i)), key)
-				highTip = tx
+				if int64(1+i) > highTipValue || (int64(1+i) == highTipValue && int64(baseFee+1+i) > highTip.GasFeeCap().Int64()) {
+					highTipValue = int64(1 + i)
+					highTip = tx
+				}
 			} else {
 				tx = dynamicFeeTx(0, 100000, big.NewInt(int64(baseFee+200+i)), big.NewInt(1), key)
-				highCap = tx
+				if int64(baseFee+200+i) > highCapValue {
+					highCapValue = int64(baseFee + 200 + i)
+					highCap = tx
+				}
 			}
 			pool.addRemotesSync([]*types.Transaction{tx})
 		}
 		pending, queued := pool.Stats()
-		if pending+queued != 20 {
-			t.Fatalf("transaction count mismatch: have %d, want %d", pending+queued, 10)
+		if pending+queued != 4 {
+			t.Fatalf("transaction count mismatch: have %d, want %d, pending %d, queued %d, OverflowPool %d", pending+queued, 5, pending, queued, pool.localBufferPool.Size())
 		}
 	}

@@ -2231,6 +2245,50 @@ func TestReplacement(t *testing.T) {
 	}
 }

+func TestTransferTransactions(t *testing.T) {
+	t.Parallel()
+	testTxPoolConfig.OverflowPoolSlots = 1
+	pool, _ := setupPoolWithConfig(eip1559Config)
+	defer pool.Close()
+
+	pool.config.GlobalSlots = 1
+	pool.config.GlobalQueue = 2
+
+	// Create a number of test accounts and fund them
+	keys := make([]*ecdsa.PrivateKey, 5)
+	for i := 0; i < len(keys); i++ {
+		keys[i], _ = crypto.GenerateKey()
+		testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000))
+	}
+
+	tx := dynamicFeeTx(0, 100000, big.NewInt(3), big.NewInt(2), keys[0])
+	from, _ := types.Sender(pool.signer, tx)
+	pool.addToOverflowPool([]*types.Transaction{tx}, true)
+	pending, queue := pool.Stats()
+
+	assert.Equal(t, 0, pending, "pending transactions mismatched")
+	assert.Equal(t, 0, queue, "queued transactions mismatched")
+	assert.Equal(t, 1, pool.statsOverflowPool(), "OverflowPool size unexpected")
+
+	tx2 := dynamicFeeTx(0, 100000, big.NewInt(3), big.NewInt(2), keys[1])
+	pool.addToOverflowPool([]*types.Transaction{tx2}, true)
+	assert.Equal(t, 1, pool.statsOverflowPool(), "OverflowPool size unexpected")
+	<-pool.requestPromoteExecutables(newAccountSet(pool.signer, from))
+	pending, queue = pool.Stats()
+
+	assert.Equal(t, 0, pending, "pending transactions mismatched")
+	assert.Equal(t, 1, queue, "queued transactions mismatched")
+	assert.Equal(t, 0, pool.statsOverflowPool(), "OverflowPool size unexpected")
+
+	tx3 := dynamicFeeTx(0, 100000, big.NewInt(3), big.NewInt(2), keys[2])
+	pool.addToOverflowPool([]*types.Transaction{tx3}, true)
+	pending, queue = pool.Stats()
+
+	assert.Equal(t, 1, pending, "pending transactions mismatched")
+	assert.Equal(t, 0, queue, "queued transactions mismatched")
+	assert.Equal(t, 1, pool.statsOverflowPool(), "OverflowPool size unexpected")
+}
+
 // Tests that the pool rejects replacement dynamic fee transactions that don't
 // meet the minimum price bump required.
 func TestReplacementDynamicFee(t *testing.T) {
core/txpool/legacypool/tx_overflowpool.go (new file, 171 lines)
@@ -0,0 +1,171 @@

package legacypool

import (
	"container/heap"
	"fmt"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// txHeapItem implements the Interface interface (https://pkg.go.dev/container/heap#Interface) of heap so that it can be heapified
type txHeapItem struct {
	tx        *types.Transaction
	timestamp int64 // Unix timestamp (nanoseconds) of when the transaction was added
	index     int
}

type txHeap []*txHeapItem

func (h txHeap) Len() int { return len(h) }
func (h txHeap) Less(i, j int) bool {
	return h[i].timestamp < h[j].timestamp
}
func (h txHeap) Swap(i, j int) {
	if i < 0 || j < 0 || i >= len(h) || j >= len(h) {
		return // Silently fail if indices are out of bounds
	}
	h[i], h[j] = h[j], h[i]
	if h[i] != nil {
		h[i].index = i
	}
	if h[j] != nil {
		h[j].index = j
	}
}

func (h *txHeap) Push(x interface{}) {
	item, ok := x.(*txHeapItem)
	if !ok {
		return
	}
	n := len(*h)
	item.index = n
	*h = append(*h, item)
}

func (h *txHeap) Pop() interface{} {
	old := *h
	n := len(old)
	if n == 0 {
		return nil // Return nil if the heap is empty
	}
	item := old[n-1]
	old[n-1] = nil // avoid memory leak
	*h = old[0 : n-1]
	if item != nil {
		item.index = -1 // for safety
	}
	return item
}

type TxOverflowPool struct {
	txHeap    txHeap
	index     map[common.Hash]*txHeapItem
	mu        sync.RWMutex
	maxSize   uint64
	totalSize int
}

func NewTxOverflowPoolHeap(estimatedMaxSize uint64) *TxOverflowPool {
	return &TxOverflowPool{
		txHeap:  make(txHeap, 0, estimatedMaxSize),
		index:   make(map[common.Hash]*txHeapItem, estimatedMaxSize),
		maxSize: estimatedMaxSize,
	}
}

func (tp *TxOverflowPool) Add(tx *types.Transaction) {
	tp.mu.Lock()
	defer tp.mu.Unlock()

	if _, exists := tp.index[tx.Hash()]; exists {
		// Transaction already in pool, ignore
		return
	}

	if uint64(len(tp.txHeap)) >= tp.maxSize {
		// Remove the oldest transaction to make space
		oldestItem, ok := heap.Pop(&tp.txHeap).(*txHeapItem)
		if !ok || oldestItem == nil {
			return
		}
		delete(tp.index, oldestItem.tx.Hash())
		tp.totalSize -= numSlots(oldestItem.tx)
		OverflowPoolGauge.Dec(1)
	}

	item := &txHeapItem{
		tx:        tx,
		timestamp: time.Now().UnixNano(),
	}
	heap.Push(&tp.txHeap, item)
	tp.index[tx.Hash()] = item
	tp.totalSize += numSlots(tx)
	OverflowPoolGauge.Inc(1)
}

func (tp *TxOverflowPool) Get(hash common.Hash) (*types.Transaction, bool) {
	tp.mu.RLock()
	defer tp.mu.RUnlock()
	if item, ok := tp.index[hash]; ok {
		return item.tx, true
	}
	return nil, false
}

func (tp *TxOverflowPool) Remove(hash common.Hash) {
	tp.mu.Lock()
	defer tp.mu.Unlock()
	if item, ok := tp.index[hash]; ok {
		heap.Remove(&tp.txHeap, item.index)
		delete(tp.index, hash)
		tp.totalSize -= numSlots(item.tx)
		OverflowPoolGauge.Dec(1)
	}
}

func (tp *TxOverflowPool) Flush(n int) []*types.Transaction {
	tp.mu.Lock()
	defer tp.mu.Unlock()
	if n > tp.txHeap.Len() {
		n = tp.txHeap.Len()
	}
	txs := make([]*types.Transaction, n)
	for i := 0; i < n; i++ {
		item, ok := heap.Pop(&tp.txHeap).(*txHeapItem)
		if !ok || item == nil {
			continue
		}
		txs[i] = item.tx
		delete(tp.index, item.tx.Hash())
		tp.totalSize -= numSlots(item.tx)
	}

	OverflowPoolGauge.Dec(int64(n))
	return txs
}

func (tp *TxOverflowPool) Len() int {
	tp.mu.RLock()
	defer tp.mu.RUnlock()
	return tp.txHeap.Len()
}

func (tp *TxOverflowPool) Size() int {
	tp.mu.RLock()
	defer tp.mu.RUnlock()
	return tp.totalSize
}

func (tp *TxOverflowPool) PrintTxStats() {
	tp.mu.RLock()
	defer tp.mu.RUnlock()
	for _, item := range tp.txHeap {
		tx := item.tx
		fmt.Printf("Hash: %s, Timestamp: %d, GasFeeCap: %s, GasTipCap: %s\n",
			tx.Hash().String(), item.timestamp, tx.GasFeeCap().String(), tx.GasTipCap().String())
	}
}
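A minimal usage sketch of the API above, borrowing the createTestTx helper from the companion test file below; the FIFO behaviour falls out of the timestamp-ordered heap, and Get stays O(1) thanks to the hash index:

op := NewTxOverflowPoolHeap(128) // room for 128 transactions before the oldest is evicted
tx1 := createTestTx(1, big.NewInt(1000))
tx2 := createTestTx(2, big.NewInt(2000))
op.Add(tx1)
op.Add(tx2)
if cached, ok := op.Get(tx1.Hash()); ok { // O(1) lookup through the index map
	fmt.Println("still buffered:", cached.Hash())
}
oldest := op.Flush(1) // pops tx1 first: the heap orders by insertion timestamp
fmt.Println(len(oldest), op.Len(), op.Size())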
core/txpool/legacypool/tx_overflowpool_test.go (new file, 266 lines)
@@ -0,0 +1,266 @@

package legacypool

import (
	"math/big"
	rand2 "math/rand"
	"testing"
	"time"

	"github.com/cometbft/cometbft/libs/rand"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// Helper function to create a test transaction
func createTestTx(nonce uint64, gasPrice *big.Int) *types.Transaction {
	to := common.HexToAddress("0x1234567890123456789012345678901234567890")
	return types.NewTransaction(nonce, to, big.NewInt(1000), 21000, gasPrice, nil)
}

func TestNewTxOverflowPoolHeap(t *testing.T) {
	pool := NewTxOverflowPoolHeap(0)
	if pool == nil {
		t.Fatal("NewTxOverflowPoolHeap returned nil")
	}
	if pool.Len() != 0 {
		t.Errorf("New pool should be empty, got length %d", pool.Len())
	}
}

func TestTxOverflowPoolHeapAdd(t *testing.T) {
	pool := NewTxOverflowPoolHeap(1)
	tx := createTestTx(1, big.NewInt(1000))

	pool.Add(tx)
	if pool.Len() != 1 {
		t.Errorf("Pool should have 1 transaction, got %d", pool.Len())
	}

	// Add the same transaction again
	pool.Add(tx)
	if pool.Len() != 1 {
		t.Errorf("Pool should still have 1 transaction after adding duplicate, got %d", pool.Len())
	}
}

func TestTxOverflowPoolHeapGet(t *testing.T) {
	pool := NewTxOverflowPoolHeap(1)
	tx := createTestTx(1, big.NewInt(1000))
	pool.Add(tx)

	gotTx, exists := pool.Get(tx.Hash())
	if !exists {
		t.Fatal("Get returned false for existing transaction")
	}
	if gotTx.Hash() != tx.Hash() {
		t.Errorf("Get returned wrong transaction. Want %v, got %v", tx.Hash(), gotTx.Hash())
	}

	_, exists = pool.Get(common.Hash{})
	if exists {
		t.Error("Get returned true for non-existent transaction")
	}
}

func TestTxOverflowPoolHeapRemove(t *testing.T) {
	pool := NewTxOverflowPoolHeap(1)
	tx := createTestTx(1, big.NewInt(1000))
	pool.Add(tx)

	pool.Remove(tx.Hash())
	if pool.Len() != 0 {
		t.Errorf("Pool should be empty after removing the only transaction, got length %d", pool.Len())
	}

	// Try to remove non-existent transaction
	pool.Remove(common.Hash{})
	if pool.Len() != 0 {
		t.Error("Removing non-existent transaction should not affect pool size")
	}
}

func TestTxOverflowPoolHeapPopN(t *testing.T) {
	pool := NewTxOverflowPoolHeap(3)
	tx1 := createTestTx(1, big.NewInt(1000))
	tx2 := createTestTx(2, big.NewInt(2000))
	tx3 := createTestTx(3, big.NewInt(3000))

	pool.Add(tx1)
	time.Sleep(time.Millisecond) // Ensure different timestamps
	pool.Add(tx2)
	time.Sleep(time.Millisecond)
	pool.Add(tx3)

	popped := pool.Flush(2)
	if len(popped) != 2 {
		t.Fatalf("PopN(2) should return 2 transactions, got %d", len(popped))
	}
	if popped[0].Hash() != tx1.Hash() || popped[1].Hash() != tx2.Hash() {
		t.Error("PopN returned transactions in wrong order")
	}
	if pool.Len() != 1 {
		t.Errorf("Pool should have 1 transaction left, got %d", pool.Len())
	}

	// Pop more than available
	popped = pool.Flush(2)
	if len(popped) != 1 {
		t.Fatalf("PopN(2) should return 1 transaction when only 1 is left, got %d", len(popped))
	}
	if popped[0].Hash() != tx3.Hash() {
		t.Error("PopN returned wrong transaction")
	}
	if pool.Len() != 0 {
		t.Errorf("Pool should be empty, got length %d", pool.Len())
	}
}

func TestTxOverflowPoolHeapOrdering(t *testing.T) {
	pool := NewTxOverflowPoolHeap(3)
	tx1 := createTestTx(1, big.NewInt(1000))
	tx2 := createTestTx(2, big.NewInt(2000))
	tx3 := createTestTx(3, big.NewInt(3000))

	pool.Add(tx2)
	time.Sleep(time.Millisecond) // Ensure different timestamps
	pool.Add(tx1)
	pool.Add(tx3) // Added immediately after tx1, should have same timestamp but higher sequence

	popped := pool.Flush(3)
	if len(popped) != 3 {
		t.Fatalf("PopN(3) should return 3 transactions, got %d", len(popped))
	}
	if popped[0].Hash() != tx2.Hash() || popped[1].Hash() != tx1.Hash() || popped[2].Hash() != tx3.Hash() {
		t.Error("Transactions not popped in correct order (earliest timestamp first, then by sequence)")
	}
}

func TestTxOverflowPoolHeapLen(t *testing.T) {
	pool := NewTxOverflowPoolHeap(2)
	if pool.Len() != 0 {
		t.Errorf("New pool should have length 0, got %d", pool.Len())
	}

	pool.Add(createTestTx(1, big.NewInt(1000)))
	if pool.Len() != 1 {
		t.Errorf("Pool should have length 1 after adding a transaction, got %d", pool.Len())
	}

	pool.Add(createTestTx(2, big.NewInt(2000)))
	if pool.Len() != 2 {
		t.Errorf("Pool should have length 2 after adding another transaction, got %d", pool.Len())
	}

	pool.Flush(1)
	if pool.Len() != 1 {
		t.Errorf("Pool should have length 1 after popping a transaction, got %d", pool.Len())
	}
}

// Helper function to create a random test transaction
func createRandomTestTx() *types.Transaction {
	nonce := uint64(rand.Intn(1000000))
	to := common.BytesToAddress(rand.Bytes(20))
	amount := new(big.Int).Rand(rand2.New(rand2.NewSource(rand.Int63())), big.NewInt(1e18))
	gasLimit := uint64(21000)
	gasPrice := new(big.Int).Rand(rand2.New(rand2.NewSource(rand.Int63())), big.NewInt(1e9))
	data := rand.Bytes(100)
	return types.NewTransaction(nonce, to, amount, gasLimit, gasPrice, data)
}

func createRandomTestTxs(n int) []*types.Transaction {
	txs := make([]*types.Transaction, n)
	for i := 0; i < n; i++ {
		txs[i] = createRandomTestTx()
	}
	return txs
}

// goos: darwin
// goarch: arm64
// pkg: github.com/ethereum/go-ethereum/core/txpool/legacypool
// BenchmarkTxOverflowPoolHeapAdd-8   813326   2858 ns/op
func BenchmarkTxOverflowPoolHeapAdd(b *testing.B) {
	pool := NewTxOverflowPoolHeap(uint64(b.N))
	txs := createRandomTestTxs(b.N)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		pool.Add(txs[i])
	}
}

// BenchmarkTxOverflowPoolHeapGet-8   32613938   35.63 ns/op
func BenchmarkTxOverflowPoolHeapGet(b *testing.B) {
	pool := NewTxOverflowPoolHeap(1000)
	txs := createRandomTestTxs(1000)
	for _, tx := range txs {
		pool.Add(tx)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		pool.Get(txs[i%1000].Hash())
	}
}

// BenchmarkTxOverflowPoolHeapRemove-8   3020841   417.8 ns/op
func BenchmarkTxOverflowPoolHeapRemove(b *testing.B) {
	pool := NewTxOverflowPoolHeap(uint64(b.N))
	txs := createRandomTestTxs(b.N)
	for _, tx := range txs {
		pool.Add(tx)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		pool.Remove(txs[i].Hash())
	}
}

// BenchmarkTxOverflowPoolHeapFlush-8   42963656   29.90 ns/op
func BenchmarkTxOverflowPoolHeapFlush(b *testing.B) {
	pool := NewTxOverflowPoolHeap(1000)
	txs := createRandomTestTxs(1000)
	for _, tx := range txs {
		pool.Add(tx)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		pool.Flush(10)
	}
}

// BenchmarkTxOverflowPoolHeapLen-8   79147188   20.07 ns/op
func BenchmarkTxOverflowPoolHeapLen(b *testing.B) {
	pool := NewTxOverflowPoolHeap(1000)
	txs := createRandomTestTxs(1000)
	for _, tx := range txs {
		pool.Add(tx)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		pool.Len()
	}
}

// BenchmarkTxOverflowPoolHeapAddRemove-8   902896   1546 ns/op
func BenchmarkTxOverflowPoolHeapAddRemove(b *testing.B) {
	pool := NewTxOverflowPoolHeap(uint64(b.N))
	txs := createRandomTestTxs(b.N)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		pool.Add(txs[i])
		pool.Remove(txs[i].Hash())
	}
}

// BenchmarkTxOverflowPoolHeapAddFlush-8   84417   14899 ns/op
func BenchmarkTxOverflowPoolHeapAddFlush(b *testing.B) {
	pool := NewTxOverflowPoolHeap(uint64(b.N * 10))
	txs := createRandomTestTxs(b.N * 10)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		for j := 0; j < 10; j++ {
			pool.Add(txs[i*10+j])
		}
		pool.Flush(10)
	}
}
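The figures quoted in the benchmark comments (darwin/arm64) can be reproduced with the standard Go tooling, e.g.:

go test ./core/txpool/legacypool -run '^$' -bench BenchmarkTxOverflowPoolHeap -benchmem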
@@ -1,10 +1,9 @@
 // Code generated by github.com/fjl/gencodec. DO NOT EDIT.

-package ethapi
+package types

 import (
 	"encoding/json"

 	"github.com/ethereum/go-ethereum/common/hexutil"
 )

core/types/transaction_options.go (new file, 43 lines)
@@ -0,0 +1,43 @@

package types

import (
	"encoding/json"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
)

type AccountStorage struct {
	StorageRoot  *common.Hash
	StorageSlots map[common.Hash]common.Hash
}

func (a *AccountStorage) UnmarshalJSON(data []byte) error {
	var hash common.Hash
	if err := json.Unmarshal(data, &hash); err == nil {
		a.StorageRoot = &hash
		return nil
	}
	return json.Unmarshal(data, &a.StorageSlots)
}

func (a AccountStorage) MarshalJSON() ([]byte, error) {
	if a.StorageRoot != nil {
		return json.Marshal(*a.StorageRoot)
	}
	return json.Marshal(a.StorageSlots)
}

type KnownAccounts map[common.Address]AccountStorage

// It is known that marshaling is broken
// https://github.com/golang/go/issues/55890

//go:generate go run github.com/fjl/gencodec -type TransactionOpts -out gen_tx_opts_json.go
type TransactionOpts struct {
	KnownAccounts  KnownAccounts   `json:"knownAccounts"`
	BlockNumberMin *hexutil.Uint64 `json:"blockNumberMin,omitempty"`
	BlockNumberMax *hexutil.Uint64 `json:"blockNumberMax,omitempty"`
	TimestampMin   *hexutil.Uint64 `json:"timestampMin,omitempty"`
	TimestampMax   *hexutil.Uint64 `json:"timestampMax,omitempty"`
}
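AccountStorage intentionally round-trips two wire shapes, which is why UnmarshalJSON first tries a bare hash and only falls back to a slot map. Both of the following decode successfully; they are exactly the StateRoot and StorageSlots cases exercised by the test file below:

{"knownAccounts":{"0x6b3A8798E5Fb9fC5603F3aB5eA2e8136694e55d0":"0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563"}}

{"knownAccounts":{"0x6b3A8798E5Fb9fC5603F3aB5eA2e8136694e55d0":{"0xc65a7bb8d6351c1cf70c95a316cc6a92839c986682d98bc35f958f4883f9d2a8":"0x0000000000000000000000000000000000000000000000000000000000000000"}}}

The first form pins the account's entire storage root; the second pins an individual storage slot to a value.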
@@ -1,4 +1,4 @@
-package ethapi_test
+package types

 import (
 	"encoding/json"

@@ -7,7 +7,6 @@ import (

 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/hexutil"
-	"github.com/ethereum/go-ethereum/internal/ethapi"
 )

 func ptr(hash common.Hash) *common.Hash {

@@ -23,15 +22,15 @@ func TestTransactionOptsJSONUnMarshalTrip(t *testing.T) {
 		name     string
 		input    string
 		mustFail bool
-		expected ethapi.TransactionOpts
+		expected TransactionOpts
 	}{
 		{
 			"StateRoot",
 			`{"knownAccounts":{"0x6b3A8798E5Fb9fC5603F3aB5eA2e8136694e55d0":"0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563"}}`,
 			false,
-			ethapi.TransactionOpts{
-				KnownAccounts: map[common.Address]ethapi.AccountStorage{
-					common.HexToAddress("0x6b3A8798E5Fb9fC5603F3aB5eA2e8136694e55d0"): ethapi.AccountStorage{
+			TransactionOpts{
+				KnownAccounts: map[common.Address]AccountStorage{
+					common.HexToAddress("0x6b3A8798E5Fb9fC5603F3aB5eA2e8136694e55d0"): AccountStorage{
 						StorageRoot: ptr(common.HexToHash("0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")),
 					},
 				},

@@ -41,9 +40,9 @@ func TestTransactionOptsJSONUnMarshalTrip(t *testing.T) {
 			"StorageSlots",
 			`{"knownAccounts":{"0x6b3A8798E5Fb9fC5603F3aB5eA2e8136694e55d0":{"0xc65a7bb8d6351c1cf70c95a316cc6a92839c986682d98bc35f958f4883f9d2a8":"0x0000000000000000000000000000000000000000000000000000000000000000"}}}`,
 			false,
-			ethapi.TransactionOpts{
-				KnownAccounts: map[common.Address]ethapi.AccountStorage{
-					common.HexToAddress("0x6b3A8798E5Fb9fC5603F3aB5eA2e8136694e55d0"): ethapi.AccountStorage{
+			TransactionOpts{
+				KnownAccounts: map[common.Address]AccountStorage{
+					common.HexToAddress("0x6b3A8798E5Fb9fC5603F3aB5eA2e8136694e55d0"): AccountStorage{
 						StorageRoot: nil,
 						StorageSlots: map[common.Hash]common.Hash{
 							common.HexToHash("0xc65a7bb8d6351c1cf70c95a316cc6a92839c986682d98bc35f958f4883f9d2a8"): common.HexToHash("0x"),

@@ -56,15 +55,15 @@ func TestTransactionOptsJSONUnMarshalTrip(t *testing.T) {
 			"EmptyObject",
 			`{"knownAccounts":{}}`,
 			false,
-			ethapi.TransactionOpts{
-				KnownAccounts: make(map[common.Address]ethapi.AccountStorage),
+			TransactionOpts{
+				KnownAccounts: make(map[common.Address]AccountStorage),
 			},
 		},
 		{
 			"EmptyStrings",
 			`{"knownAccounts":{"":""}}`,
 			true,
-			ethapi.TransactionOpts{
+			TransactionOpts{
 				KnownAccounts: nil,
 			},
 		},

@@ -72,7 +71,7 @@ func TestTransactionOptsJSONUnMarshalTrip(t *testing.T) {
 			"BlockNumberMin",
 			`{"blockNumberMin":"0x1"}`,
 			false,
-			ethapi.TransactionOpts{
+			TransactionOpts{
 				BlockNumberMin: u64Ptr(1),
 			},
 		},

@@ -80,7 +79,7 @@ func TestTransactionOptsJSONUnMarshalTrip(t *testing.T) {
 			"BlockNumberMax",
 			`{"blockNumberMin":"0x1", "blockNumberMax":"0x2"}`,
 			false,
-			ethapi.TransactionOpts{
+			TransactionOpts{
 				BlockNumberMin: u64Ptr(1),
 				BlockNumberMax: u64Ptr(2),
 			},

@@ -89,7 +88,7 @@ func TestTransactionOptsJSONUnMarshalTrip(t *testing.T) {
 			"TimestampMin",
 			`{"timestampMin":"0xffff"}`,
 			false,
-			ethapi.TransactionOpts{
+			TransactionOpts{
 				TimestampMin: u64Ptr(0xffff),
 			},
 		},

@@ -97,7 +96,7 @@ func TestTransactionOptsJSONUnMarshalTrip(t *testing.T) {
 			"TimestampMax",
 			`{"timestampMax":"0xffffff"}`,
 			false,
-			ethapi.TransactionOpts{
+			TransactionOpts{
 				TimestampMax: u64Ptr(0xffffff),
 			},
 		},

@@ -105,7 +104,7 @@ func TestTransactionOptsJSONUnMarshalTrip(t *testing.T) {

 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			var opts ethapi.TransactionOpts
+			var opts TransactionOpts
 			err := json.Unmarshal([]byte(test.input), &opts)
 			if test.mustFail && err == nil {
 				t.Errorf("Test %s should fail", test.name)
@@ -17,7 +17,7 @@
 # - A constraint describing the requirements of the law, called "require"
 # * Implementations are transliterated into functions that operate as well on
 #   algebraic input points, and are called once per combination of branches
-#   exectured. Each execution returns:
+#   executed. Each execution returns:
 # - A constraint describing the assumptions this implementation requires
 #   (such as Z1=1), called "assumeFormula"
 # - A constraint describing the assumptions this specific branch requires,
@@ -193,11 +193,16 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
 		chainConfig.CancunTime = config.OverridePassedForkTime
 		chainConfig.HaberTime = config.OverridePassedForkTime
 		chainConfig.HaberFixTime = config.OverridePassedForkTime
+		chainConfig.BohrTime = config.OverridePassedForkTime
 		overrides.OverridePassedForkTime = config.OverridePassedForkTime
 	}
-	if config.OverrideBohr != nil {
-		chainConfig.BohrTime = config.OverrideBohr
-		overrides.OverrideBohr = config.OverrideBohr
+	if config.OverridePascal != nil {
+		chainConfig.PascalTime = config.OverridePascal
+		overrides.OverridePascal = config.OverridePascal
+	}
+	if config.OverridePrague != nil {
+		chainConfig.PragueTime = config.OverridePrague
+		overrides.OverridePrague = config.OverridePrague
 	}
 	if config.OverrideVerkle != nil {
 		chainConfig.VerkleTime = config.OverrideVerkle

@@ -291,9 +296,6 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
 		}
 	)
 	bcOps := make([]core.BlockChainOption, 0)
-	if config.PipeCommit {
-		bcOps = append(bcOps, core.EnablePipelineCommit)
-	}
 	if config.PersistDiff {
 		bcOps = append(bcOps, core.EnablePersistDiff(config.DiffBlock))
 	}
@@ -107,7 +107,6 @@ type Config struct {
 	DirectBroadcast     bool
 	DisableSnapProtocol bool // Whether disable snap protocol
 	EnableTrustProtocol bool // Whether enable trust protocol
-	PipeCommit          bool
 	RangeLimit          bool

 	// Deprecated, use 'TransactionHistory' instead.

@@ -191,8 +190,11 @@ type Config struct {
 	// OverridePassedForkTime
 	OverridePassedForkTime *uint64 `toml:",omitempty"`

-	// OverrideBohr (TODO: remove after the fork)
-	OverrideBohr *uint64 `toml:",omitempty"`
+	// OverridePascal (TODO: remove after the fork)
+	OverridePascal *uint64 `toml:",omitempty"`
+
+	// OverridePrague (TODO: remove after the fork)
+	OverridePrague *uint64 `toml:",omitempty"`

 	// OverrideVerkle (TODO: remove after the fork)
 	OverrideVerkle *uint64 `toml:",omitempty"`
@@ -30,7 +30,6 @@ func (c Config) MarshalTOML() (interface{}, error) {
 		DirectBroadcast     bool
 		DisableSnapProtocol bool
 		EnableTrustProtocol bool
-		PipeCommit          bool
 		RangeLimit          bool
 		TxLookupLimit       uint64 `toml:",omitempty"`
 		TransactionHistory  uint64 `toml:",omitempty"`

@@ -70,8 +69,9 @@ func (c Config) MarshalTOML() (interface{}, error) {
 		RPCGasCap              uint64
 		RPCEVMTimeout          time.Duration
 		RPCTxFeeCap            float64
 		OverridePassedForkTime *uint64 `toml:",omitempty"`
-		OverrideBohr           *uint64 `toml:",omitempty"`
+		OverridePascal         *uint64 `toml:",omitempty"`
+		OverridePrague         *uint64 `toml:",omitempty"`
 		OverrideVerkle         *uint64 `toml:",omitempty"`
 		BlobExtraReserve       uint64
 	}

@@ -89,7 +89,6 @@ func (c Config) MarshalTOML() (interface{}, error) {
 	enc.DirectBroadcast = c.DirectBroadcast
 	enc.DisableSnapProtocol = c.DisableSnapProtocol
 	enc.EnableTrustProtocol = c.EnableTrustProtocol
-	enc.PipeCommit = c.PipeCommit
 	enc.RangeLimit = c.RangeLimit
 	enc.TxLookupLimit = c.TxLookupLimit
 	enc.TransactionHistory = c.TransactionHistory

@@ -130,7 +129,8 @@ func (c Config) MarshalTOML() (interface{}, error) {
 	enc.RPCEVMTimeout = c.RPCEVMTimeout
 	enc.RPCTxFeeCap = c.RPCTxFeeCap
 	enc.OverridePassedForkTime = c.OverridePassedForkTime
-	enc.OverrideBohr = c.OverrideBohr
+	enc.OverridePascal = c.OverridePascal
+	enc.OverridePrague = c.OverridePrague
 	enc.OverrideVerkle = c.OverrideVerkle
 	enc.BlobExtraReserve = c.BlobExtraReserve
 	return &enc, nil

@@ -152,7 +152,6 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
 		DirectBroadcast     *bool
 		DisableSnapProtocol *bool
 		EnableTrustProtocol *bool
-		PipeCommit          *bool
 		RangeLimit          *bool
 		TxLookupLimit       *uint64 `toml:",omitempty"`
 		TransactionHistory  *uint64 `toml:",omitempty"`

@@ -192,8 +191,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
 		RPCGasCap              *uint64
 		RPCEVMTimeout          *time.Duration
 		RPCTxFeeCap            *float64
 		OverridePassedForkTime *uint64 `toml:",omitempty"`
-		OverrideBohr           *uint64 `toml:",omitempty"`
+		OverridePascal         *uint64 `toml:",omitempty"`
+		OverridePrague         *uint64 `toml:",omitempty"`
 		OverrideVerkle         *uint64 `toml:",omitempty"`
 		BlobExtraReserve       *uint64
 	}

@@ -240,9 +240,6 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
 	if dec.EnableTrustProtocol != nil {
 		c.EnableTrustProtocol = *dec.EnableTrustProtocol
 	}
-	if dec.PipeCommit != nil {
-		c.PipeCommit = *dec.PipeCommit
-	}
 	if dec.RangeLimit != nil {
 		c.RangeLimit = *dec.RangeLimit
 	}

@@ -363,8 +360,11 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
 	if dec.OverridePassedForkTime != nil {
 		c.OverridePassedForkTime = dec.OverridePassedForkTime
 	}
-	if dec.OverrideBohr != nil {
-		c.OverrideBohr = dec.OverrideBohr
+	if dec.OverridePascal != nil {
+		c.OverridePascal = dec.OverridePascal
+	}
+	if dec.OverridePrague != nil {
+		c.OverridePrague = dec.OverridePrague
 	}
 	if dec.OverrideVerkle != nil {
 		c.OverrideVerkle = dec.OverrideVerkle
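For operators, the renamed overrides surface as keys in geth's TOML config file. A hypothetical snippet (section name per geth's usual dumpconfig layout; the timestamps are placeholders, not real fork times):

[Eth]
OverridePascal = 1800000000 # unix seconds; forces PascalTime for this node
OverridePrague = 1810000000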
@@ -29,7 +29,6 @@ import (
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/ethereum/go-ethereum/internal/ethapi"
 	"github.com/ethereum/go-ethereum/rpc"
 )
 
@@ -737,7 +736,7 @@ func (ec *Client) SendTransaction(ctx context.Context, tx *types.Transaction) error {
 //
 // If the transaction was a contract creation use the TransactionReceipt method to get the
 // contract address after the transaction has been mined.
-func (ec *Client) SendTransactionConditional(ctx context.Context, tx *types.Transaction, opts ethapi.TransactionOpts) error {
+func (ec *Client) SendTransactionConditional(ctx context.Context, tx *types.Transaction, opts types.TransactionOpts) error {
 	data, err := tx.MarshalBinary()
 	if err != nil {
 		return err
@@ -34,7 +34,6 @@ import (
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/eth"
 	"github.com/ethereum/go-ethereum/eth/ethconfig"
-	"github.com/ethereum/go-ethereum/internal/ethapi"
 	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/rlp"
@@ -770,9 +769,9 @@ func sendTransactionConditional(ec *Client) error {
 	}
 
 	root := common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
-	return ec.SendTransactionConditional(context.Background(), tx, ethapi.TransactionOpts{
-		KnownAccounts: map[common.Address]ethapi.AccountStorage{
-			testAddr: ethapi.AccountStorage{
+	return ec.SendTransactionConditional(context.Background(), tx, types.TransactionOpts{
+		KnownAccounts: map[common.Address]types.AccountStorage{
+			testAddr: types.AccountStorage{
 				StorageRoot: &root,
 			},
 		},
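The test above pins a whole storage root. Since TransactionOpts and AccountStorage now live in core/types, callers can instead pin individual slots and bound the inclusion window. A sketch of that form, assuming the bsc fork's ethclient from this diff; the address, slot values, and block bounds are hypothetical:

```go
package example

import (
	"context"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethclient"
)

// sendPinned submits tx only while the pinned slot still holds the expected
// value and the inclusion block falls inside [min, max].
func sendPinned(ec *ethclient.Client, tx *types.Transaction) error {
	min := hexutil.Uint64(41_000_000) // hypothetical lower bound
	max := hexutil.Uint64(41_000_100) // hypothetical upper bound
	return ec.SendTransactionConditional(context.Background(), tx, types.TransactionOpts{
		KnownAccounts: map[common.Address]types.AccountStorage{
			common.HexToAddress("0x1234567890123456789012345678901234567890"): {
				StorageSlots: map[common.Hash]common.Hash{
					common.HexToHash("0x01"): common.HexToHash("0x02"),
				},
			},
		},
		BlockNumberMin: &min,
		BlockNumberMax: &max,
	})
}
```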
@@ -30,7 +30,6 @@ import (
 	"github.com/ethereum/go-ethereum/eth/ethconfig"
 	"github.com/ethereum/go-ethereum/eth/filters"
 	"github.com/ethereum/go-ethereum/ethclient"
-	"github.com/ethereum/go-ethereum/internal/ethapi"
 	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/params"
@@ -39,7 +38,7 @@ import (
 
 // TransactionConditionalSender injects the conditional transaction into the pending pool for execution after verification.
 type TransactionConditionalSender interface {
-	SendTransactionConditional(ctx context.Context, tx *types.Transaction, opts ethapi.TransactionOpts) error
+	SendTransactionConditional(ctx context.Context, tx *types.Transaction, opts types.TransactionOpts) error
 }
 
 // Client exposes the methods provided by the Ethereum RPC client.
@@ -2314,7 +2314,7 @@ func (s *TransactionAPI) SendRawTransaction(ctx context.Context, input hexutil.Bytes) (common.Hash, error) {
 
 // SendRawTransactionConditional will add the signed transaction to the transaction pool.
 // The sender/bundler is responsible for signing the transaction
-func (s *TransactionAPI) SendRawTransactionConditional(ctx context.Context, input hexutil.Bytes, opts TransactionOpts) (common.Hash, error) {
+func (s *TransactionAPI) SendRawTransactionConditional(ctx context.Context, input hexutil.Bytes, opts types.TransactionOpts) (common.Hash, error) {
 	tx := new(types.Transaction)
 	if err := tx.UnmarshalBinary(input); err != nil {
 		return common.Hash{}, err
@@ -2324,7 +2324,7 @@ func (s *TransactionAPI) SendRawTransactionConditional(ctx context.Context, input hexutil.Bytes, opts TransactionOpts) (common.Hash, error) {
 	if state == nil || err != nil {
 		return common.Hash{}, err
 	}
-	if err := opts.Check(header.Number.Uint64(), header.Time, state); err != nil {
+	if err := TxOptsCheck(opts, header.Number.Uint64(), header.Time, state); err != nil {
 		return common.Hash{}, err
 	}
 	return SubmitTransaction(ctx, s.b, tx)
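For bundlers that talk to the node directly, the handler above is reachable over raw RPC. A sketch of the wire-level call; the method name is an assumption derived from geth's namespace_lowerCamel registration convention for TransactionAPI, and the option values are hypothetical:

```go
package example

import (
	"context"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/rpc"
)

// sendRawConditional calls the conditional-send endpoint; the JSON keys
// mirror the TransactionOpts tags ("knownAccounts", "blockNumberMin", ...).
func sendRawConditional(ctx context.Context, c *rpc.Client, rawTx []byte) (common.Hash, error) {
	var txHash common.Hash
	err := c.CallContext(ctx, &txHash, "eth_sendRawTransactionConditional", // assumed method name
		hexutil.Encode(rawTx), // signed, RLP-encoded transaction bytes
		map[string]interface{}{
			"knownAccounts":  map[string]interface{}{},   // no state pins
			"blockNumberMax": hexutil.Uint64(41_000_100), // hypothetical upper bound
		},
	)
	return txHash, err
}
```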
@@ -2,52 +2,15 @@ package ethapi
 
 import (
 	"bytes"
-	"encoding/json"
 	"errors"
 
-	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/core/state"
+	"github.com/ethereum/go-ethereum/core/types"
 )
 
-type AccountStorage struct {
-	StorageRoot  *common.Hash
-	StorageSlots map[common.Hash]common.Hash
-}
-
-func (a *AccountStorage) UnmarshalJSON(data []byte) error {
-	var hash common.Hash
-	if err := json.Unmarshal(data, &hash); err == nil {
-		a.StorageRoot = &hash
-		return nil
-	}
-	return json.Unmarshal(data, &a.StorageSlots)
-}
-
-func (a AccountStorage) MarshalJSON() ([]byte, error) {
-	if a.StorageRoot != nil {
-		return json.Marshal(*a.StorageRoot)
-	}
-	return json.Marshal(a.StorageSlots)
-}
-
-type KnownAccounts map[common.Address]AccountStorage
-
-// It is known that marshaling is broken
-// https://github.com/golang/go/issues/55890
-
-//go:generate go run github.com/fjl/gencodec -type TransactionOpts -out gen_tx_opts_json.go
-type TransactionOpts struct {
-	KnownAccounts  KnownAccounts   `json:"knownAccounts"`
-	BlockNumberMin *hexutil.Uint64 `json:"blockNumberMin,omitempty"`
-	BlockNumberMax *hexutil.Uint64 `json:"blockNumberMax,omitempty"`
-	TimestampMin   *hexutil.Uint64 `json:"timestampMin,omitempty"`
-	TimestampMax   *hexutil.Uint64 `json:"timestampMax,omitempty"`
-}
-
 const MaxNumberOfEntries = 1000
 
-func (o *TransactionOpts) Check(blockNumber uint64, timeStamp uint64, statedb *state.StateDB) error {
+func TxOptsCheck(o types.TransactionOpts, blockNumber uint64, timeStamp uint64, statedb *state.StateDB) error {
 	if o.BlockNumberMin != nil && blockNumber < uint64(*o.BlockNumberMin) {
 		return errors.New("BlockNumberMin condition not met")
 	}
@@ -71,10 +34,10 @@ func (o *TransactionOpts) Check(blockNumber uint64, timeStamp uint64, statedb *state.StateDB) error {
 	if counter > MaxNumberOfEntries {
 		return errors.New("knownAccounts too large")
 	}
-	return o.CheckStorage(statedb)
+	return TxOptsCheckStorage(o, statedb)
 }
 
-func (o *TransactionOpts) CheckStorage(statedb *state.StateDB) error {
+func TxOptsCheckStorage(o types.TransactionOpts, statedb *state.StateDB) error {
 	for address, accountStorage := range o.KnownAccounts {
 		if accountStorage.StorageRoot != nil {
 			rootHash := statedb.GetRoot(address)
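With the type moved to core/types (which cannot import ethapi), the methods become free functions: call sites change from o.Check(...) to TxOptsCheck(o, ...). The block/timestamp window portion of that check, restated standalone for illustration; the BlockNumberMax message is assumed symmetric to the BlockNumberMin one shown in the diff:

```go
package example

import (
	"errors"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

// windowOK mirrors only the block-number window part of TxOptsCheck; the real
// function also checks timestamps and walks KnownAccounts, rejecting more
// than MaxNumberOfEntries pinned slots.
func windowOK(min, max *hexutil.Uint64, blockNumber uint64) error {
	if min != nil && blockNumber < uint64(*min) {
		return errors.New("BlockNumberMin condition not met")
	}
	if max != nil && blockNumber > uint64(*max) {
		return errors.New("BlockNumberMax condition not met") // assumed message
	}
	return nil
}
```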
@@ -1430,15 +1430,6 @@ func (w *worker) commit(env *environment, interval func(), update bool, start time.Time) error {
 	if interval != nil {
 		interval()
 	}
-	/*
-
-		err := env.state.WaitPipeVerification()
-		if err != nil {
-			return err
-		}
-		env.state.CorrectAccountsRoot(w.chain.CurrentBlock().Root)
-	*/
-
 	fees := env.state.GetBalance(consensus.SystemAddress).ToBig()
 	feesInEther := new(big.Float).Quo(new(big.Float).SetInt(fees), big.NewFloat(params.Ether))
 	// Withdrawals are set to nil here, because this is only called in PoW.
@@ -155,6 +155,9 @@ var (
 		HaberTime:    newUint64(1718863500), // 2024-06-20 06:05:00 AM UTC
 		HaberFixTime: newUint64(1727316120), // 2024-09-26 02:02:00 AM UTC
 		BohrTime:     newUint64(1727317200), // 2024-09-26 02:20:00 AM UTC
+		// TODO
+		PascalTime: nil,
+		PragueTime: nil,
 
 		Parlia: &ParliaConfig{
 			Period: 3,
@@ -196,6 +199,9 @@ var (
 		HaberTime:    newUint64(1716962820), // 2024-05-29 06:07:00 AM UTC
 		HaberFixTime: newUint64(1719986788), // 2024-07-03 06:06:28 AM UTC
 		BohrTime:     newUint64(1724116996), // 2024-08-20 01:23:16 AM UTC
+		// TODO
+		PascalTime: nil,
+		PragueTime: nil,
 
 		Parlia: &ParliaConfig{
 			Period: 3,
@@ -238,6 +244,9 @@ var (
 		HaberTime:    newUint64(0),
 		HaberFixTime: newUint64(0),
 		BohrTime:     newUint64(0),
+		// TODO: set them to `0` when passed on the mainnet
+		PascalTime: nil,
+		PragueTime: nil,
 
 		Parlia: &ParliaConfig{
 			Period: 3,
@@ -517,6 +526,7 @@ type ChainConfig struct {
 	HaberTime    *uint64 `json:"haberTime,omitempty"`    // Haber switch time (nil = no fork, 0 = already on haber)
 	HaberFixTime *uint64 `json:"haberFixTime,omitempty"` // HaberFix switch time (nil = no fork, 0 = already on haberFix)
 	BohrTime     *uint64 `json:"bohrTime,omitempty"`     // Bohr switch time (nil = no fork, 0 = already on bohr)
+	PascalTime   *uint64 `json:"pascalTime,omitempty"`   // Pascal switch time (nil = no fork, 0 = already on pascal)
 	PragueTime   *uint64 `json:"pragueTime,omitempty"`   // Prague switch time (nil = no fork, 0 = already on prague)
 	VerkleTime   *uint64 `json:"verkleTime,omitempty"`   // Verkle switch time (nil = no fork, 0 = already on verkle)
 
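Because the new field carries a `pascalTime` JSON tag, a private-network chain config can schedule the fork directly. A minimal sketch under that assumption; the chain ID and timestamp are hypothetical:

```go
package example

import (
	"math/big"

	"github.com/ethereum/go-ethereum/params"
)

func uint64ptr(v uint64) *uint64 { return &v }

// devConfig schedules Pascal at a fixed wall-clock time on a private chain.
func devConfig() *params.ChainConfig {
	return &params.ChainConfig{
		ChainID:     big.NewInt(714),          // hypothetical chain ID
		LondonBlock: big.NewInt(0),            // Pascal additionally requires London (see IsPascal below)
		PascalTime:  uint64ptr(1740000000),    // nil = no fork, 0 = already active
	}
}
```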
@@ -637,7 +647,17 @@ func (c *ChainConfig) String() string {
 		BohrTime = big.NewInt(0).SetUint64(*c.BohrTime)
 	}
 
-	return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, Ramanujan: %v, Niels: %v, MirrorSync: %v, Bruno: %v, Berlin: %v, YOLO v3: %v, CatalystBlock: %v, London: %v, ArrowGlacier: %v, MergeFork:%v, Euler: %v, Gibbs: %v, Nano: %v, Moran: %v, Planck: %v,Luban: %v, Plato: %v, Hertz: %v, Hertzfix: %v, ShanghaiTime: %v, KeplerTime: %v, FeynmanTime: %v, FeynmanFixTime: %v, CancunTime: %v, HaberTime: %v, HaberFixTime: %v, BohrTime: %v, Engine: %v}",
+	var PascalTime *big.Int
+	if c.PascalTime != nil {
+		PascalTime = big.NewInt(0).SetUint64(*c.PascalTime)
+	}
+
+	var PragueTime *big.Int
+	if c.PragueTime != nil {
+		PragueTime = big.NewInt(0).SetUint64(*c.PragueTime)
+	}
+
+	return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, Ramanujan: %v, Niels: %v, MirrorSync: %v, Bruno: %v, Berlin: %v, YOLO v3: %v, CatalystBlock: %v, London: %v, ArrowGlacier: %v, MergeFork:%v, Euler: %v, Gibbs: %v, Nano: %v, Moran: %v, Planck: %v,Luban: %v, Plato: %v, Hertz: %v, Hertzfix: %v, ShanghaiTime: %v, KeplerTime: %v, FeynmanTime: %v, FeynmanFixTime: %v, CancunTime: %v, HaberTime: %v, HaberFixTime: %v, BohrTime: %v, PascalTime: %v, PragueTime: %v, Engine: %v}",
 		c.ChainID,
 		c.HomesteadBlock,
 		c.DAOForkBlock,
@@ -677,6 +697,8 @@ func (c *ChainConfig) String() string {
 		HaberTime,
 		HaberFixTime,
 		BohrTime,
+		PascalTime,
+		PragueTime,
 		engine,
 	)
 }
@@ -977,6 +999,20 @@ func (c *ChainConfig) IsOnBohr(currentBlockNumber *big.Int, lastBlockTime uint64, currentBlockTime uint64) bool {
 	return !c.IsBohr(lastBlockNumber, lastBlockTime) && c.IsBohr(currentBlockNumber, currentBlockTime)
 }
 
+// IsPascal returns whether time is either equal to the Pascal fork time or greater.
+func (c *ChainConfig) IsPascal(num *big.Int, time uint64) bool {
+	return c.IsLondon(num) && isTimestampForked(c.PascalTime, time)
+}
+
+// IsOnPascal returns whether currentBlockTime is either equal to the Pascal fork time or greater firstly.
+func (c *ChainConfig) IsOnPascal(currentBlockNumber *big.Int, lastBlockTime uint64, currentBlockTime uint64) bool {
+	lastBlockNumber := new(big.Int)
+	if currentBlockNumber.Cmp(big.NewInt(1)) >= 0 {
+		lastBlockNumber.Sub(currentBlockNumber, big.NewInt(1))
+	}
+	return !c.IsPascal(lastBlockNumber, lastBlockTime) && c.IsPascal(currentBlockNumber, currentBlockTime)
+}
+
 // IsPrague returns whether num is either equal to the Prague fork time or greater.
 func (c *ChainConfig) IsPrague(num *big.Int, time uint64) bool {
 	return c.IsLondon(num) && isTimestampForked(c.PragueTime, time)
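IsOnPascal is edge detection: it fires only on the first block whose timestamp crosses the fork boundary, by comparing the parent's fork status with the current block's. A small demonstration using the functions added above; the fork timestamp and block numbers are hypothetical:

```go
package example

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/params"
)

func demoIsOnPascal() {
	pascal := uint64(1_000_000)
	cfg := &params.ChainConfig{
		ChainID:     big.NewInt(714),
		LondonBlock: big.NewInt(0), // IsPascal also requires London to be active
		PascalTime:  &pascal,
	}

	// Parent timestamp before the fork, current block at the fork: transition block.
	fmt.Println(cfg.IsOnPascal(big.NewInt(100), 999_997, 1_000_000)) // true
	// Parent and current both past the fork: no longer the transition block.
	fmt.Println(cfg.IsOnPascal(big.NewInt(101), 1_000_000, 1_000_003)) // false
}
```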
@@ -1043,7 +1079,8 @@ func (c *ChainConfig) CheckConfigForkOrder() error {
 		{name: "haberTime", timestamp: c.HaberTime},
 		{name: "haberFixTime", timestamp: c.HaberFixTime},
 		{name: "bohrTime", timestamp: c.BohrTime},
-		{name: "pragueTime", timestamp: c.PragueTime, optional: true},
+		{name: "pascalTime", timestamp: c.PascalTime},
+		{name: "pragueTime", timestamp: c.PragueTime},
 		{name: "verkleTime", timestamp: c.VerkleTime, optional: true},
 	} {
 		if lastFork.name != "" {
@@ -1199,6 +1236,9 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, headNumber *big.Int, headTimestamp uint64) *ConfigCompatError {
 	if isForkTimestampIncompatible(c.BohrTime, newcfg.BohrTime, headTimestamp) {
 		return newTimestampCompatError("Bohr fork timestamp", c.BohrTime, newcfg.BohrTime)
 	}
+	if isForkTimestampIncompatible(c.PascalTime, newcfg.PascalTime, headTimestamp) {
+		return newTimestampCompatError("Pascal fork timestamp", c.PascalTime, newcfg.PascalTime)
+	}
 	if isForkTimestampIncompatible(c.PragueTime, newcfg.PragueTime, headTimestamp) {
 		return newTimestampCompatError("Prague fork timestamp", c.PragueTime, newcfg.PragueTime)
 	}
@@ -1382,7 +1422,7 @@ type Rules struct {
 	IsHertz bool
 	IsHertzfix bool
 	IsShanghai, IsKepler, IsFeynman, IsCancun, IsHaber bool
-	IsBohr, IsPrague, IsVerkle bool
+	IsBohr, IsPascal, IsPrague, IsVerkle bool
 }
 
 // Rules ensures c's ChainID is not nil.
@@ -1419,6 +1459,7 @@ func (c *ChainConfig) Rules(num *big.Int, isMerge bool, timestamp uint64) Rules {
 		IsCancun: c.IsCancun(num, timestamp),
 		IsHaber:  c.IsHaber(num, timestamp),
 		IsBohr:   c.IsBohr(num, timestamp),
+		IsPascal: c.IsPascal(num, timestamp),
 		IsPrague: c.IsPrague(num, timestamp),
 		IsVerkle: c.IsVerkle(num, timestamp),
 	}
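Consumers typically snapshot the rule set once per block and branch on the flags rather than re-querying the config. A sketch of how the new IsPascal flag would be consumed; processPascal is a hypothetical stand-in for fork-gated logic:

```go
package example

import (
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/params"
)

// applyForkRules snapshots the rules for one block and gates Pascal-specific
// behavior on the new flag.
func applyForkRules(cfg *params.ChainConfig, header *types.Header) {
	rules := cfg.Rules(header.Number, false /* isMerge */, header.Time)
	if rules.IsPascal {
		processPascal(header) // hypothetical fork-specific handling
	}
}

func processPascal(*types.Header) {}
```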
@@ -347,33 +347,27 @@ func (db *Database) Cap(limit common.StorageSize) error {
 
 	// Keep committing nodes from the flush-list until we're below allowance
 	oldest := db.oldest
-	err := func() error {
-		for size > limit && oldest != (common.Hash{}) {
-			// Fetch the oldest referenced node and push into the batch
-			node := db.dirties[oldest]
-			rawdb.WriteLegacyTrieNode(batch, oldest, node.node)
-
-			// If we exceeded the ideal batch size, commit and reset
-			if batch.ValueSize() >= ethdb.IdealBatchSize {
-				if err := batch.Write(); err != nil {
-					log.Error("Failed to write flush list to disk", "err", err)
-					return err
-				}
-				batch.Reset()
-			}
-			// Iterate to the next flush item, or abort if the size cap was achieved. Size
-			// is the total size, including the useful cached data (hash -> blob), the
-			// cache item metadata, as well as external children mappings.
-			size -= common.StorageSize(common.HashLength + len(node.node) + cachedNodeSize)
-			if node.external != nil {
-				size -= common.StorageSize(len(node.external) * common.HashLength)
-			}
-			oldest = node.flushNext
-		}
-		return nil
-	}()
-	if err != nil {
-		return err
+	for size > limit && oldest != (common.Hash{}) {
+		// Fetch the oldest referenced node and push into the batch
+		node := db.dirties[oldest]
+		rawdb.WriteLegacyTrieNode(batch, oldest, node.node)
+
+		// If we exceeded the ideal batch size, commit and reset
+		if batch.ValueSize() >= ethdb.IdealBatchSize {
+			if err := batch.Write(); err != nil {
+				log.Error("Failed to write flush list to disk", "err", err)
+				return err
+			}
+			batch.Reset()
+		}
+		// Iterate to the next flush item, or abort if the size cap was achieved. Size
+		// is the total size, including the useful cached data (hash -> blob), the
+		// cache item metadata, as well as external children mappings.
+		size -= common.StorageSize(common.HashLength + len(node.node) + cachedNodeSize)
+		if node.external != nil {
+			size -= common.StorageSize(len(node.external) * common.HashLength)
+		}
+		oldest = node.flushNext
 	}
 	// Flush out any remainder data from the last batch
 	if err := batch.Write(); err != nil {
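This Cap hunk drops an immediately-invoked closure, so the `return err` inside the loop now exits Cap directly instead of bubbling through a wrapper. A generic illustration of the simplification; the names are placeholders, not from the diff:

```go
package example

import "errors"

var items []int

func write(int) error { return nil } // placeholder write step

// before: an immediately-invoked closure exists only so an inner
// "return" can stop the loop.
func capBefore() error {
	err := func() error {
		for _, item := range items {
			if err := write(item); err != nil {
				return err
			}
		}
		return nil
	}()
	if err != nil {
		return errors.Join(err) // surface the wrapped error
	}
	return nil
}

// after: returning straight from the enclosing function removes the wrapper.
func capAfter() error {
	for _, item := range items {
		if err := write(item); err != nil {
			return err
		}
	}
	return nil
}
```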