Merge pull request #2320 from bnb-chain/develop
Draft release v1.4.3-alpha
commit
7f3f72ed41
33
CHANGELOG.md
@@ -1,6 +1,33 @@
# Changelog

## v1.4.3

### FEATURE
* [\#2241](https://github.com/bnb-chain/bsc/pull/2241) cmd/utils, core/rawdb, triedb/pathdb: flip hash to path scheme
* [\#2312](https://github.com/bnb-chain/bsc/pull/2312) cmd/utils, node: switch to Pebble as the default db if none exists

### IMPROVEMENT
* [\#2228](https://github.com/bnb-chain/bsc/pull/2228) core: rephrase TriesInMemory log
* [\#2234](https://github.com/bnb-chain/bsc/pull/2234) cmd/utils: disable snap protocol for fast nodes
* [\#2236](https://github.com/bnb-chain/bsc/pull/2236) build(deps): bump github.com/quic-go/quic-go from 0.39.3 to 0.39.4
* [\#2240](https://github.com/bnb-chain/bsc/pull/2240) core/state: fix taskResult typo
* [\#2280](https://github.com/bnb-chain/bsc/pull/2280) cmd/utils, core: only full sync for fast nodes
* [\#2298](https://github.com/bnb-chain/bsc/pull/2298) cmd, node: initialize ports with --instance
* [\#2302](https://github.com/bnb-chain/bsc/pull/2302) cmd/geth, core/rawdb: add dbDeleteTrieState
* [\#2304](https://github.com/bnb-chain/bsc/pull/2304) eth/ethconfig: remove OverrideKepler and OverrideShanghai
* [\#2307](https://github.com/bnb-chain/bsc/pull/2307) internal/ethapi: add net_nodeInfo
* [\#2311](https://github.com/bnb-chain/bsc/pull/2311) Port Cancun-related changes from unreleased v1.14.0
* [\#2313](https://github.com/bnb-chain/bsc/pull/2313) tests/truffle: use hbss to run test
* [\#2314](https://github.com/bnb-chain/bsc/pull/2314) cmd/jsutil: dump MinGasPrice for validators
* [\#2317](https://github.com/bnb-chain/bsc/pull/2317) feat: add MEV metrics

### BUGFIX
* [\#2272](https://github.com/bnb-chain/bsc/pull/2272) parlia: add state prepare for internal SC transaction
* [\#2277](https://github.com/bnb-chain/bsc/pull/2277) fix: systemTx should always be at the end of the block
* [\#2299](https://github.com/bnb-chain/bsc/pull/2299) fix: add FeynmanFix upgrade for a testnet issue
* [\#2310](https://github.com/bnb-chain/bsc/pull/2310) core/vm: fix PrecompiledContractsCancun

## v1.4.2

### FEATURE
* [\#2021](https://github.com/bnb-chain/bsc/pull/2021) feat: support separate trie database
* [\#2224](https://github.com/bnb-chain/bsc/pull/2224) feat: support MEV

@@ -120,6 +147,10 @@ NA
[event: fix Resubscribe deadlock when unsubscribing after inner sub ends (#28359)](https://github.com/bnb-chain/bsc/commit/ffc6a0f36edda396a8421cf7a3c0feb88be20d0b)
[all: replace log15 with slog (#28187)](https://github.com/bnb-chain/bsc/commit/28e73717016cdc9ebdb5fdb3474cfbd3bd2d2524)

## v1.3.11
BUGFIX
* [\#2288](https://github.com/bnb-chain/bsc/pull/2288) fix: add FeynmanFix upgrade for a testnet issue

## v1.3.10
FEATURE
* [\#2047](https://github.com/bnb-chain/bsc/pull/2047) feat: add new fork block and precompile contract for BEP294 and BEP299
@@ -169,7 +169,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
	// Calculate the BlobBaseFee
	var excessBlobGas uint64
	if pre.Env.ExcessBlobGas != nil {
-		excessBlobGas := *pre.Env.ExcessBlobGas
+		excessBlobGas = *pre.Env.ExcessBlobGas
		vmContext.BlobBaseFee = eip4844.CalcBlobFee(excessBlobGas)
	} else {
		// If it is not explicitly defined, but we have the parent values, we try
|
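The one-character change above matters because `:=` declares a new, block-scoped `excessBlobGas` that shadows the outer variable, so any later use of the outer variable would still see zero. A minimal, self-contained sketch of the pitfall (hypothetical values, not the t8ntool code):

```go
package main

import "fmt"

func main() {
	var excessBlobGas uint64
	fromEnv := uint64(36306944) // hypothetical value read from an environment field

	if true {
		excessBlobGas := fromEnv // ':=' creates a shadow; the outer variable stays 0
		_ = excessBlobGas
	}
	fmt.Println(excessBlobGas) // 0

	if true {
		excessBlobGas = fromEnv // '=' assigns the outer variable, as in the fix
	}
	fmt.Println(excessBlobGas) // 36306944
}
```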
@ -183,14 +183,6 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
|
||||
params.RialtoGenesisHash = common.HexToHash(v)
|
||||
}
|
||||
|
||||
if ctx.IsSet(utils.OverrideShanghai.Name) {
|
||||
v := ctx.Uint64(utils.OverrideShanghai.Name)
|
||||
cfg.Eth.OverrideShanghai = &v
|
||||
}
|
||||
if ctx.IsSet(utils.OverrideKepler.Name) {
|
||||
v := ctx.Uint64(utils.OverrideKepler.Name)
|
||||
cfg.Eth.OverrideKepler = &v
|
||||
}
|
||||
if ctx.IsSet(utils.OverrideCancun.Name) {
|
||||
v := ctx.Uint64(utils.OverrideCancun.Name)
|
||||
cfg.Eth.OverrideCancun = &v
|
||||
@ -203,6 +195,10 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
|
||||
v := ctx.Uint64(utils.OverrideFeynman.Name)
|
||||
cfg.Eth.OverrideFeynman = &v
|
||||
}
|
||||
if ctx.IsSet(utils.OverrideFeynmanFix.Name) {
|
||||
v := ctx.Uint64(utils.OverrideFeynmanFix.Name)
|
||||
cfg.Eth.OverrideFeynmanFix = &v
|
||||
}
|
||||
if ctx.IsSet(utils.SeparateDBFlag.Name) && !stack.IsSeparatedDB() {
|
||||
utils.Fatalf("Failed to locate separate database subdirectory when separatedb parameter has been set")
|
||||
}
|
||||
|
@ -76,6 +76,7 @@ Remove blockchain and state databases`,
|
||||
dbCompactCmd,
|
||||
dbGetCmd,
|
||||
dbDeleteCmd,
|
||||
dbDeleteTrieStateCmd,
|
||||
dbInspectTrieCmd,
|
||||
dbPutCmd,
|
||||
dbGetSlotsCmd,
|
||||
@ -206,6 +207,15 @@ corruption if it is aborted during execution'!`,
|
||||
Description: `This command deletes the specified database key from the database.
|
||||
WARNING: This is a low-level operation which may cause database corruption!`,
|
||||
}
|
||||
dbDeleteTrieStateCmd = &cli.Command{
|
||||
Action: dbDeleteTrieState,
|
||||
Name: "delete-trie-state",
|
||||
Usage: "Delete all trie state key-value pairs from the database and the ancient state. Does not support hash-based state scheme.",
|
||||
Flags: flags.Merge([]cli.Flag{
|
||||
utils.SyncModeFlag,
|
||||
}, utils.NetworkFlags, utils.DatabaseFlags),
|
||||
Description: `This command deletes all trie state key-value pairs from the database and the ancient state.`,
|
||||
}
|
||||
dbPutCmd = &cli.Command{
|
||||
Action: dbPut,
|
||||
Name: "put",
|
||||
@ -810,6 +820,82 @@ func dbDelete(ctx *cli.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// dbDeleteTrieState deletes all trie state related key-value pairs from the database and the ancient state store.
|
||||
func dbDeleteTrieState(ctx *cli.Context) error {
|
||||
if ctx.NArg() > 0 {
|
||||
return fmt.Errorf("no arguments required")
|
||||
}
|
||||
|
||||
stack, config := makeConfigNode(ctx)
|
||||
defer stack.Close()
|
||||
|
||||
db := utils.MakeChainDatabase(ctx, stack, false, false)
|
||||
defer db.Close()
|
||||
|
||||
var (
|
||||
err error
|
||||
start = time.Now()
|
||||
)
|
||||
|
||||
// If separate trie db exists, delete all files in the db folder
|
||||
if db.StateStore() != nil {
|
||||
statePath := filepath.Join(stack.ResolvePath("chaindata"), "state")
|
||||
log.Info("Removing separate trie database", "path", statePath)
|
||||
err = filepath.Walk(statePath, func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if path != statePath {
|
||||
fileInfo, err := os.Lstat(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !fileInfo.IsDir() {
|
||||
os.Remove(path)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
log.Info("Separate trie database deleted", "err", err, "elapsed", common.PrettyDuration(time.Since(start)))
|
||||
return err
|
||||
}
|
||||
|
||||
// Delete KV pairs from the database
|
||||
err = rawdb.DeleteTrieState(db)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Remove the full node ancient database
|
||||
dbPath := config.Eth.DatabaseFreezer
|
||||
switch {
|
||||
case dbPath == "":
|
||||
dbPath = filepath.Join(stack.ResolvePath("chaindata"), "ancient/state")
|
||||
case !filepath.IsAbs(dbPath):
|
||||
dbPath = config.Node.ResolvePath(dbPath)
|
||||
}
|
||||
|
||||
if !common.FileExist(dbPath) {
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Info("Removing ancient state database", "path", dbPath)
|
||||
start = time.Now()
|
||||
filepath.Walk(dbPath, func(path string, info os.FileInfo, err error) error {
|
||||
if dbPath == path {
|
||||
return nil
|
||||
}
|
||||
if !info.IsDir() {
|
||||
os.Remove(path)
|
||||
return nil
|
||||
}
|
||||
return filepath.SkipDir
|
||||
})
|
||||
log.Info("State database successfully deleted", "path", dbPath, "elapsed", common.PrettyDuration(time.Since(start)))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// dbPut overwrite a value in the database
|
||||
func dbPut(ctx *cli.Context) error {
|
||||
if ctx.NArg() != 2 {
|
||||
|
@@ -151,8 +151,8 @@ func TestCustomBackend(t *testing.T) {
		return nil
	}
	for i, tt := range []backendTest{
-		{ // When not specified, it should default to leveldb
-			execArgs:   []string{"--db.engine", "leveldb"},
+		{ // When not specified, it should default to pebble
+			execArgs:   []string{"--db.engine", "pebble"},
			execExpect: "0x0000000000001338",
		},
		{ // Explicit leveldb
|
@ -70,11 +70,10 @@ var (
|
||||
utils.USBFlag,
|
||||
utils.SmartCardDaemonPathFlag,
|
||||
utils.RialtoHash,
|
||||
utils.OverrideShanghai,
|
||||
utils.OverrideKepler,
|
||||
utils.OverrideCancun,
|
||||
utils.OverrideVerkle,
|
||||
utils.OverrideFeynman,
|
||||
utils.OverrideFeynmanFix,
|
||||
utils.EnablePersonal,
|
||||
utils.TxPoolLocalsFlag,
|
||||
utils.TxPoolNoLocalsFlag,
|
||||
@ -144,6 +143,7 @@ var (
|
||||
utils.NoDiscoverFlag,
|
||||
utils.DiscoveryV4Flag,
|
||||
utils.DiscoveryV5Flag,
|
||||
utils.InstanceFlag,
|
||||
utils.LegacyDiscoveryV5Flag, // deprecated
|
||||
utils.NetrestrictFlag,
|
||||
utils.NodeKeyFileFlag,
|
||||
|
@ -11,6 +11,7 @@ Install node.js dependency:
|
||||
npm install
|
||||
```
|
||||
## Run
|
||||
### 1.Get Validator's Information: Version, MinGasPrice
|
||||
mainnet validators version
|
||||
```bash
|
||||
npm run startMainnet
|
||||
@ -19,7 +20,8 @@ testnet validators version
|
||||
```bash
|
||||
npm run startTestnet
|
||||
```
|
||||
Transaction count
|
||||
|
||||
### 2.Get Transaction Count
|
||||
```bash
|
||||
node gettxcount.js --rpc ${url} --startNum ${start} --endNum ${end} --miner ${miner} (optional)
|
||||
```
|
@ -4,6 +4,9 @@ import program from "commander";
|
||||
program.option("--rpc <rpc>", "Rpc");
|
||||
program.option("--startNum <startNum>", "start num")
|
||||
program.option("--endNum <endNum>", "end num")
|
||||
// --miner:
|
||||
// specified: find the max txCounter from the specified validator
|
||||
// not specified: find the max txCounter from all validators
|
||||
program.option("--miner <miner>", "miner", "")
|
||||
program.parse(process.argv);
|
||||
|
||||
|
@ -12,10 +12,23 @@ const main = async () => {
|
||||
console.log(blockNum);
|
||||
for (let i = 0; i < program.Num; i++) {
|
||||
let blockData = await provider.getBlock(blockNum - i);
|
||||
// 1.get Geth client version
|
||||
let major = ethers.toNumber(ethers.dataSlice(blockData.extraData, 2, 3))
|
||||
let minor = ethers.toNumber(ethers.dataSlice(blockData.extraData, 3, 4))
|
||||
let patch = ethers.toNumber(ethers.dataSlice(blockData.extraData, 4, 5))
|
||||
console.log(blockData.miner, "version =", major + "." + minor + "." + patch)
|
||||
|
||||
// 2.get minimum txGasPrice based on the last non-zero-gasprice transaction
|
||||
let lastGasPrice = 0
|
||||
for (let txIndex = blockData.transactions.length - 1; txIndex >= 0; txIndex--) {
|
||||
let txHash = blockData.transactions[txIndex]
|
||||
let txData = await provider.getTransaction(txHash);
|
||||
if (txData.gasPrice == 0) {
|
||||
continue
|
||||
}
|
||||
lastGasPrice = txData.gasPrice
|
||||
break
|
||||
}
|
||||
console.log(blockData.miner, "version =", major + "." + minor + "." + patch, " MinGasPrice = " + lastGasPrice)
|
||||
}
|
||||
};
|
||||
main().then(() => process.exit(0))
|
||||
|
@ -150,6 +150,12 @@ var (
|
||||
Usage: "Minimum free disk space in MB, once reached triggers auto shut down (default = --cache.gc converted to MB, 0 = disabled)",
|
||||
Category: flags.EthCategory,
|
||||
}
|
||||
InstanceFlag = &cli.IntFlag{
|
||||
Name: "instance",
|
||||
Usage: "Configures the ports to avoid conflicts when running multiple nodes on the same machine. Maximum is 200. Only applicable for: port, authrpc.port, discovery,port, http.port, ws.port",
|
||||
Value: 1,
|
||||
Category: flags.EthCategory,
|
||||
}
|
||||
KeyStoreDirFlag = &flags.DirectoryFlag{
|
||||
Name: "keystore",
|
||||
Usage: "Directory for the keystore (default = inside the datadir)",
|
||||
@ -299,16 +305,6 @@ var (
|
||||
Usage: "Manually specify the Rialto Genesis Hash, to trigger builtin network logic",
|
||||
Category: flags.EthCategory,
|
||||
}
|
||||
OverrideShanghai = &cli.Uint64Flag{
|
||||
Name: "override.shanghai",
|
||||
Usage: "Manually specify the Shanghai fork timestamp, overriding the bundled setting",
|
||||
Category: flags.EthCategory,
|
||||
}
|
||||
OverrideKepler = &cli.Uint64Flag{
|
||||
Name: "override.kepler",
|
||||
Usage: "Manually specify the Kepler fork timestamp, overriding the bundled setting",
|
||||
Category: flags.EthCategory,
|
||||
}
|
||||
OverrideCancun = &cli.Uint64Flag{
|
||||
Name: "override.cancun",
|
||||
Usage: "Manually specify the Cancun fork timestamp, overriding the bundled setting",
|
||||
@ -324,6 +320,11 @@ var (
|
||||
Usage: "Manually specify the Feynman fork timestamp, overriding the bundled setting",
|
||||
Category: flags.EthCategory,
|
||||
}
|
||||
OverrideFeynmanFix = &cli.Uint64Flag{
|
||||
Name: "override.feynmanfix",
|
||||
Usage: "Manually specify the FeynmanFix fork timestamp, overriding the bundled setting",
|
||||
Category: flags.EthCategory,
|
||||
}
|
||||
SyncModeFlag = &flags.TextMarshalerFlag{
|
||||
Name: "syncmode",
|
||||
Usage: `Blockchain sync mode ("snap" or "full")`,
|
||||
@ -1542,6 +1543,7 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) {
|
||||
|
||||
// SetNodeConfig applies node-related command line flags to the config.
|
||||
func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
|
||||
setInstance(ctx, cfg)
|
||||
SetP2PConfig(ctx, &cfg.P2P)
|
||||
setIPC(ctx, cfg)
|
||||
setHTTP(ctx, cfg)
|
||||
@ -1938,6 +1940,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
|
||||
if ctx.String(GCModeFlag.Name) == "archive" && cfg.TransactionHistory != 0 {
|
||||
cfg.TransactionHistory = 0
|
||||
log.Warn("Disabled transaction unindexing for archive node")
|
||||
|
||||
cfg.StateScheme = rawdb.HashScheme
|
||||
log.Warn("Forcing hash state-scheme for archive mode")
|
||||
}
|
||||
if ctx.IsSet(CacheFlag.Name) || ctx.IsSet(CacheTrieFlag.Name) {
|
||||
cfg.TrieCleanCache = ctx.Int(CacheFlag.Name) * ctx.Int(CacheTrieFlag.Name) / 100
|
||||
@ -1955,6 +1960,16 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
|
||||
if cfg.TriesVerifyMode.NeedRemoteVerify() {
|
||||
cfg.EnableTrustProtocol = true
|
||||
}
|
||||
// A node without trie is not able to provide snap data, so it should disable snap protocol.
|
||||
if cfg.TriesVerifyMode != core.LocalVerify {
|
||||
log.Info("Automatically disables snap protocol due to verify mode", "mode", cfg.TriesVerifyMode)
|
||||
cfg.DisableSnapProtocol = true
|
||||
}
|
||||
|
||||
if cfg.SyncMode == downloader.SnapSync && cfg.TriesVerifyMode.NoTries() {
|
||||
log.Warn("Only local TriesVerifyMode can support snap sync, resetting to full sync", "mode", cfg.TriesVerifyMode)
|
||||
cfg.SyncMode = downloader.FullSync
|
||||
}
|
||||
}
|
||||
if ctx.IsSet(CacheFlag.Name) || ctx.IsSet(CacheSnapshotFlag.Name) {
|
||||
cfg.SnapshotCache = ctx.Int(CacheFlag.Name) * ctx.Int(CacheSnapshotFlag.Name) / 100
|
||||
@ -2086,7 +2101,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
|
||||
if rawdb.ReadCanonicalHash(chaindb, 0) != (common.Hash{}) {
|
||||
cfg.Genesis = nil // fallback to db content
|
||||
|
||||
//validate genesis has PoS enabled in block 0
|
||||
// validate genesis has PoS enabled in block 0
|
||||
genesis, err := core.ReadGenesis(chaindb)
|
||||
if err != nil {
|
||||
Fatalf("Could not read genesis from database: %v", err)
|
||||
@ -2528,3 +2543,24 @@ func ParseCLIAndConfigStateScheme(cliScheme, cfgScheme string) (string, error) {
|
||||
}
|
||||
return "", fmt.Errorf("incompatible state scheme, CLI: %s, config: %s", cliScheme, cfgScheme)
|
||||
}
|
||||
|
||||
// setInstance configures the port numbers for the given instance.
|
||||
func setInstance(ctx *cli.Context, cfg *node.Config) {
|
||||
if ctx.IsSet(InstanceFlag.Name) {
|
||||
cfg.Instance = ctx.Int(InstanceFlag.Name)
|
||||
}
|
||||
|
||||
if cfg.Instance > 200 {
|
||||
Fatalf("Instance number %d is too high, maximum is 200", cfg.Instance)
|
||||
}
|
||||
|
||||
if cfg.Instance == 1 { // using default ports
|
||||
return
|
||||
}
|
||||
|
||||
cfg.AuthPort = node.DefaultConfig.AuthPort + cfg.Instance*100 - 100
|
||||
cfg.HTTPPort = node.DefaultHTTPPort - cfg.Instance + 1
|
||||
cfg.WSPort = node.DefaultWSPort + cfg.Instance*2 - 2
|
||||
cfg.P2P.ListenAddr = fmt.Sprintf(":%d", node.DefaultListenPort+cfg.Instance-1)
|
||||
cfg.P2P.DiscAddr = fmt.Sprintf(":%d", node.DefaultDiscPort+cfg.Instance-1)
|
||||
}
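As a rough worked example of the arithmetic above (a sketch only; 8545 for HTTP is the usual geth default and is an assumption here, while the other defaults appear in node/defaults.go later in this diff), `--instance 2` would resolve to the following ports:

```go
package main

import "fmt"

func main() {
	const (
		defaultAuthPort   = 8551  // node.DefaultConfig.AuthPort
		defaultHTTPPort   = 8545  // assumed standard default
		defaultWSPort     = 8546  // node.DefaultWSPort
		defaultListenPort = 30303 // node.DefaultListenPort
		defaultDiscPort   = 30303 // node.DefaultDiscPort
	)
	instance := 2

	fmt.Println("authrpc.port:", defaultAuthPort+instance*100-100) // 8651
	fmt.Println("http.port:", defaultHTTPPort-instance+1)          // 8544
	fmt.Println("ws.port:", defaultWSPort+instance*2-2)            // 8548
	fmt.Println("port:", defaultListenPort+instance-1)             // 30304
	fmt.Println("discovery.port:", defaultDiscPort+instance-1)     // 30304
}
```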
|
||||
|
@ -1988,16 +1988,19 @@ func applyMessage(
|
||||
chainConfig *params.ChainConfig,
|
||||
chainContext core.ChainContext,
|
||||
) (uint64, error) {
|
||||
// TODO(Nathan): state.Prepare should be called here, now accessList related EIP not affect systemtxs
|
||||
// EIP1153 may cause a critical issue in the future
|
||||
// Create a new context to be used in the EVM environment
|
||||
context := core.NewEVMBlockContext(header, chainContext, nil)
|
||||
// Create a new environment which holds all relevant information
|
||||
// about the transaction and calling mechanisms.
|
||||
vmenv := vm.NewEVM(context, vm.TxContext{Origin: msg.From(), GasPrice: big.NewInt(0)}, state, chainConfig, vm.Config{})
|
||||
// Apply the transaction to the current state (included in the env)
|
||||
if chainConfig.IsCancun(header.Number, header.Time) {
|
||||
rules := vmenv.ChainConfig().Rules(vmenv.Context.BlockNumber, vmenv.Context.Random != nil, vmenv.Context.Time)
|
||||
state.Prepare(rules, msg.From(), vmenv.Context.Coinbase, msg.To(), vm.ActivePrecompiles(rules), msg.AccessList)
|
||||
}
|
||||
// Increment the nonce for the next transaction
|
||||
state.SetNonce(msg.From(), state.GetNonce(msg.From())+1)
|
||||
|
||||
ret, returnGas, err := vmenv.Call(
|
||||
vm.AccountRef(msg.From()),
|
||||
*msg.To(),
|
||||
|
@@ -316,9 +316,9 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
	if cacheConfig == nil {
		cacheConfig = defaultCacheConfig
	}
-	if cacheConfig.TriesInMemory != 128 {
-		log.Warn("TriesInMemory isn't the default value(128), you need specify exact same TriesInMemory when prune data",
-			"triesInMemory", cacheConfig.TriesInMemory)
+	if cacheConfig.StateScheme == rawdb.HashScheme && cacheConfig.TriesInMemory != 128 {
+		log.Warn("TriesInMemory isn't the default value (128), you need specify the same TriesInMemory when pruning data",
+			"triesInMemory", cacheConfig.TriesInMemory, "scheme", cacheConfig.StateScheme)
	}

	diffLayerCache, _ := exlru.New(diffLayerCacheLimit)
@@ -2008,7 +2008,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
		go throwaway.TriePrefetchInAdvance(block, signer)
	}

-	//Process block using the parent state as reference point
+	// Process block using the parent state as reference point
	if bc.pipeCommit {
		statedb.EnablePipeCommit()
	}
||||
|
@@ -48,7 +48,7 @@ func TestGeneratePOSChain(t *testing.T) {
		Config: &config,
		Alloc: types.GenesisAlloc{
			address: {Balance: funds},
-			params.BeaconRootsStorageAddress: {Balance: common.Big0, Code: asm4788},
+			params.BeaconRootsAddress:        {Balance: common.Big0, Code: asm4788},
		},
		BaseFee:    big.NewInt(params.InitialBaseFee),
		Difficulty: common.Big1,
@@ -180,7 +180,7 @@ func TestGeneratePOSChain(t *testing.T) {
		}
		state, _ := blockchain.State()
		idx := block.Time()%8191 + 8191
-		got := state.GetState(params.BeaconRootsStorageAddress, common.BigToHash(new(big.Int).SetUint64(idx)))
+		got := state.GetState(params.BeaconRootsAddress, common.BigToHash(new(big.Int).SetUint64(idx)))
		if got != want {
			t.Fatalf("block %d, wrong parent beacon root in state: got %s, want %s", i, got, want)
		}
||||
|
@ -216,11 +216,10 @@ func (e *GenesisMismatchError) Error() string {
|
||||
// ChainOverrides contains the changes to chain config
|
||||
// Typically, these modifications involve hardforks that are not enabled on the BSC mainnet, intended for testing purposes.
|
||||
type ChainOverrides struct {
|
||||
OverrideShanghai *uint64
|
||||
OverrideKepler *uint64
|
||||
OverrideCancun *uint64
|
||||
OverrideVerkle *uint64
|
||||
OverrideFeynman *uint64
|
||||
OverrideFeynmanFix *uint64
|
||||
}
|
||||
|
||||
// SetupGenesisBlock writes or updates the genesis block in db.
|
||||
@ -246,12 +245,6 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *triedb.Database, g
|
||||
}
|
||||
applyOverrides := func(config *params.ChainConfig) {
|
||||
if config != nil {
|
||||
if overrides != nil && overrides.OverrideShanghai != nil {
|
||||
config.ShanghaiTime = overrides.OverrideShanghai
|
||||
}
|
||||
if overrides != nil && overrides.OverrideKepler != nil {
|
||||
config.KeplerTime = overrides.OverrideKepler
|
||||
}
|
||||
if overrides != nil && overrides.OverrideCancun != nil {
|
||||
config.CancunTime = overrides.OverrideCancun
|
||||
}
|
||||
@ -261,6 +254,9 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *triedb.Database, g
|
||||
if overrides != nil && overrides.OverrideFeynman != nil {
|
||||
config.FeynmanTime = overrides.OverrideFeynman
|
||||
}
|
||||
if overrides != nil && overrides.OverrideFeynmanFix != nil {
|
||||
config.FeynmanFixTime = overrides.OverrideFeynmanFix
|
||||
}
|
||||
}
|
||||
}
|
||||
// Just commit the new block if there is no stored genesis block.
|
||||
@ -285,7 +281,7 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *triedb.Database, g
|
||||
// is initialized with an external ancient store. Commit genesis state
|
||||
// in this case.
|
||||
header := rawdb.ReadHeader(db, stored, 0)
|
||||
if header.Root != types.EmptyRootHash && !triedb.Initialized(header.Root) {
|
||||
if header.Root != types.EmptyRootHash && !triedb.Initialized(header.Root) && !triedb.Config().NoTries {
|
||||
if genesis == nil {
|
||||
genesis = DefaultBSCGenesisBlock()
|
||||
}
|
||||
|
@@ -324,7 +324,7 @@ func ValidateStateScheme(stateScheme string) bool {
// the stored state.
//
// - If the provided scheme is none, use the scheme consistent with persistent
-//   state, or fallback to hash-based scheme if state is empty.
+//   state, or fallback to path-based scheme if state is empty.
//
// - If the provided scheme is hash, use hash-based scheme or error out if not
//   compatible with persistent state scheme.
@@ -338,10 +338,8 @@ func ParseStateScheme(provided string, disk ethdb.Database) (string, error) {
	stored := ReadStateScheme(disk)
	if provided == "" {
		if stored == "" {
-			// use default scheme for empty database, flip it when
-			// path mode is chosen as default
-			log.Info("State scheme set to default", "scheme", "hash")
-			return HashScheme, nil
+			log.Info("State scheme set to default", "scheme", "path")
+			return PathScheme, nil // use default scheme for empty database
		}
		log.Info("State scheme set to already existing disk db", "scheme", stored)
		return stored, nil // reuse scheme of persistent scheme
|
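A minimal sketch of what the flipped default means for a brand-new data directory, assuming the behaviour shown in the hunk above (`NewMemoryDatabase` stands in for an empty disk database):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	db := rawdb.NewMemoryDatabase() // empty database: no state scheme recorded yet
	defer db.Close()

	// No scheme provided on the CLI and none stored on disk: the default now
	// resolves to the path scheme; it previously resolved to the hash scheme.
	scheme, err := rawdb.ParseStateScheme("", db)
	fmt.Println(scheme, err) // "path" <nil>
}
```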
@@ -506,7 +506,7 @@ type OpenOptions struct {
//
//	                     type == null          type != null
//	                  +----------------------------------------
-//	db is non-existent |  leveldb default  |  specified type
+//	db is non-existent |  pebble default   |  specified type
//	db is existent     |  from db          |  specified type (if compatible)
func openKeyValueDatabase(o OpenOptions) (ethdb.Database, error) {
	// Reject any unsupported database type
@@ -527,12 +527,9 @@ func openKeyValueDatabase(o OpenOptions) (ethdb.Database, error) {
		log.Info("Using leveldb as the backing database")
		return NewLevelDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly)
	}
-	// No pre-existing database, no user-requested one either. Default to Pebble
-	// on supported platforms and LevelDB on anything else.
-	// log.Info("Defaulting to pebble as the backing database")
-	// return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly)
-	log.Info("Defaulting to leveldb as the backing database")
-	return NewLevelDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly)
+	// No pre-existing database, no user-requested one either. Default to Pebble.
+	log.Info("Defaulting to pebble as the backing database")
+	return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly, o.Ephemeral)
}

// Open opens both a disk-based key-value database such as leveldb or pebble, but also
||||
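And a hedged sketch of the database-engine side of the same defaults flip (directory and sizes are placeholders): with `Type` left empty and nothing on disk, `rawdb.Open` should now create a Pebble database rather than a LevelDB one.

```go
package main

import (
	"fmt"
	"os"

	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	dir, _ := os.MkdirTemp("", "chaindata-example") // throwaway directory
	defer os.RemoveAll(dir)

	// Type is empty and the directory holds no pre-existing database, so the
	// key-value backend defaults to Pebble after this change.
	db, err := rawdb.Open(rawdb.OpenOptions{
		Directory: dir,
		Cache:     16, // MiB
		Handles:   16,
	})
	if err != nil {
		panic(err)
	}
	defer db.Close()
	fmt.Println("opened key-value database in", dir)
}
```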
@ -887,6 +884,62 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func DeleteTrieState(db ethdb.Database) error {
|
||||
var (
|
||||
it ethdb.Iterator
|
||||
batch = db.NewBatch()
|
||||
start = time.Now()
|
||||
logged = time.Now()
|
||||
count int64
|
||||
key []byte
|
||||
)
|
||||
|
||||
prefixKeys := map[string]func([]byte) bool{
|
||||
string(trieNodeAccountPrefix): IsAccountTrieNode,
|
||||
string(trieNodeStoragePrefix): IsStorageTrieNode,
|
||||
string(stateIDPrefix): func(key []byte) bool { return len(key) == len(stateIDPrefix)+common.HashLength },
|
||||
}
|
||||
|
||||
for prefix, isValid := range prefixKeys {
|
||||
it = db.NewIterator([]byte(prefix), nil)
|
||||
|
||||
for it.Next() {
|
||||
key = it.Key()
|
||||
if !isValid(key) {
|
||||
continue
|
||||
}
|
||||
|
||||
batch.Delete(it.Key())
|
||||
if batch.ValueSize() > ethdb.IdealBatchSize {
|
||||
if err := batch.Write(); err != nil {
|
||||
it.Release()
|
||||
return err
|
||||
}
|
||||
batch.Reset()
|
||||
}
|
||||
|
||||
count++
|
||||
if time.Since(logged) > 8*time.Second {
|
||||
log.Info("Deleting trie state", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
|
||||
logged = time.Now()
|
||||
}
|
||||
}
|
||||
|
||||
it.Release()
|
||||
}
|
||||
|
||||
if batch.ValueSize() > 0 {
|
||||
if err := batch.Write(); err != nil {
|
||||
return err
|
||||
}
|
||||
batch.Reset()
|
||||
}
|
||||
|
||||
log.Info("Deleted trie state", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
|
||||
|
||||
return nil
|
||||
}
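For reference, a small hedged sketch of how this helper is meant to be driven (the `geth db delete-trie-state` command shown earlier does exactly this, and additionally removes the separate trie database and the ancient state directory):

```go
// wipeTrieState is illustrative only and not part of the diff. It assumes db
// is an already opened chain database, e.g. from utils.MakeChainDatabase.
func wipeTrieState(db ethdb.Database) error {
	// Deletes account-trie nodes, storage-trie nodes and state-id mappings in
	// batches, logging progress roughly every eight seconds.
	return rawdb.DeleteTrieState(db)
}
```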
|
||||
|
||||
// printChainMetadata prints out chain metadata to stderr.
|
||||
func printChainMetadata(db ethdb.KeyValueStore) {
|
||||
fmt.Fprintf(os.Stderr, "Chain metadata\n")
|
||||
|
@ -444,6 +444,10 @@ func (mode VerifyMode) NeedRemoteVerify() bool {
|
||||
return mode == FullVerify || mode == InsecureVerify
|
||||
}
|
||||
|
||||
func (mode VerifyMode) NoTries() bool {
|
||||
return mode != LocalVerify
|
||||
}
|
||||
|
||||
func newVerifyMsgTypeGauge(msgType uint16, peerId string) metrics.Gauge {
|
||||
m := fmt.Sprintf("verifymanager/message/%d/peer/%s", msgType, peerId)
|
||||
return metrics.GetOrRegisterGauge(m, nil)
|
||||
|
@ -1535,11 +1535,11 @@ func (s *StateDB) Commit(block uint64, failPostCommitFunc func(), postCommitFunc
|
||||
}
|
||||
|
||||
tasks := make(chan func())
|
||||
type tastResult struct {
|
||||
type taskResult struct {
|
||||
err error
|
||||
nodeSet *trienode.NodeSet
|
||||
}
|
||||
taskResults := make(chan tastResult, len(s.stateObjectsDirty))
|
||||
taskResults := make(chan taskResult, len(s.stateObjectsDirty))
|
||||
tasksNum := 0
|
||||
finishCh := make(chan struct{})
|
||||
|
||||
@ -1566,13 +1566,13 @@ func (s *StateDB) Commit(block uint64, failPostCommitFunc func(), postCommitFunc
|
||||
// Write any storage changes in the state object to its storage trie
|
||||
if !s.noTrie {
|
||||
if set, err := obj.commit(); err != nil {
|
||||
taskResults <- tastResult{err, nil}
|
||||
taskResults <- taskResult{err, nil}
|
||||
return
|
||||
} else {
|
||||
taskResults <- tastResult{nil, set}
|
||||
taskResults <- taskResult{nil, set}
|
||||
}
|
||||
} else {
|
||||
taskResults <- tastResult{nil, nil}
|
||||
taskResults <- taskResult{nil, nil}
|
||||
}
|
||||
}
|
||||
tasksNum++
|
||||
|
@ -113,6 +113,12 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
|
||||
continue
|
||||
}
|
||||
}
|
||||
if p.config.IsCancun(block.Number(), block.Time()) {
|
||||
if len(systemTxs) > 0 {
|
||||
// systemTxs should be always at the end of block.
|
||||
return statedb, nil, nil, 0, fmt.Errorf("normal tx %d [%v] after systemTx", i, tx.Hash().Hex())
|
||||
}
|
||||
}
|
||||
|
||||
msg, err := TransactionToMessage(tx, signer, header.BaseFee)
|
||||
if err != nil {
|
||||
@@ -233,11 +239,11 @@ func ProcessBeaconBlockRoot(beaconRoot common.Hash, vmenv *vm.EVM, statedb *stat
		GasPrice:  common.Big0,
		GasFeeCap: common.Big0,
		GasTipCap: common.Big0,
-		To:        &params.BeaconRootsStorageAddress,
+		To:        &params.BeaconRootsAddress,
		Data:      beaconRoot[:],
	}
	vmenv.Reset(NewEVMTxContext(msg), statedb)
-	statedb.AddAddressToAccessList(params.BeaconRootsStorageAddress)
+	statedb.AddAddressToAccessList(params.BeaconRootsAddress)
	_, _, _ = vmenv.Call(vm.AccountRef(msg.From), *msg.To, msg.Data, 30_000_000, common.U2560)
	statedb.Finalise(true)
}
||||
|
File diff suppressed because one or more lines are too long
@ -1133,8 +1133,12 @@ func (p *BlobPool) validateTx(tx *types.Transaction) error {
|
||||
next = p.state.GetNonce(from)
|
||||
)
|
||||
if uint64(len(p.index[from])) > tx.Nonce()-next {
|
||||
// Account can support the replacement, but the price bump must also be met
|
||||
prev := p.index[from][int(tx.Nonce()-next)]
|
||||
// Ensure the transaction is different than the one tracked locally
|
||||
if prev.hash == tx.Hash() {
|
||||
return txpool.ErrAlreadyKnown
|
||||
}
|
||||
// Account can support the replacement, but the price bump must also be met
|
||||
switch {
|
||||
case tx.GasFeeCapIntCmp(prev.execFeeCap.ToBig()) <= 0:
|
||||
return fmt.Errorf("%w: new tx gas fee cap %v <= %v queued", txpool.ErrReplaceUnderpriced, tx.GasFeeCap(), prev.execFeeCap)
|
||||
|
@ -992,9 +992,14 @@ func TestAdd(t *testing.T) {
|
||||
},
|
||||
},
|
||||
adds: []addtx{
|
||||
{ // New account, 1 tx pending: reject replacement nonce 0 (ignore price for now)
|
||||
{ // New account, 1 tx pending: reject duplicate nonce 0
|
||||
from: "alice",
|
||||
tx: makeUnsignedTx(0, 1, 1, 1),
|
||||
err: txpool.ErrAlreadyKnown,
|
||||
},
|
||||
{ // New account, 1 tx pending: reject replacement nonce 0 (ignore price for now)
|
||||
from: "alice",
|
||||
tx: makeUnsignedTx(0, 1, 1, 2),
|
||||
err: txpool.ErrReplaceUnderpriced,
|
||||
},
|
||||
{ // New account, 1 tx pending: accept nonce 1
|
||||
@ -1017,10 +1022,10 @@ func TestAdd(t *testing.T) {
|
||||
tx: makeUnsignedTx(3, 1, 1, 1),
|
||||
err: nil,
|
||||
},
|
||||
{ // Old account, 1 tx in chain, 1 tx pending: reject replacement nonce 1 (ignore price for now)
|
||||
{ // Old account, 1 tx in chain, 1 tx pending: reject duplicate nonce 1
|
||||
from: "bob",
|
||||
tx: makeUnsignedTx(1, 1, 1, 1),
|
||||
err: txpool.ErrReplaceUnderpriced,
|
||||
err: txpool.ErrAlreadyKnown,
|
||||
},
|
||||
{ // Old account, 1 tx in chain, 1 tx pending: accept nonce 2 (ignore price for now)
|
||||
from: "bob",
|
||||
|
@ -133,20 +133,6 @@ var PrecompiledContractsPlanck = map[common.Address]PrecompiledContract{
|
||||
common.BytesToAddress([]byte{101}): &iavlMerkleProofValidatePlanck{},
|
||||
}
|
||||
|
||||
// PrecompiledContractsBerlin contains the default set of pre-compiled Ethereum
|
||||
// contracts used in the Berlin release.
|
||||
var PrecompiledContractsBerlin = map[common.Address]PrecompiledContract{
|
||||
common.BytesToAddress([]byte{1}): &ecrecover{},
|
||||
common.BytesToAddress([]byte{2}): &sha256hash{},
|
||||
common.BytesToAddress([]byte{3}): &ripemd160hash{},
|
||||
common.BytesToAddress([]byte{4}): &dataCopy{},
|
||||
common.BytesToAddress([]byte{5}): &bigModExp{eip2565: true},
|
||||
common.BytesToAddress([]byte{6}): &bn256AddIstanbul{},
|
||||
common.BytesToAddress([]byte{7}): &bn256ScalarMulIstanbul{},
|
||||
common.BytesToAddress([]byte{8}): &bn256PairingIstanbul{},
|
||||
common.BytesToAddress([]byte{9}): &blake2F{},
|
||||
}
|
||||
|
||||
// PrecompiledContractsLuban contains the default set of pre-compiled Ethereum
|
||||
// contracts used in the Luban release.
|
||||
var PrecompiledContractsLuban = map[common.Address]PrecompiledContract{
|
||||
@ -185,6 +171,20 @@ var PrecompiledContractsPlato = map[common.Address]PrecompiledContract{
|
||||
common.BytesToAddress([]byte{103}): &cometBFTLightBlockValidate{},
|
||||
}
|
||||
|
||||
// PrecompiledContractsBerlin contains the default set of pre-compiled Ethereum
|
||||
// contracts used in the Berlin release.
|
||||
var PrecompiledContractsBerlin = map[common.Address]PrecompiledContract{
|
||||
common.BytesToAddress([]byte{1}): &ecrecover{},
|
||||
common.BytesToAddress([]byte{2}): &sha256hash{},
|
||||
common.BytesToAddress([]byte{3}): &ripemd160hash{},
|
||||
common.BytesToAddress([]byte{4}): &dataCopy{},
|
||||
common.BytesToAddress([]byte{5}): &bigModExp{eip2565: true},
|
||||
common.BytesToAddress([]byte{6}): &bn256AddIstanbul{},
|
||||
common.BytesToAddress([]byte{7}): &bn256ScalarMulIstanbul{},
|
||||
common.BytesToAddress([]byte{8}): &bn256PairingIstanbul{},
|
||||
common.BytesToAddress([]byte{9}): &blake2F{},
|
||||
}
|
||||
|
||||
// PrecompiledContractsHertz contains the default set of pre-compiled Ethereum
|
||||
// contracts used in the Hertz release.
|
||||
var PrecompiledContractsHertz = map[common.Address]PrecompiledContract{
|
||||
@ -204,6 +204,27 @@ var PrecompiledContractsHertz = map[common.Address]PrecompiledContract{
|
||||
common.BytesToAddress([]byte{103}): &cometBFTLightBlockValidateHertz{},
|
||||
}
|
||||
|
||||
// PrecompiledContractsFeynman contains the default set of pre-compiled Ethereum
|
||||
// contracts used in the Feynman release.
|
||||
var PrecompiledContractsFeynman = map[common.Address]PrecompiledContract{
|
||||
common.BytesToAddress([]byte{1}): &ecrecover{},
|
||||
common.BytesToAddress([]byte{2}): &sha256hash{},
|
||||
common.BytesToAddress([]byte{3}): &ripemd160hash{},
|
||||
common.BytesToAddress([]byte{4}): &dataCopy{},
|
||||
common.BytesToAddress([]byte{5}): &bigModExp{eip2565: true},
|
||||
common.BytesToAddress([]byte{6}): &bn256AddIstanbul{},
|
||||
common.BytesToAddress([]byte{7}): &bn256ScalarMulIstanbul{},
|
||||
common.BytesToAddress([]byte{8}): &bn256PairingIstanbul{},
|
||||
common.BytesToAddress([]byte{9}): &blake2F{},
|
||||
|
||||
common.BytesToAddress([]byte{100}): &tmHeaderValidate{},
|
||||
common.BytesToAddress([]byte{101}): &iavlMerkleProofValidatePlato{},
|
||||
common.BytesToAddress([]byte{102}): &blsSignatureVerify{},
|
||||
common.BytesToAddress([]byte{103}): &cometBFTLightBlockValidateHertz{},
|
||||
common.BytesToAddress([]byte{104}): &verifyDoubleSignEvidence{},
|
||||
common.BytesToAddress([]byte{105}): &secp256k1SignatureRecover{},
|
||||
}
|
||||
|
||||
// PrecompiledContractsCancun contains the default set of pre-compiled Ethereum
|
||||
// contracts used in the Cancun release.
|
||||
var PrecompiledContractsCancun = map[common.Address]PrecompiledContract{
|
||||
@ -218,27 +239,6 @@ var PrecompiledContractsCancun = map[common.Address]PrecompiledContract{
|
||||
common.BytesToAddress([]byte{9}): &blake2F{},
|
||||
common.BytesToAddress([]byte{0x0a}): &kzgPointEvaluation{},
|
||||
|
||||
common.BytesToAddress([]byte{100}): &tmHeaderValidate{},
|
||||
common.BytesToAddress([]byte{101}): &iavlMerkleProofValidatePlato{},
|
||||
common.BytesToAddress([]byte{102}): &blsSignatureVerify{},
|
||||
common.BytesToAddress([]byte{103}): &cometBFTLightBlockValidate{},
|
||||
common.BytesToAddress([]byte{104}): &verifyDoubleSignEvidence{},
|
||||
common.BytesToAddress([]byte{105}): &secp256k1SignatureRecover{},
|
||||
}
|
||||
|
||||
// PrecompiledContractsFeynman contains the default set of pre-compiled Ethereum
|
||||
// contracts used in the Feynman release.
|
||||
var PrecompiledContractsFeynman = map[common.Address]PrecompiledContract{
|
||||
common.BytesToAddress([]byte{1}): &ecrecover{},
|
||||
common.BytesToAddress([]byte{2}): &sha256hash{},
|
||||
common.BytesToAddress([]byte{3}): &ripemd160hash{},
|
||||
common.BytesToAddress([]byte{4}): &dataCopy{},
|
||||
common.BytesToAddress([]byte{5}): &bigModExp{eip2565: true},
|
||||
common.BytesToAddress([]byte{6}): &bn256AddIstanbul{},
|
||||
common.BytesToAddress([]byte{7}): &bn256ScalarMulIstanbul{},
|
||||
common.BytesToAddress([]byte{8}): &bn256PairingIstanbul{},
|
||||
common.BytesToAddress([]byte{9}): &blake2F{},
|
||||
|
||||
common.BytesToAddress([]byte{100}): &tmHeaderValidate{},
|
||||
common.BytesToAddress([]byte{101}): &iavlMerkleProofValidatePlato{},
|
||||
common.BytesToAddress([]byte{102}): &blsSignatureVerify{},
|
||||
@ -263,17 +263,17 @@ var PrecompiledContractsBLS = map[common.Address]PrecompiledContract{
|
||||
|
||||
var (
|
||||
PrecompiledAddressesCancun []common.Address
|
||||
PrecompiledAddressesFeynman []common.Address
|
||||
PrecompiledAddressesHertz []common.Address
|
||||
PrecompiledAddressesBerlin []common.Address
|
||||
PrecompiledAddressesPlato []common.Address
|
||||
PrecompiledAddressesLuban []common.Address
|
||||
PrecompiledAddressesPlanck []common.Address
|
||||
PrecompiledAddressesMoran []common.Address
|
||||
PrecompiledAddressesNano []common.Address
|
||||
PrecompiledAddressesBerlin []common.Address
|
||||
PrecompiledAddressesIstanbul []common.Address
|
||||
PrecompiledAddressesByzantium []common.Address
|
||||
PrecompiledAddressesHomestead []common.Address
|
||||
PrecompiledAddressesFeynman []common.Address
|
||||
)
|
||||
|
||||
func init() {
|
||||
@ -286,9 +286,6 @@ func init() {
|
||||
for k := range PrecompiledContractsIstanbul {
|
||||
PrecompiledAddressesIstanbul = append(PrecompiledAddressesIstanbul, k)
|
||||
}
|
||||
for k := range PrecompiledContractsBerlin {
|
||||
PrecompiledAddressesBerlin = append(PrecompiledAddressesBerlin, k)
|
||||
}
|
||||
for k := range PrecompiledContractsNano {
|
||||
PrecompiledAddressesNano = append(PrecompiledAddressesNano, k)
|
||||
}
|
||||
@ -304,15 +301,18 @@ func init() {
|
||||
for k := range PrecompiledContractsPlato {
|
||||
PrecompiledAddressesPlato = append(PrecompiledAddressesPlato, k)
|
||||
}
|
||||
for k := range PrecompiledContractsBerlin {
|
||||
PrecompiledAddressesBerlin = append(PrecompiledAddressesBerlin, k)
|
||||
}
|
||||
for k := range PrecompiledContractsHertz {
|
||||
PrecompiledAddressesHertz = append(PrecompiledAddressesHertz, k)
|
||||
}
|
||||
for k := range PrecompiledContractsCancun {
|
||||
PrecompiledAddressesCancun = append(PrecompiledAddressesCancun, k)
|
||||
}
|
||||
for k := range PrecompiledContractsFeynman {
|
||||
PrecompiledAddressesFeynman = append(PrecompiledAddressesFeynman, k)
|
||||
}
|
||||
for k := range PrecompiledContractsCancun {
|
||||
PrecompiledAddressesCancun = append(PrecompiledAddressesCancun, k)
|
||||
}
|
||||
}
|
||||
|
||||
// ActivePrecompiles returns the precompiles enabled with the current configuration.
|
||||
@ -324,6 +324,8 @@ func ActivePrecompiles(rules params.Rules) []common.Address {
|
||||
return PrecompiledAddressesFeynman
|
||||
case rules.IsHertz:
|
||||
return PrecompiledAddressesHertz
|
||||
case rules.IsBerlin:
|
||||
return PrecompiledAddressesBerlin
|
||||
case rules.IsPlato:
|
||||
return PrecompiledAddressesPlato
|
||||
case rules.IsLuban:
|
||||
@ -334,8 +336,6 @@ func ActivePrecompiles(rules params.Rules) []common.Address {
|
||||
return PrecompiledAddressesMoran
|
||||
case rules.IsNano:
|
||||
return PrecompiledAddressesNano
|
||||
case rules.IsBerlin:
|
||||
return PrecompiledAddressesBerlin
|
||||
case rules.IsIstanbul:
|
||||
return PrecompiledAddressesIstanbul
|
||||
case rules.IsByzantium:
|
||||
|
@ -39,9 +39,7 @@ web3.eth.sendTransaction({
|
||||
web3.eth.sendTransaction({
|
||||
from: "consensus address of your validator",
|
||||
to: "0x0000000000000000000000000000000000001000",
|
||||
gas: "1000000",
|
||||
data: "0x04c4fec6"
|
||||
})
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
@ -178,14 +178,6 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
|
||||
}
|
||||
// Override the chain config with provided settings.
|
||||
var overrides core.ChainOverrides
|
||||
if config.OverrideShanghai != nil {
|
||||
chainConfig.ShanghaiTime = config.OverrideShanghai
|
||||
overrides.OverrideShanghai = config.OverrideShanghai
|
||||
}
|
||||
if config.OverrideKepler != nil {
|
||||
chainConfig.KeplerTime = config.OverrideKepler
|
||||
overrides.OverrideKepler = config.OverrideKepler
|
||||
}
|
||||
if config.OverrideCancun != nil {
|
||||
chainConfig.CancunTime = config.OverrideCancun
|
||||
overrides.OverrideCancun = config.OverrideCancun
|
||||
@ -198,6 +190,10 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
|
||||
chainConfig.FeynmanTime = config.OverrideFeynman
|
||||
overrides.OverrideFeynman = config.OverrideFeynman
|
||||
}
|
||||
if config.OverrideFeynmanFix != nil {
|
||||
chainConfig.FeynmanFixTime = config.OverrideFeynmanFix
|
||||
overrides.OverrideFeynmanFix = config.OverrideFeynmanFix
|
||||
}
|
||||
|
||||
networkID := config.NetworkId
|
||||
if networkID == 0 {
|
||||
|
@ -18,6 +18,7 @@ package catalyst
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"errors"
|
||||
"math/big"
|
||||
"sync"
|
||||
@ -27,6 +28,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/txpool"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto/kzg4844"
|
||||
"github.com/ethereum/go-ethereum/eth"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
@ -161,14 +163,14 @@ func (c *SimulatedBeacon) sealBlock(withdrawals []*types.Withdrawal, timestamp u
|
||||
SuggestedFeeRecipient: feeRecipient,
|
||||
Withdrawals: withdrawals,
|
||||
Random: random,
|
||||
}, engine.PayloadV2, true)
|
||||
BeaconRoot: &common.Hash{},
|
||||
}, engine.PayloadV3, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if fcResponse == engine.STATUS_SYNCING {
|
||||
return errors.New("chain rewind prevented invocation of payload creation")
|
||||
}
|
||||
|
||||
envelope, err := c.engineAPI.getPayload(*fcResponse.PayloadID, true)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -186,8 +188,21 @@ func (c *SimulatedBeacon) sealBlock(withdrawals []*types.Withdrawal, timestamp u
|
||||
}
|
||||
}
|
||||
|
||||
// Independently calculate the blob hashes from sidecars.
|
||||
blobHashes := make([]common.Hash, 0)
|
||||
if envelope.BlobsBundle != nil {
|
||||
hasher := sha256.New()
|
||||
for _, commit := range envelope.BlobsBundle.Commitments {
|
||||
var c kzg4844.Commitment
|
||||
if len(commit) != len(c) {
|
||||
return errors.New("invalid commitment length")
|
||||
}
|
||||
copy(c[:], commit)
|
||||
blobHashes = append(blobHashes, kzg4844.CalcBlobHashV1(hasher, &c))
|
||||
}
|
||||
}
|
||||
// Mark the payload as canon
|
||||
if _, err = c.engineAPI.NewPayloadV2(*payload); err != nil {
|
||||
if _, err = c.engineAPI.NewPayloadV3(*payload, blobHashes, &common.Hash{}); err != nil {
|
||||
return err
|
||||
}
|
||||
c.setCurrentState(payload.BlockHash, finalizedHash)
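The blob-hash derivation used above can be sketched in isolation as follows (the commitment here is a hypothetical zero value; real ones come from the payload's blobs bundle):

```go
package main

import (
	"crypto/sha256"
	"fmt"

	"github.com/ethereum/go-ethereum/crypto/kzg4844"
)

func main() {
	var commit kzg4844.Commitment // hypothetical 48-byte commitment
	hasher := sha256.New()

	// A versioned blob hash is the SHA-256 of the commitment with the first
	// byte replaced by the version prefix (0x01).
	vh := kzg4844.CalcBlobHashV1(hasher, &commit)
	fmt.Printf("versioned hash: 0x%x (version byte %#02x)\n", vh, vh[0])
}
```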
|
||||
|
@ -186,12 +186,6 @@ type Config struct {
|
||||
// send-transaction variants. The unit is ether.
|
||||
RPCTxFeeCap float64
|
||||
|
||||
// OverrideShanghai (TODO: remove after the fork)
|
||||
OverrideShanghai *uint64 `toml:",omitempty"`
|
||||
|
||||
// OverrideKepler (TODO: remove after the fork)
|
||||
OverrideKepler *uint64 `toml:",omitempty"`
|
||||
|
||||
// OverrideCancun (TODO: remove after the fork)
|
||||
OverrideCancun *uint64 `toml:",omitempty"`
|
||||
|
||||
@ -200,6 +194,9 @@ type Config struct {
|
||||
|
||||
// OverrideFeynman (TODO: remove after the fork)
|
||||
OverrideFeynman *uint64 `toml:",omitempty"`
|
||||
|
||||
// OverrideFeynmanFix (TODO: remove after the fork)
|
||||
OverrideFeynmanFix *uint64 `toml:",omitempty"`
|
||||
}
|
||||
|
||||
// CreateConsensusEngine creates a consensus engine for the given chain config.
|
||||
|
@ -69,10 +69,10 @@ func (c Config) MarshalTOML() (interface{}, error) {
|
||||
RPCGasCap uint64
|
||||
RPCEVMTimeout time.Duration
|
||||
RPCTxFeeCap float64
|
||||
OverrideShanghai *uint64 `toml:",omitempty"`
|
||||
OverrideKepler *uint64 `toml:",omitempty"`
|
||||
OverrideCancun *uint64 `toml:",omitempty"`
|
||||
OverrideVerkle *uint64 `toml:",omitempty"`
|
||||
OverrideFeynman *uint64 `toml:",omitempty"`
|
||||
OverrideFeynmanFix *uint64 `toml:",omitempty"`
|
||||
}
|
||||
var enc Config
|
||||
enc.Genesis = c.Genesis
|
||||
@ -127,10 +127,10 @@ func (c Config) MarshalTOML() (interface{}, error) {
|
||||
enc.RPCGasCap = c.RPCGasCap
|
||||
enc.RPCEVMTimeout = c.RPCEVMTimeout
|
||||
enc.RPCTxFeeCap = c.RPCTxFeeCap
|
||||
enc.OverrideShanghai = c.OverrideShanghai
|
||||
enc.OverrideKepler = c.OverrideKepler
|
||||
enc.OverrideCancun = c.OverrideCancun
|
||||
enc.OverrideVerkle = c.OverrideVerkle
|
||||
enc.OverrideFeynman = c.OverrideFeynman
|
||||
enc.OverrideFeynmanFix = c.OverrideFeynmanFix
|
||||
return &enc, nil
|
||||
}
|
||||
|
||||
@ -189,10 +189,10 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
|
||||
RPCGasCap *uint64
|
||||
RPCEVMTimeout *time.Duration
|
||||
RPCTxFeeCap *float64
|
||||
OverrideShanghai *uint64 `toml:",omitempty"`
|
||||
OverrideKepler *uint64 `toml:",omitempty"`
|
||||
OverrideCancun *uint64 `toml:",omitempty"`
|
||||
OverrideVerkle *uint64 `toml:",omitempty"`
|
||||
OverrideFeynman *uint64 `toml:",omitempty"`
|
||||
OverrideFeynmanFix *uint64 `toml:",omitempty"`
|
||||
}
|
||||
var dec Config
|
||||
if err := unmarshal(&dec); err != nil {
|
||||
@ -354,17 +354,17 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
|
||||
if dec.RPCTxFeeCap != nil {
|
||||
c.RPCTxFeeCap = *dec.RPCTxFeeCap
|
||||
}
|
||||
if dec.OverrideShanghai != nil {
|
||||
c.OverrideShanghai = dec.OverrideShanghai
|
||||
}
|
||||
if dec.OverrideKepler != nil {
|
||||
c.OverrideKepler = dec.OverrideKepler
|
||||
}
|
||||
if dec.OverrideCancun != nil {
|
||||
c.OverrideCancun = dec.OverrideCancun
|
||||
}
|
||||
if dec.OverrideVerkle != nil {
|
||||
c.OverrideVerkle = dec.OverrideVerkle
|
||||
}
|
||||
if dec.OverrideFeynman != nil {
|
||||
c.OverrideFeynman = dec.OverrideFeynman
|
||||
}
|
||||
if dec.OverrideFeynmanFix != nil {
|
||||
c.OverrideFeynmanFix = dec.OverrideFeynmanFix
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -25,6 +25,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
@ -107,6 +108,11 @@ func testPrestateDiffTracer(tracerName string, dirPath string, t *testing.T) {
|
||||
)
|
||||
defer state.Close()
|
||||
|
||||
if test.Genesis.ExcessBlobGas != nil && test.Genesis.BlobGasUsed != nil {
|
||||
excessBlobGas := eip4844.CalcExcessBlobGas(*test.Genesis.ExcessBlobGas, *test.Genesis.BlobGasUsed)
|
||||
context.BlobBaseFee = eip4844.CalcBlobFee(excessBlobGas)
|
||||
}
|
||||
|
||||
tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), test.TracerConfig)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create call tracer: %v", err)
|
||||
|
63
eth/tracers/internal/tracetest/testdata/prestate_tracer/blob_tx.json
vendored
Normal file
@ -0,0 +1,63 @@
|
||||
{
|
||||
"genesis": {
|
||||
"baseFeePerGas": "7",
|
||||
"blobGasUsed": "0",
|
||||
"difficulty": "0",
|
||||
"excessBlobGas": "36306944",
|
||||
"extraData": "0xd983010e00846765746888676f312e32312e308664617277696e",
|
||||
"gasLimit": "15639172",
|
||||
"hash": "0xc682259fda061bb9ce8ccb491d5b2d436cb73daf04e1025dd116d045ce4ad28c",
|
||||
"miner": "0x0000000000000000000000000000000000000000",
|
||||
"mixHash": "0xae1a5ba939a4c9ac38aabeff361169fb55a6fc2c9511457e0be6eff9514faec0",
|
||||
"nonce": "0x0000000000000000",
|
||||
"number": "315",
|
||||
"parentBeaconBlockRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"stateRoot": "0x577f42ab21ccfd946511c57869ace0bdf7c217c36f02b7cd3459df0ed1cffc1a",
|
||||
"timestamp": "1709626771",
|
||||
"totalDifficulty": "1",
|
||||
"withdrawals": [],
|
||||
"withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
|
||||
"alloc": {
|
||||
"0x0000000000000000000000000000000000000000": {
|
||||
"balance": "0x272e0528"
|
||||
},
|
||||
"0x0c2c51a0990aee1d73c1228de158688341557508": {
|
||||
"balance": "0xde0b6b3a7640000"
|
||||
}
|
||||
},
|
||||
"config": {
|
||||
"chainId": 1337,
|
||||
"homesteadBlock": 0,
|
||||
"eip150Block": 0,
|
||||
"eip155Block": 0,
|
||||
"eip158Block": 0,
|
||||
"byzantiumBlock": 0,
|
||||
"constantinopleBlock": 0,
|
||||
"petersburgBlock": 0,
|
||||
"istanbulBlock": 0,
|
||||
"muirGlacierBlock": 0,
|
||||
"berlinBlock": 0,
|
||||
"londonBlock": 0,
|
||||
"arrowGlacierBlock": 0,
|
||||
"grayGlacierBlock": 0,
|
||||
"shanghaiTime": 0,
|
||||
"cancunTime": 0,
|
||||
"terminalTotalDifficulty": 0,
|
||||
"terminalTotalDifficultyPassed": true
|
||||
}
|
||||
},
|
||||
"context": {
|
||||
"number": "316",
|
||||
"difficulty": "0",
|
||||
"timestamp": "1709626785",
|
||||
"gasLimit": "15654443",
|
||||
"miner": "0x0000000000000000000000000000000000000000"
|
||||
},
|
||||
"input": "0x03f8b1820539806485174876e800825208940c2c51a0990aee1d73c1228de1586883415575088080c083020000f842a00100c9fbdf97f747e85847b4f3fff408f89c26842f77c882858bf2c89923849aa00138e3896f3c27f2389147507f8bcec52028b0efca6ee842ed83c9158873943880a0dbac3f97a532c9b00e6239b29036245a5bfbb96940b9d848634661abee98b945a03eec8525f261c2e79798f7b45a5d6ccaefa24576d53ba5023e919b86841c0675",
|
||||
"result": {
|
||||
"0x0000000000000000000000000000000000000000": { "balance": "0x272e0528" },
|
||||
"0x0c2c51a0990aee1d73c1228de158688341557508": {
|
||||
"balance": "0xde0b6b3a7640000"
|
||||
}
|
||||
}
|
||||
}
|
@ -28,6 +28,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/eth/tracers"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
)
|
||||
|
||||
//go:generate go run github.com/fjl/gencodec -type account -field-override accountMarshaling -out gen_account_json.go
|
||||
@ -109,6 +110,12 @@ func (t *prestateTracer) CaptureStart(env *vm.EVM, from common.Address, to commo
|
||||
gasPrice := env.TxContext.GasPrice
|
||||
consumedGas := new(big.Int).Mul(gasPrice, new(big.Int).SetUint64(t.gasLimit))
|
||||
fromBal.Add(fromBal, new(big.Int).Add(value, consumedGas))
|
||||
|
||||
// Add blob fee to the sender's balance.
|
||||
if env.Context.BlobBaseFee != nil && len(env.TxContext.BlobHashes) > 0 {
|
||||
blobGas := uint64(params.BlobTxBlobGasPerBlob * len(env.TxContext.BlobHashes))
|
||||
fromBal.Add(fromBal, new(big.Int).Mul(env.Context.BlobBaseFee, new(big.Int).SetUint64(blobGas)))
|
||||
}
|
||||
t.pre[from].Balance = fromBal
|
||||
t.pre[from].Nonce--
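A rough worked example of the amount being credited back to the sender's pre-state balance (illustrative numbers only; `params.BlobTxBlobGasPerBlob` is 131072 gas per blob):

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/params"
)

func main() {
	blobBaseFee := big.NewInt(7) // hypothetical blob base fee in wei
	blobCount := 2               // hypothetical two-blob transaction

	blobGas := uint64(params.BlobTxBlobGasPerBlob * blobCount)
	credit := new(big.Int).Mul(blobBaseFee, new(big.Int).SetUint64(blobGas))
	fmt.Println(blobGas, credit) // 262144 1835008
}
```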
|
||||
|
||||
|
@ -245,6 +245,12 @@ func toCallArg(msg ethereum.CallMsg) interface{} {
|
||||
if msg.AccessList != nil {
|
||||
arg["accessList"] = msg.AccessList
|
||||
}
|
||||
if msg.BlobGasFeeCap != nil {
|
||||
arg["maxFeePerBlobGas"] = (*hexutil.Big)(msg.BlobGasFeeCap)
|
||||
}
|
||||
if msg.BlobHashes != nil {
|
||||
arg["blobVersionedHashes"] = msg.BlobHashes
|
||||
}
|
||||
return arg
|
||||
}
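On the caller side, a hedged sketch of what these new fields enable (endpoint and values are placeholders; the address reuses one from the blob_tx testdata earlier in this diff):

```go
package main

import (
	"context"
	"math/big"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	client, err := ethclient.Dial("http://127.0.0.1:8545") // placeholder endpoint
	if err != nil {
		panic(err)
	}
	defer client.Close()

	to := common.HexToAddress("0x0c2c51a0990aee1d73c1228de158688341557508")
	msg := ethereum.CallMsg{
		To:            &to,
		BlobGasFeeCap: big.NewInt(1_000_000_000), // serialized as maxFeePerBlobGas
		BlobHashes:    []common.Hash{{0x01}},     // serialized as blobVersionedHashes
	}
	if _, err := client.CallContract(context.Background(), msg, nil); err != nil {
		panic(err)
	}
}
```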
|
||||
|
||||
|
@ -32,6 +32,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/consensus/beacon"
|
||||
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
@ -452,6 +453,7 @@ func newGQLService(t *testing.T, stack *node.Node, shanghai bool, gspec *core.Ge
|
||||
TrieDirtyCache: 5,
|
||||
TrieTimeout: 60 * time.Minute,
|
||||
SnapshotCache: 5,
|
||||
StateScheme: rawdb.HashScheme,
|
||||
}
|
||||
var engine consensus.Engine = ethash.NewFaker()
|
||||
if shanghai {
|
||||
|
@ -2527,6 +2527,16 @@ func (s *NetAPI) Version() string {
|
||||
return fmt.Sprintf("%d", s.networkVersion)
|
||||
}
|
||||
|
||||
// NodeInfo retrieves all the information we know about the host node at the
|
||||
// protocol granularity. This is the same as the `admin_nodeInfo` method.
|
||||
func (s *NetAPI) NodeInfo() (*p2p.NodeInfo, error) {
|
||||
server := s.net
|
||||
if server == nil {
|
||||
return nil, errors.New("server not found")
|
||||
}
|
||||
return s.net.NodeInfo(), nil
|
||||
}
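A hedged sketch of calling the new endpoint from Go (the URL is a placeholder for a locally running node; the method name follows from the `net` namespace, matching the changelog entry for net_nodeInfo):

```go
package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.DialContext(context.Background(), "http://127.0.0.1:8545") // placeholder endpoint
	if err != nil {
		panic(err)
	}
	defer client.Close()

	var info p2p.NodeInfo
	// net_nodeInfo returns the same payload as admin_nodeInfo, but from the public namespace.
	if err := client.Call(&info, "net_nodeInfo"); err != nil {
		panic(err)
	}
	fmt.Println(info.Enode)
}
```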
|
||||
|
||||
// checkTxFee is an internal function used to check whether the fee of
|
||||
// the given transaction is _reasonable_(under the cap).
|
||||
func checkTxFee(gasPrice *big.Int, gas uint64, cap float64) error {
|
||||
|
@ -3734,7 +3734,7 @@ var inputCallFormatter = function (options){
|
||||
options.to = inputAddressFormatter(options.to);
|
||||
}
|
||||
|
||||
['maxFeePerGas', 'maxPriorityFeePerGas', 'gasPrice', 'gas', 'value', 'nonce'].filter(function (key) {
|
||||
['maxFeePerBlobGas', 'maxFeePerGas', 'maxPriorityFeePerGas', 'gasPrice', 'gas', 'value', 'nonce'].filter(function (key) {
|
||||
return options[key] !== undefined;
|
||||
}).forEach(function(key){
|
||||
options[key] = utils.fromDecimal(options[key]);
|
||||
@ -3759,7 +3759,7 @@ var inputTransactionFormatter = function (options){
|
||||
options.to = inputAddressFormatter(options.to);
|
||||
}
|
||||
|
||||
['maxFeePerGas', 'maxPriorityFeePerGas', 'gasPrice', 'gas', 'value', 'nonce'].filter(function (key) {
|
||||
['maxFeePerBlobGas', 'maxFeePerGas', 'maxPriorityFeePerGas', 'gasPrice', 'gas', 'value', 'nonce'].filter(function (key) {
|
||||
return options[key] !== undefined;
|
||||
}).forEach(function(key){
|
||||
options[key] = utils.fromDecimal(options[key]);
|
||||
@ -3789,6 +3789,9 @@ var outputTransactionFormatter = function (tx){
|
||||
if(tx.maxPriorityFeePerGas !== undefined) {
|
||||
tx.maxPriorityFeePerGas = utils.toBigNumber(tx.maxPriorityFeePerGas);
|
||||
}
|
||||
if(tx.maxFeePerBlobGas !== undefined) {
|
||||
tx.maxFeePerBlobGas = utils.toBigNumber(tx.maxFeePerBlobGas);
|
||||
}
|
||||
tx.value = utils.toBigNumber(tx.value);
|
||||
return tx;
|
||||
};
|
||||
@ -3810,6 +3813,12 @@ var outputTransactionReceiptFormatter = function (receipt){
|
||||
if(receipt.effectiveGasPrice !== undefined) {
|
||||
receipt.effectiveGasPrice = utils.toBigNumber(receipt.effectiveGasPrice);
|
||||
}
|
||||
if(receipt.blobGasPrice !== undefined) {
|
||||
receipt.blobGasPrice = utils.toBigNumber(receipt.blobGasPrice);
|
||||
}
|
||||
if(receipt.blobGasUsed !== undefined) {
|
||||
receipt.blobGasUsed = utils.toBigNumber(receipt.blobGasUsed);
|
||||
}
|
||||
if(utils.isArray(receipt.logs)) {
|
||||
receipt.logs = receipt.logs.map(function(log){
|
||||
return outputLogFormatter(log);
|
||||
@ -3864,11 +3873,17 @@ var outputBlockFormatter = function(block) {
|
||||
if (block.baseFeePerGas !== undefined) {
|
||||
block.baseFeePerGas = utils.toBigNumber(block.baseFeePerGas);
|
||||
}
|
||||
if (block.blobGasUsed !== undefined) {
|
||||
block.blobGasUsed = utils.toBigNumber(block.blobGasUsed);
|
||||
}
|
||||
if (block.excessBlobGas !== undefined) {
|
||||
block.excessBlobGas = utils.toBigNumber(block.excessBlobGas);
|
||||
}
|
||||
block.gasLimit = utils.toDecimal(block.gasLimit);
|
||||
block.gasUsed = utils.toDecimal(block.gasUsed);
|
||||
block.size = utils.toDecimal(block.size);
|
||||
block.timestamp = utils.toDecimal(block.timestamp);
|
||||
if(block.number !== null)
|
||||
if (block.number !== null)
|
||||
block.number = utils.toDecimal(block.number);
|
||||
|
||||
block.difficulty = utils.toBigNumber(block.difficulty);
|
||||
|
@ -18,6 +18,7 @@ import (
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/miner/builderclient"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rpc"
@ -36,10 +37,12 @@ const (
)

var (
	diffInTurn = big.NewInt(2) // the difficulty of a block that proposed by an in-turn validator
	bidSimTimer = metrics.NewRegisteredTimer("bid/sim/duration", nil)
)

var (
	diffInTurn = big.NewInt(2) // the difficulty of a block that proposed by an in-turn validator

	dialer = &net.Dialer{
		Timeout: time.Second,
		KeepAlive: 60 * time.Second,
@ -503,6 +506,7 @@ func (b *bidSimulator) simBid(interruptCh chan int32, bidRuntime *BidRuntime) {

	// ensure simulation exited then start next simulation
	b.SetSimulatingBid(parentHash, bidRuntime)
	start := time.Now()

	defer func(simStart time.Time) {
		logCtx := []any{
@ -532,6 +536,7 @@ func (b *bidSimulator) simBid(interruptCh chan int32, bidRuntime *BidRuntime) {
		}

		b.RemoveSimulatingBid(parentHash)
		bidSimTimer.UpdateSince(start)
	}(time.Now())

	// prepareWork will configure header with a suitable time according to consensus
@ -606,6 +611,8 @@ func (b *bidSimulator) simBid(interruptCh chan int32, bidRuntime *BidRuntime) {

// reportIssue reports the issue to the mev-sentry
func (b *bidSimulator) reportIssue(bidRuntime *BidRuntime, err error) {
	metrics.GetOrRegisterCounter(fmt.Sprintf("bid/err/%v", bidRuntime.bid.Builder), nil).Inc(1)

	cli := b.builders[bidRuntime.bid.Builder]
	if cli != nil {
		cli.ReportIssue(context.Background(), &types.BidIssue{

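The bid-simulator hunks above register a bid/sim/duration timer and per-builder error counters through go-ethereum's metrics package. A self-contained sketch of that pattern, using placeholder metric names rather than the ones registered here:

package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	metrics.Enabled = true // collection is off unless enabled (normally via the --metrics flag)

	simTimer := metrics.GetOrRegisterTimer("example/sim/duration", nil)
	start := time.Now()
	time.Sleep(10 * time.Millisecond) // stand-in for the simulated work
	simTimer.UpdateSince(start)       // record elapsed time, as the defer in simBid does

	// Per-builder error counter, keyed by a dynamic name.
	metrics.GetOrRegisterCounter(fmt.Sprintf("example/err/%v", "builder-1"), nil).Inc(1)

	fmt.Println("recorded simulations:", simTimer.Count())
}
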
@ -1296,6 +1296,7 @@ LOOP:

	// when out-turn, use bestWork to prevent bundle leakage.
	// when in-turn, compare with remote work.
	from := bestWork.coinbase
	if w.bidFetcher != nil && bestWork.header.Difficulty.Cmp(diffInTurn) == 0 {
		bestBid := w.bidFetcher.GetBestBid(bestWork.header.ParentHash)

@ -1307,10 +1308,13 @@ LOOP:
			// blockReward(benefits delegators) and validatorReward(benefits the validator) are both optimal
			if localValidatorReward.CmpBig(bestBid.packedValidatorReward) < 0 {
				bestWork = bestBid.env
				from = bestBid.bid.Builder
			}
		}
	}

	metrics.GetOrRegisterCounter(fmt.Sprintf("block/from/%v", from), nil).Inc(1)

	w.commit(bestWork, w.fullTaskHook, true, start)

	// Swap out the old work with the new one, terminating any leftover

@ -239,6 +239,8 @@ type Config struct {
	EnablePersonal bool `toml:"-"`

	DBEngine string `toml:",omitempty"`

	Instance int `toml:",omitempty"`
}

// IPCEndpoint resolves an IPC endpoint based on a configured value, taking into

@ -34,6 +34,8 @@ const (
	DefaultWSPort = 8546 // Default TCP port for the websocket RPC server
	DefaultAuthHost = "localhost" // Default host interface for the authenticated apis
	DefaultAuthPort = 8551 // Default port for the authenticated apis
	DefaultListenPort = 30303 // Default port for the TCP listening address
	DefaultDiscPort = 30303 // Default port for the UDP discovery address
)

const (
@ -73,7 +75,8 @@ var DefaultConfig = Config{
		MaxPeersPerIP: 0, // by default, it will be same as MaxPeers
		NAT: nat.Any(),
	},
	DBEngine: "", // Use whatever exists, will default to Leveldb if non-existent and supported
	DBEngine: "", // Use whatever exists, will default to Pebble if non-existent and supported
	Instance: 1,
}

// DefaultDataDir is the default data directory to use for the databases and other

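The new Instance field and the DefaultListenPort/DefaultDiscPort constants above support running several nodes on one host with per-instance ports. The following is a purely hypothetical illustration of deriving distinct ports from an instance index; it is not the offset scheme actually applied by the --instance flag in this change:

package main

import "fmt"

const (
	defaultHTTPPort   = 8545
	defaultWSPort     = 8546
	defaultListenPort = 30303
)

// portsForInstance is a hypothetical helper: it spaces each instance's ports
// by a fixed stride so that two local nodes never collide.
func portsForInstance(instance int) (httpPort, wsPort, p2pPort int) {
	offset := (instance - 1) * 100 // assumed stride, for illustration only
	return defaultHTTPPort + offset, defaultWSPort + offset, defaultListenPort + offset
}

func main() {
	for i := 1; i <= 3; i++ {
		h, w, p := portsForInstance(i)
		fmt.Printf("instance %d: http=%d ws=%d p2p=%d\n", i, h, w, p)
	}
}
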
@ -190,6 +190,7 @@ var (
		ShanghaiTime: newUint64(1702972800),
		KeplerTime: newUint64(1702972800),
		FeynmanTime: newUint64(1710136800),
		FeynmanFixTime: newUint64(1711342800),

		Parlia: &ParliaConfig{
			Period: 3,
@ -227,6 +228,7 @@ var (
		ShanghaiTime: newUint64(0),
		KeplerTime: newUint64(0),
		FeynmanTime: newUint64(0),
		FeynmanFixTime: newUint64(0),

		Parlia: &ParliaConfig{
			Period: 3,
@ -312,6 +314,7 @@ var (
		ArrowGlacierBlock: big.NewInt(0),
		GrayGlacierBlock: big.NewInt(0),
		ShanghaiTime: newUint64(0),
		CancunTime: newUint64(0),
		TerminalTotalDifficulty: big.NewInt(0),
		TerminalTotalDifficultyPassed: true,
	}
@ -494,6 +497,7 @@ type ChainConfig struct {
	ShanghaiTime *uint64 `json:"shanghaiTime,omitempty"` // Shanghai switch time (nil = no fork, 0 = already on shanghai)
	KeplerTime *uint64 `json:"keplerTime,omitempty"` // Kepler switch time (nil = no fork, 0 = already activated)
	FeynmanTime *uint64 `json:"feynmanTime,omitempty"` // Feynman switch time (nil = no fork, 0 = already activated)
	FeynmanFixTime *uint64 `json:"feynmanFixTime,omitempty"` // FeynmanFix switch time (nil = no fork, 0 = already activated)
	CancunTime *uint64 `json:"cancunTime,omitempty"` // Cancun switch time (nil = no fork, 0 = already on cancun)
	PragueTime *uint64 `json:"pragueTime,omitempty"` // Prague switch time (nil = no fork, 0 = already on prague)
	VerkleTime *uint64 `json:"verkleTime,omitempty"` // Verkle switch time (nil = no fork, 0 = already on verkle)
@ -590,7 +594,12 @@ func (c *ChainConfig) String() string {
		FeynmanTime = big.NewInt(0).SetUint64(*c.FeynmanTime)
	}

	return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, Ramanujan: %v, Niels: %v, MirrorSync: %v, Bruno: %v, Berlin: %v, YOLO v3: %v, CatalystBlock: %v, London: %v, ArrowGlacier: %v, MergeFork:%v, Euler: %v, Gibbs: %v, Nano: %v, Moran: %v, Planck: %v,Luban: %v, Plato: %v, Hertz: %v, Hertzfix: %v, ShanghaiTime: %v, KeplerTime: %v, FeynmanTime: %v, Engine: %v}",
	var FeynmanFixTime *big.Int
	if c.FeynmanFixTime != nil {
		FeynmanFixTime = big.NewInt(0).SetUint64(*c.FeynmanFixTime)
	}

	return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, Ramanujan: %v, Niels: %v, MirrorSync: %v, Bruno: %v, Berlin: %v, YOLO v3: %v, CatalystBlock: %v, London: %v, ArrowGlacier: %v, MergeFork:%v, Euler: %v, Gibbs: %v, Nano: %v, Moran: %v, Planck: %v,Luban: %v, Plato: %v, Hertz: %v, Hertzfix: %v, ShanghaiTime: %v, KeplerTime: %v, FeynmanTime: %v, FeynmanFixTime: %v, Engine: %v}",
		c.ChainID,
		c.HomesteadBlock,
		c.DAOForkBlock,
@ -625,6 +634,7 @@ func (c *ChainConfig) String() string {
		ShanghaiTime,
		KeplerTime,
		FeynmanTime,
		FeynmanFixTime,
		engine,
	)
}
@ -873,6 +883,20 @@ func (c *ChainConfig) IsOnFeynman(currentBlockNumber *big.Int, lastBlockTime uin
	return !c.IsFeynman(lastBlockNumber, lastBlockTime) && c.IsFeynman(currentBlockNumber, currentBlockTime)
}

// IsFeynmanFix returns whether time is either equal to the FeynmanFix fork time or greater.
func (c *ChainConfig) IsFeynmanFix(num *big.Int, time uint64) bool {
	return c.IsLondon(num) && isTimestampForked(c.FeynmanFixTime, time)
}

// IsOnFeynmanFix returns whether currentBlockTime is either equal to the FeynmanFix fork time or greater firstly.
func (c *ChainConfig) IsOnFeynmanFix(currentBlockNumber *big.Int, lastBlockTime uint64, currentBlockTime uint64) bool {
	lastBlockNumber := new(big.Int)
	if currentBlockNumber.Cmp(big.NewInt(1)) >= 0 {
		lastBlockNumber.Sub(currentBlockNumber, big.NewInt(1))
	}
	return !c.IsFeynmanFix(lastBlockNumber, lastBlockTime) && c.IsFeynmanFix(currentBlockNumber, currentBlockTime)
}

// IsCancun returns whether num is either equal to the Cancun fork time or greater.
func (c *ChainConfig) IsCancun(num *big.Int, time uint64) bool {
	return c.IsLondon(num) && isTimestampForked(c.CancunTime, time)
@ -939,6 +963,7 @@ func (c *ChainConfig) CheckConfigForkOrder() error {
		{name: "hertzfixBlock", block: c.HertzfixBlock},
		{name: "keplerTime", timestamp: c.KeplerTime},
		{name: "feynmanTime", timestamp: c.FeynmanTime},
		{name: "feynmanFixTime", timestamp: c.FeynmanFixTime},
		{name: "cancunTime", timestamp: c.CancunTime, optional: true},
		{name: "pragueTime", timestamp: c.PragueTime, optional: true},
		{name: "verkleTime", timestamp: c.VerkleTime, optional: true},
@ -1081,6 +1106,9 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, headNumber *big.Int,
	if isForkTimestampIncompatible(c.FeynmanTime, newcfg.FeynmanTime, headTimestamp) {
		return newTimestampCompatError("Feynman fork timestamp", c.FeynmanTime, newcfg.FeynmanTime)
	}
	if isForkTimestampIncompatible(c.FeynmanFixTime, newcfg.FeynmanFixTime, headTimestamp) {
		return newTimestampCompatError("FeynmanFix fork timestamp", c.FeynmanFixTime, newcfg.FeynmanFixTime)
	}
	if isForkTimestampIncompatible(c.CancunTime, newcfg.CancunTime, headTimestamp) {
		return newTimestampCompatError("Cancun fork timestamp", c.CancunTime, newcfg.CancunTime)
	}

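The FeynmanFix plumbing above follows the standard timestamp-fork pattern: a fork is active once the block time reaches the configured switch time, and the IsOn variant detects the first block past the boundary. A self-contained sketch of that check, using simplified stand-ins rather than the ChainConfig methods themselves:

package main

import "fmt"

// isTimestampForked reports whether a timestamp-based fork is active at time t.
// A nil switch time means the fork is never scheduled.
func isTimestampForked(forkTime *uint64, t uint64) bool {
	if forkTime == nil {
		return false
	}
	return *forkTime <= t
}

// isOnFork reports whether the current block is the first one at or past the fork time.
func isOnFork(forkTime *uint64, lastBlockTime, currentBlockTime uint64) bool {
	return !isTimestampForked(forkTime, lastBlockTime) && isTimestampForked(forkTime, currentBlockTime)
}

func main() {
	feynmanFix := uint64(1711342800) // mainnet switch time from the hunk above
	fmt.Println(isOnFork(&feynmanFix, 1711342797, 1711342800)) // true: first block past the boundary
	fmt.Println(isOnFork(&feynmanFix, 1711342800, 1711342803)) // false: already activated
}
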
@ -194,8 +194,8 @@ var (
	MinimumDifficulty = big.NewInt(131072) // The minimum that the difficulty may ever be.
	DurationLimit = big.NewInt(13) // The decision boundary on the blocktime duration used to determine whether difficulty should go up or not.

	// BeaconRootsStorageAddress is the address where historical beacon roots are stored as per EIP-4788
	BeaconRootsStorageAddress = common.HexToAddress("0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02")
	// BeaconRootsAddress is the address where historical beacon roots are stored as per EIP-4788
	BeaconRootsAddress = common.HexToAddress("0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02")
	// SystemAddress is where the system-transaction is sent from as per EIP-4788
	SystemAddress common.Address = common.HexToAddress("0xfffffffffffffffffffffffffffffffffffffffe")
	SystemAddress = common.HexToAddress("0xfffffffffffffffffffffffffffffffffffffffe")
)

@ -23,7 +23,7 @@ import (
const (
	VersionMajor = 1 // Major version component of the current release
	VersionMinor = 4 // Minor version component of the current release
	VersionPatch = 2 // Patch version component of the current release
	VersionPatch = 3 // Patch version component of the current release
	VersionMeta = "" // Version metadata to append to the version string
)

@ -1,8 +1,8 @@
diff --git a/core/vm/contracts.go b/core/vm/contracts.go
index 36d33e41c..adb10883e 100644
index 5988bb15f..c92cbf542 100644
--- a/core/vm/contracts.go
+++ b/core/vm/contracts.go
@@ -78,9 +78,6 @@ var PrecompiledContractsIstanbul = map[common.Address]PrecompiledContract{
@@ -83,9 +83,6 @@ var PrecompiledContractsIstanbul = map[common.Address]PrecompiledContract{
common.BytesToAddress([]byte{7}): &bn256ScalarMulIstanbul{},
common.BytesToAddress([]byte{8}): &bn256PairingIstanbul{},
common.BytesToAddress([]byte{9}): &blake2F{},
@ -12,6 +12,20 @@ index 36d33e41c..adb10883e 100644
}

var PrecompiledContractsNano = map[common.Address]PrecompiledContract{
@@ -238,13 +235,6 @@ var PrecompiledContractsCancun = map[common.Address]PrecompiledContract{
common.BytesToAddress([]byte{8}): &bn256PairingIstanbul{},
common.BytesToAddress([]byte{9}): &blake2F{},
common.BytesToAddress([]byte{0x0a}): &kzgPointEvaluation{},
-
-	common.BytesToAddress([]byte{100}): &tmHeaderValidate{},
-	common.BytesToAddress([]byte{101}): &iavlMerkleProofValidatePlato{},
-	common.BytesToAddress([]byte{102}): &blsSignatureVerify{},
-	common.BytesToAddress([]byte{103}): &cometBFTLightBlockValidateHertz{},
-	common.BytesToAddress([]byte{104}): &verifyDoubleSignEvidence{},
-	common.BytesToAddress([]byte{105}): &secp256k1SignatureRecover{},
}

// PrecompiledContractsBLS contains the set of pre-compiled Ethereum
diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go
index 70c543f14..65716f944 100644
--- a/core/vm/jump_table.go
@ -26,7 +40,7 @@ index 70c543f14..65716f944 100644
enable3860(&instructionSet) // Limit and meter initcode

diff --git a/params/protocol_params.go b/params/protocol_params.go
index b32b4d943..8b544af08 100644
index b84fa148f..97bf6c4d2 100644
--- a/params/protocol_params.go
+++ b/params/protocol_params.go
@@ -23,7 +23,7 @@ import (

@ -252,10 +252,6 @@ func (tm *testMatcher) runTestFile(t *testing.T, path, name string, runTest inte
			if r, _ := tm.findSkip(name); r != "" {
				t.Skip(r)
			}
			// TODO(Nathan): fix before enable Cancun
			if strings.Contains(key, "Cancun") {
				return
			}
			runTestFunc(runTest, t, name, m, key)
		})
	}

@ -35,7 +35,7 @@ function generate_genesis() {
function init_genesis_data() {
    node_type=$1
    node_id=$2
    geth --datadir ${workspace}/storage/${node_id} init ${workspace}/genesis/genesis.json
    geth --datadir ${workspace}/storage/${node_id} init --state.scheme "hash" ${workspace}/genesis/genesis.json
    cp ${workspace}/config/config-${node_type}.toml ${workspace}/storage/${node_id}/config.toml
    sed -i -e "s/{{NetworkId}}/${BSC_CHAIN_ID}/g" ${workspace}/storage/${node_id}/config.toml
    if [ "${node_id}" == "bsc-rpc" ]; then

@ -132,12 +132,12 @@ func NewDatabase(diskdb ethdb.Database, config *Config) *Database {
	*/
	if config.HashDB != nil {
		if rawdb.ReadStateScheme(triediskdb) == rawdb.PathScheme {
			log.Warn("incompatible state scheme", "old", rawdb.PathScheme, "new", rawdb.HashScheme)
			log.Warn("Incompatible state scheme", "old", rawdb.PathScheme, "new", rawdb.HashScheme)
		}
		db.backend = hashdb.New(triediskdb, config.HashDB, trie.MerkleResolver{})
	} else if config.PathDB != nil {
		if rawdb.ReadStateScheme(triediskdb) == rawdb.HashScheme {
			log.Warn("incompatible state scheme", "old", rawdb.HashScheme, "new", rawdb.PathScheme)
			log.Warn("Incompatible state scheme", "old", rawdb.HashScheme, "new", rawdb.PathScheme)
		}
		db.backend = pathdb.New(triediskdb, config.PathDB)
	} else if strings.Compare(dbScheme, rawdb.PathScheme) == 0 {

@ -72,7 +72,7 @@ func (a *asyncnodebuffer) commit(nodes map[common.Hash]map[string]*trienode.Node

	err := a.current.commit(nodes)
	if err != nil {
		log.Crit("[BUG] failed to commit nodes to asyncnodebuffer", "error", err)
		log.Crit("[BUG] Failed to commit nodes to asyncnodebuffer", "error", err)
	}
	return a
}
@ -87,7 +87,7 @@ func (a *asyncnodebuffer) revert(db ethdb.KeyValueReader, nodes map[common.Hash]
	var err error
	a.current, err = a.current.merge(a.background)
	if err != nil {
		log.Crit("[BUG] failed to merge node cache under revert async node buffer", "error", err)
		log.Crit("[BUG] Failed to merge node cache under revert async node buffer", "error", err)
	}
	a.background.reset()
	return a.current.revert(db, nodes)
@ -129,7 +129,7 @@ func (a *asyncnodebuffer) flush(db ethdb.KeyValueStore, clean *fastcache.Cache,
	for {
		if atomic.LoadUint64(&a.background.immutable) == 1 {
			time.Sleep(time.Duration(DefaultBackgroundFlushInterval) * time.Second)
			log.Info("waiting background memory table flushed into disk for forcing flush node buffer")
			log.Info("Waiting background memory table flushed into disk for forcing flush node buffer")
			continue
		}
		atomic.StoreUint64(&a.current.immutable, 1)
@ -155,10 +155,10 @@ func (a *asyncnodebuffer) flush(db ethdb.KeyValueStore, clean *fastcache.Cache,
		for {
			err := a.background.flush(db, clean, persistID)
			if err == nil {
				log.Debug("succeed to flush background nodecache to disk", "state_id", persistID)
				log.Debug("Succeed to flush background nodecache to disk", "state_id", persistID)
				return
			}
			log.Error("failed to flush background nodecache to disk", "state_id", persistID, "error", err)
			log.Error("Failed to flush background nodecache to disk", "state_id", persistID, "error", err)
		}
	}(id)
	return nil
@ -168,7 +168,7 @@ func (a *asyncnodebuffer) waitAndStopFlushing() {
	a.stopFlushing.Store(true)
	for a.isFlushing.Load() {
		time.Sleep(time.Second)
		log.Warn("waiting background memory table flushed into disk")
		log.Warn("Waiting background memory table flushed into disk")
	}
}

@ -178,7 +178,7 @@ func (a *asyncnodebuffer) getAllNodes() map[common.Hash]map[string]*trienode.Nod

	cached, err := a.current.merge(a.background)
	if err != nil {
		log.Crit("[BUG] failed to merge node cache under revert async node buffer", "error", err)
		log.Crit("[BUG] Failed to merge node cache under revert async node buffer", "error", err)
	}
	return cached.nodes
}

@ -215,7 +215,6 @@ func New(diskdb ethdb.Database, config *Config) *Database {
			log.Crit("Failed to disable database", "err", err) // impossible to happen
		}
	}
	log.Warn("Path-based state scheme is an experimental feature", "sync", db.config.SyncFlush)
	return db
}

@ -79,10 +79,10 @@ type trienodebuffer interface {

func NewTrieNodeBuffer(sync bool, limit int, nodes map[common.Hash]map[string]*trienode.Node, layers uint64) trienodebuffer {
	if sync {
		log.Info("new sync node buffer", "limit", common.StorageSize(limit), "layers", layers)
		log.Info("New sync node buffer", "limit", common.StorageSize(limit), "layers", layers)
		return newNodeBuffer(limit, nodes, layers)
	}
	log.Info("new async node buffer", "limit", common.StorageSize(limit), "layers", layers)
	log.Info("New async node buffer", "limit", common.StorageSize(limit), "layers", layers)
	return newAsyncNodeBuffer(limit, nodes, layers)
}