Merge pull request #2096 from Pythonberg1997/bc-fusion

chore: merge with develop branch
zjubfd 2023-12-21 16:30:35 +08:00 committed by GitHub
commit aab5ad94b8
66 changed files with 1360 additions and 697 deletions

@ -1,4 +1,32 @@
# Changelog
## v1.3.7
FEATURE
* [\#2067](https://github.com/bnb-chain/bsc/pull/2067) cmd/geth: add check func to validate state scheme
* [\#2068](https://github.com/bnb-chain/bsc/pull/2068) internal/ethapi: implement eth_getBlockReceipts
BUGFIX
* [\#2035](https://github.com/bnb-chain/bsc/pull/2035) all: pull snap sync PRs from upstream v1.13.5
* [\#2072](https://github.com/bnb-chain/bsc/pull/2072) fix: fix the pebble config of level option
* [\#2078](https://github.com/bnb-chain/bsc/pull/2078) core: LoadChainConfig return the predefined config for built-in networks firstly
## v1.3.6
FEATURE
* [\#2012](https://github.com/bnb-chain/bsc/pull/2012) cmd, core, ethdb: enable Pebble on 32 bits and OpenBSD
* [\#2063](https://github.com/bnb-chain/bsc/pull/2063) log: support to disable log rotate by hours
* [\#2064](https://github.com/bnb-chain/bsc/pull/2064) log: limit rotateHours in range [0,23]
BUGFIX
* [\#2058](https://github.com/bnb-chain/bsc/pull/2058) params: set default hardfork times
IMPROVEMENT
* [\#2015](https://github.com/bnb-chain/bsc/pull/2015) cmd, core, eth: change default network from ETH to BSC
* [\#2036](https://github.com/bnb-chain/bsc/pull/2036) cmd/jsutils: add 2 tools get validator version and block txs number
* [\#2037](https://github.com/bnb-chain/bsc/pull/2037) core/txpool/legacypool: respect nolocals-setting
* [\#2042](https://github.com/bnb-chain/bsc/pull/2042) core/systemcontracts: update CommitUrl for keplerUpgrade
* [\#2043](https://github.com/bnb-chain/bsc/pull/2043) tests/truffle: adapt changes in bsc-genesis-contracts
* [\#2051](https://github.com/bnb-chain/bsc/pull/2051) core/vote: wait some blocks before voting since mining begin
* [\#2060](https://github.com/bnb-chain/bsc/pull/2060) cmd/utils: allow HTTPHost and WSHost flags precede
## v1.3.5
FEATURE
* [\#1970](https://github.com/bnb-chain/bsc/pull/1970) core: enable Shanghai EIPs

@ -110,15 +110,15 @@ on how you can run your own `geth` instance.
The hardware must meet certain requirements to run a full node on mainnet:
- VPS running recent versions of Mac OS X, Linux, or Windows.
- IMPORTANT 2.5 TB (May 2023) of free disk space, solid-state drive (SSD), gp3, 8k IOPS, 250 MB/s throughput, read latency <1ms. (if the node is started with snap sync, it will need an NVMe SSD)
- IMPORTANT 3 TB (Dec 2023) of free disk space, solid-state drive (SSD), gp3, 8k IOPS, 500 MB/s throughput, read latency <1ms. (if the node is started with snap sync, it will need an NVMe SSD)
- 16 cores of CPU and 64 GB of memory (RAM)
- Suggest m5zn.3xlarge instance type on AWS, c2-standard-16 on Google cloud.
- Suggest m5zn.6xlarge or r7iz.4xlarge instance type on AWS, c2-standard-16 on Google cloud.
- A broadband Internet connection with upload/download speeds of 5 MB/S
The requirement for testnet:
- VPS running recent versions of Mac OS X, Linux, or Windows.
- 500G of storage for testnet.
- 4 cores of CPU and 8 gigabytes of memory (RAM).
- 4 cores of CPU and 16 gigabytes of memory (RAM).
### Steps to Run a Fullnode

@ -1206,7 +1206,7 @@ func GenDoc(ctx *cli.Context) error {
URL: accounts.URL{Path: ".. ignored .."},
},
{
Address: common.HexToAddress("0xffffffffffffffffffffffffffffffffffffffff"),
Address: common.MaxAddress,
},
}})
}

@ -44,7 +44,7 @@ set to standard output. The following filters are supported:
- `-limit <N>` limits the output set to N entries, taking the top N nodes by score
- `-ip <CIDR>` filters nodes by IP subnet
- `-min-age <duration>` filters nodes by 'first seen' time
- `-eth-network <mainnet/goerli/sepolia>` filters nodes by "eth" ENR entry
- `-eth-network <mainnet/goerli/sepolia/holesky>` filters nodes by "eth" ENR entry
- `-les-server` filters nodes by LES server support
- `-snap` filters nodes by snap protocol support
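For example (the node-set file name and output path below are illustrative, not part of this change), several filters can be combined in a single `nodeset filter` run and the result redirected to a new file:
```bash
devp2p nodeset filter all-nodes.json -eth-network mainnet -snap -limit 50 > mainnet-snap-nodes.json
```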

@ -58,7 +58,7 @@ type accRangeTest struct {
func (s *Suite) TestSnapGetAccountRange(t *utesting.T) {
var (
root = s.chain.RootAt(999)
ffHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
ffHash = common.MaxHash
zero = common.Hash{}
firstKeyMinus1 = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf29")
firstKey = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a")
@ -125,7 +125,7 @@ type stRangesTest struct {
// TestSnapGetStorageRanges tests various forms of GetStorageRanges requests.
func (s *Suite) TestSnapGetStorageRanges(t *utesting.T) {
var (
ffHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
ffHash = common.MaxHash
zero = common.Hash{}
firstKey = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a")
secondKey = common.HexToHash("0x09e47cd5056a689e708f22fe1f932709a320518e444f5f7d8d46a3da523d6606")

@ -134,6 +134,12 @@ func loadBaseConfig(ctx *cli.Context) gethConfig {
utils.Fatalf("%v", err)
}
}
if !utils.ValidateStateScheme(cfg.Eth.StateScheme) {
utils.Fatalf("invalid state scheme param in config: %s", cfg.Eth.StateScheme)
}
if cfg.Eth.Genesis != nil && cfg.Eth.Genesis.Config != nil {
log.Warn("Chain config in the configuration file is ignored!")
}
// Apply flags.
utils.SetNodeConfig(ctx, &cfg.Node)

@ -60,7 +60,7 @@ func TestConsoleWelcome(t *testing.T) {
geth.SetTemplateFunc("gover", runtime.Version)
geth.SetTemplateFunc("gethver", func() string { return params.VersionWithCommit("", "") })
geth.SetTemplateFunc("niltime", func() string {
return time.Unix(0, 0).Format("Mon Jan 02 2006 15:04:05 GMT-0700 (MST)")
return time.Unix(0x5e9da7ce, 0).Format("Mon Jan 02 2006 15:04:05 GMT-0700 (MST)")
})
geth.SetTemplateFunc("apis", func() string { return ipcAPIs })
@ -131,7 +131,7 @@ func testAttachWelcome(t *testing.T, geth *testgeth, endpoint, apis string) {
attach.SetTemplateFunc("gethver", func() string { return params.VersionWithCommit("", "") })
attach.SetTemplateFunc("etherbase", func() string { return geth.Etherbase })
attach.SetTemplateFunc("niltime", func() string {
return time.Unix(0, 0).Format("Mon Jan 02 2006 15:04:05 GMT-0700 (MST)")
return time.Unix(0x5e9da7ce, 0).Format("Mon Jan 02 2006 15:04:05 GMT-0700 (MST)")
})
attach.SetTemplateFunc("ipc", func() bool { return strings.HasPrefix(endpoint, "ipc") })
attach.SetTemplateFunc("datadir", func() string { return geth.Datadir })

@ -119,7 +119,7 @@ a data corruption.`,
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.BSCMainnetFlag,
utils.StateSchemeFlag,
},
Description: "This command looks up the specified trie node key from the database.",
@ -132,7 +132,7 @@ a data corruption.`,
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.BSCMainnetFlag,
utils.StateSchemeFlag,
},
Description: "This command delete the specify trie node from the database.",

@ -188,7 +188,7 @@ func TestCustomBackend(t *testing.T) {
initExpect: `Fatal: Invalid choice for db.engine 'mssql', allowed 'leveldb' or 'pebble'`,
// Since the init fails, this will return the (default) mainnet genesis
// block nonce
execExpect: `0x0000000000000042`,
execExpect: `0x0000000000000000`,
},
} {
if err := testfunc(t, tt); err != nil {

@ -311,7 +311,7 @@ func prepare(ctx *cli.Context) {
`)
case !ctx.IsSet(utils.NetworkIdFlag.Name):
log.Info("Starting Geth on Ethereum mainnet...")
log.Info("Starting Geth on BSC mainnet...")
}
// If we're a full node on mainnet without --cache specified, bump default cache allowance
if ctx.String(utils.SyncModeFlag.Name) != "light" && !ctx.IsSet(utils.CacheFlag.Name) && !ctx.IsSet(utils.NetworkIdFlag.Name) {

25
cmd/jsutils/README.md Normal file

@ -0,0 +1,25 @@
## Requirements
- nodejs: v20.10.0
- npm: v10.2.3
## Prepare
It is recommended to use [nvm](https://github.com/nvm-sh/nvm) to manage the Node.js version.
Install the Node.js dependencies:
```shell script
npm install
```
## Run
Get mainnet validator versions:
```bash
npm run startMainnet
```
Get testnet validator versions:
```bash
npm run startTestnet
```
Find the block with the highest transaction count in a given range:
```bash
node gettxcount.js --rpc ${url} --startNum ${start} --endNum ${end}
```
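For example (the block range below is purely illustrative; substitute your own RPC endpoint and numbers), scanning 100 mainnet blocks looks like:
```bash
node gettxcount.js --rpc https://bsc-dataseed.bnbchain.org --startNum 34000000 --endNum 34000100
```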

31
cmd/jsutils/gettxcount.js Normal file

@ -0,0 +1,31 @@
import { ethers } from "ethers";
import program from "commander";
program.option("--rpc <rpc>", "Rpc");
program.option("--startNum <startNum>", "start num")
program.option("--endNum <endNum>", "end num")
program.parse(process.argv);
const provider = new ethers.JsonRpcProvider(program.rpc)
const main = async () => {
let txCount = 0;
let num = 0;
console.log("Find the max txs count between", program.startNum, "and", program.endNum);
for (let i = program.startNum; i < program.endNum; i++) {
let x = await provider.send("eth_getBlockTransactionCountByNumber", [
ethers.toQuantity(i)]);
let a = ethers.toNumber(x)
if (a > txCount) {
num = i;
txCount = a;
}
}
console.log("BlockNum = ", num, "TxCount =", txCount);
};
main().then(() => process.exit(0))
.catch((error) => {
console.error(error);
process.exit(1);
});

25
cmd/jsutils/getvalidatorversion.js Normal file

@ -0,0 +1,25 @@
import { ethers } from "ethers";
import program from "commander";
program.option("--Rpc <Rpc>", "Rpc");
program.option("--Num <Num>", "validator num", 21)
program.parse(process.argv);
const provider = new ethers.JsonRpcProvider(program.Rpc);
const main = async () => {
const blockNum = await provider.getBlockNumber();
console.log(blockNum);
for (let i = 0; i < program.Num; i++) {
let blockData = await provider.getBlock(blockNum - i);
let major = ethers.toNumber(ethers.dataSlice(blockData.extraData, 2, 3))
let minor = ethers.toNumber(ethers.dataSlice(blockData.extraData, 3, 4))
let patch = ethers.toNumber(ethers.dataSlice(blockData.extraData, 4, 5))
console.log(blockData.miner, "version =", major + "." + minor + "." + patch)
}
};
main().then(() => process.exit(0))
.catch((error) => {
console.error(error);
process.exit(1);
});

16
cmd/jsutils/package.json Normal file

@ -0,0 +1,16 @@
{
"name": "jsutils",
"version": "1.0.0",
"type": "module",
"description": "jsUtils for bsc",
"main": "index.js",
"scripts": {
"startMainnet": "node getvalidatorversion.js --Rpc https://bsc-dataseed.bnbchain.org --Num 21",
"startTestnet": "node getvalidatorversion.js --Rpc https://bsc-testnet-dataseed.bnbchain.org --Num 7"
},
"dependencies": {
"commander": "^3.0.1",
"ethers": "^6.2.3"
},
"author": "BNB Chain"
}

@ -51,7 +51,6 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/eth/filters"
@ -165,13 +164,13 @@ var (
}
NetworkIdFlag = &cli.Uint64Flag{
Name: "networkid",
Usage: "Explicitly set network id (integer)(For testnets: use --goerli, --sepolia instead)",
Usage: "Explicitly set network id (integer)(For testnets: use --goerli, --sepolia, --holesky instead)",
Value: ethconfig.Defaults.NetworkId,
Category: flags.EthCategory,
}
MainnetFlag = &cli.BoolFlag{
Name: "mainnet",
Usage: "Ethereum mainnet",
BSCMainnetFlag = &cli.BoolFlag{
Name: "bsc",
Usage: "BSC mainnet",
Category: flags.EthCategory,
}
DeveloperFlag = &cli.BoolFlag{
@ -1113,23 +1112,18 @@ var (
// TestnetFlags is the flag group of all built-in supported testnets.
TestnetFlags = []cli.Flag{}
// NetworkFlags is the flag group of all built-in supported networks.
NetworkFlags = append([]cli.Flag{MainnetFlag}, TestnetFlags...)
NetworkFlags = append([]cli.Flag{BSCMainnetFlag}, TestnetFlags...)
// DatabasePathFlags is the flag group of all database path flags.
DatabasePathFlags = []cli.Flag{
DataDirFlag,
AncientFlag,
RemoteDBFlag,
DBEngineFlag,
HttpHeaderFlag,
}
)
func init() {
if rawdb.PebbleEnabled {
DatabasePathFlags = append(DatabasePathFlags, DBEngineFlag)
}
}
// MakeDataDir retrieves the currently requested data directory, terminating
// if none (or the empty string) is specified. If the node is starting a testnet,
// then a subdirectory of the specified datadir will be used.
@ -1259,8 +1253,10 @@ func SplitAndTrim(input string) (ret []string) {
// setHTTP creates the HTTP RPC listener interface string from the set
// command line flags, returning empty if the HTTP endpoint is disabled.
func setHTTP(ctx *cli.Context, cfg *node.Config) {
if ctx.Bool(HTTPEnabledFlag.Name) && cfg.HTTPHost == "" {
if ctx.Bool(HTTPEnabledFlag.Name) {
if cfg.HTTPHost == "" {
cfg.HTTPHost = "127.0.0.1"
}
if ctx.IsSet(HTTPListenAddrFlag.Name) {
cfg.HTTPHost = ctx.String(HTTPListenAddrFlag.Name)
}
@ -1324,8 +1320,10 @@ func setGraphQL(ctx *cli.Context, cfg *node.Config) {
// setWS creates the WebSocket RPC listener interface string from the set
// command line flags, returning empty if the HTTP endpoint is disabled.
func setWS(ctx *cli.Context, cfg *node.Config) {
if ctx.Bool(WSEnabledFlag.Name) && cfg.WSHost == "" {
if ctx.Bool(WSEnabledFlag.Name) {
if cfg.WSHost == "" {
cfg.WSHost = "127.0.0.1"
}
if ctx.IsSet(WSListenAddrFlag.Name) {
cfg.WSHost = ctx.String(WSListenAddrFlag.Name)
}
@ -1845,7 +1843,7 @@ func CheckExclusive(ctx *cli.Context, args ...interface{}) {
// SetEthConfig applies eth-related command line flags to the config.
func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
// Avoid conflicting network flags
CheckExclusive(ctx, MainnetFlag, DeveloperFlag)
CheckExclusive(ctx, BSCMainnetFlag, DeveloperFlag)
CheckExclusive(ctx, LightServeFlag, SyncModeFlag, "light")
CheckExclusive(ctx, DeveloperFlag, ExternalSignerFlag) // Can't use both ephemeral unlocked and external signer
@ -1941,7 +1939,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
}
// Parse state scheme, abort the process if it's not compatible.
chaindb := tryMakeReadOnlyDatabase(ctx, stack)
scheme, err := ParseStateScheme(ctx, chaindb)
scheme, err := ResolveStateScheme(ctx, cfg.StateScheme, chaindb)
chaindb.Close()
if err != nil {
Fatalf("%v", err)
@ -2034,12 +2032,12 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
}
// Override any default configs for hard coded networks.
switch {
case ctx.Bool(MainnetFlag.Name):
case ctx.Bool(BSCMainnetFlag.Name):
if !ctx.IsSet(NetworkIdFlag.Name) {
cfg.NetworkId = 1
cfg.NetworkId = 56
}
cfg.Genesis = core.DefaultGenesisBlock()
SetDNSDiscoveryDefaults(cfg, params.MainnetGenesisHash)
cfg.Genesis = core.DefaultBSCGenesisBlock()
SetDNSDiscoveryDefaults(cfg, params.BSCGenesisHash)
case ctx.Bool(DeveloperFlag.Name):
if !ctx.IsSet(NetworkIdFlag.Name) {
cfg.NetworkId = 1337
@ -2399,8 +2397,8 @@ func DialRPCWithHeaders(endpoint string, headers []string) (*rpc.Client, error)
func MakeGenesis(ctx *cli.Context) *core.Genesis {
var genesis *core.Genesis
switch {
case ctx.Bool(MainnetFlag.Name):
genesis = core.DefaultGenesisBlock()
case ctx.Bool(BSCMainnetFlag.Name):
genesis = core.DefaultBSCGenesisBlock()
case ctx.Bool(DeveloperFlag.Name):
Fatalf("Developer chains are ephemeral")
}
@ -2487,6 +2485,52 @@ func MakeConsolePreloads(ctx *cli.Context) []string {
return preloads
}
// ResolveStateScheme resolves the state scheme from the CLI flag, the config file and the persistent state.
// The differences between ResolveStateScheme and ParseStateScheme are:
// - ResolveStateScheme additionally compares the config value against the CLI flag and the persistent state to ensure correctness.
// - ResolveStateScheme is only used in the SetEthConfig function.
//
// 1. If the config doesn't provide a scheme, hash mode is written to it by default, so within this function the config value is always nonempty.
// 2. If both the persistent state and the CLI flag are empty, use the config value.
// 3. If the persistent state is empty but both the CLI flag and the config are set, the CLI flag wins.
// 4. If the persistent state is nonempty and the CLI flag isn't provided, the persistent state must equal the config value.
// 5. If all three are provided, return an error if any two of them differ.
func ResolveStateScheme(ctx *cli.Context, stateSchemeCfg string, disk ethdb.Database) (string, error) {
stored := rawdb.ReadStateScheme(disk)
if stored == "" {
// there is no persistent state data in disk db(e.g. geth init)
if !ctx.IsSet(StateSchemeFlag.Name) {
log.Info("State scheme set by config", "scheme", stateSchemeCfg)
return stateSchemeCfg, nil
}
// if both CLI flag and config are set, choose CLI
scheme := ctx.String(StateSchemeFlag.Name)
if !ValidateStateScheme(scheme) {
return "", fmt.Errorf("invalid state scheme param in CLI: %s", scheme)
}
log.Info("State scheme set by CLI", "scheme", scheme)
return scheme, nil
}
if !ctx.IsSet(StateSchemeFlag.Name) {
if stored != stateSchemeCfg {
return "", fmt.Errorf("incompatible state scheme, stored: %s, config: %s", stored, stateSchemeCfg)
}
log.Info("State scheme set to already existing", "scheme", stored)
return stored, nil
}
scheme := ctx.String(StateSchemeFlag.Name)
if !ValidateStateScheme(scheme) {
return "", fmt.Errorf("invalid state scheme param in CLI: %s", scheme)
}
// If there is persistent state data in the disk db and both the CLI flag and the config
// are set, return an error unless all three agree.
if scheme != stored || scheme != stateSchemeCfg || stored != stateSchemeCfg {
return "", fmt.Errorf("incompatible state scheme, stored: %s, config: %s, CLI: %s", stored, stateSchemeCfg, scheme)
}
log.Info("All are provided, state scheme set to already existing", "scheme", stored)
return stored, nil
}
// ParseStateScheme resolves scheme identifier from CLI flag. If the provided
// state scheme is not compatible with the persistent one, an error
// will be returned.
@ -2506,7 +2550,7 @@ func ParseStateScheme(ctx *cli.Context, disk ethdb.Database) (string, error) {
if stored == "" {
// use default scheme for empty database, flip it when
// path mode is chosen as default
log.Info("State schema set to default", "scheme", "hash")
log.Info("State scheme set to default", "scheme", "hash")
return rawdb.HashScheme, nil
}
log.Info("State scheme set to already existing", "scheme", stored)
@ -2515,6 +2559,9 @@ func ParseStateScheme(ctx *cli.Context, disk ethdb.Database) (string, error) {
// If state scheme is specified, ensure it's compatible with
// persistent state.
scheme := ctx.String(StateSchemeFlag.Name)
if !ValidateStateScheme(scheme) {
return "", fmt.Errorf("invalid state scheme param in CLI: %s", scheme)
}
if stored == "" || scheme == stored {
log.Info("State scheme set by user", "scheme", scheme)
return scheme, nil
@ -2545,3 +2592,12 @@ func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, read
}
return trie.NewDatabase(disk, config)
}
// ValidateStateScheme checks whether the given state scheme is valid.
// Valid state schemes: hash and path.
func ValidateStateScheme(stateScheme string) bool {
if stateScheme == rawdb.HashScheme || stateScheme == rawdb.PathScheme {
return true
}
return false
}

@ -20,6 +20,8 @@ package utils
import (
"reflect"
"testing"
"github.com/ethereum/go-ethereum/core/rawdb"
)
func Test_SplitTagsFlag(t *testing.T) {
@ -62,3 +64,34 @@ func Test_SplitTagsFlag(t *testing.T) {
})
}
}
func TestValidateStateScheme(t *testing.T) {
tests := []struct {
name string
arg string
wantResult bool
}{
{
name: "hash scheme",
arg: rawdb.HashScheme,
wantResult: true,
},
{
name: "path scheme",
arg: rawdb.PathScheme,
wantResult: true,
},
{
name: "invalid scheme",
arg: "mockScheme",
wantResult: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := ValidateStateScheme(tt.arg); got != tt.wantResult {
t.Errorf("ValidateStateScheme() = %v, want %v", got, tt.wantResult)
}
})
}
}

@ -44,6 +44,12 @@ const (
var (
hashT = reflect.TypeOf(Hash{})
addressT = reflect.TypeOf(Address{})
// MaxAddress represents the maximum possible address value.
MaxAddress = HexToAddress("0xffffffffffffffffffffffffffffffffffffffff")
// MaxHash represents the maximum possible hash value.
MaxHash = HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
)
// Hash represents the 32 byte Keccak256 hash of arbitrary data.

@ -245,6 +245,7 @@ func New(
) *Parlia {
// get parlia config
parliaConfig := chainConfig.Parlia
log.Info("Parlia", "chainConfig", chainConfig)
// Set any missing consensus parameters to their defaults
if parliaConfig != nil && parliaConfig.Epoch == 0 {

@ -23,8 +23,6 @@ import (
"errors"
"fmt"
"math/big"
"reflect"
"regexp"
"strings"
"github.com/ethereum/go-ethereum/common"
@ -202,8 +200,8 @@ func CommitGenesisState(db ethdb.Database, triedb *trie.Database, blockhash comm
// - private network, can't recover
var genesis *Genesis
switch blockhash {
case params.MainnetGenesisHash:
genesis = DefaultGenesisBlock()
case params.BSCGenesisHash:
genesis = DefaultBSCGenesisBlock()
}
if genesis != nil {
alloc = genesis.Alloc
@ -318,8 +316,8 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, gen
systemcontracts.GenesisHash = stored
if (stored == common.Hash{}) {
if genesis == nil {
log.Info("Writing default main-net genesis block")
genesis = DefaultGenesisBlock()
log.Info("Writing default BSC mainnet genesis block")
genesis = DefaultBSCGenesisBlock()
} else {
log.Info("Writing custom genesis block")
}
@ -328,6 +326,7 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, gen
return genesis.Config, common.Hash{}, err
}
applyOverrides(genesis.Config)
log.Info("genesis block hash", "hash", block.Hash())
return genesis.Config, block.Hash(), nil
}
// The genesis block is present(perhaps in ancient database) while the
@ -337,7 +336,7 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, gen
header := rawdb.ReadHeader(db, stored, 0)
if header.Root != types.EmptyRootHash && !triedb.Initialized(header.Root) {
if genesis == nil {
genesis = DefaultGenesisBlock()
genesis = DefaultBSCGenesisBlock()
}
// Ensure the stored genesis matches with the given one.
hash := genesis.ToBlock().Hash()
@ -398,14 +397,19 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, gen
return newcfg, stored, nil
}
// LoadChainConfig loads the stored chain config if it is already present in
// database, otherwise, return the config in the provided genesis specification.
// LoadChainConfig retrieves the predefined chain configuration for built-in networks.
// For non-built-in networks, it first attempts to load the stored chain configuration from the database.
// If the configuration is not present, it returns the configuration specified in the provided genesis specification.
func LoadChainConfig(db ethdb.Database, genesis *Genesis) (*params.ChainConfig, common.Hash, error) {
// Load the stored chain config from the database. It can be nil
// in case the database is empty. Notably, we only care about the
// chain config corresponds to the canonical chain.
stored := rawdb.ReadCanonicalHash(db, 0)
if stored != (common.Hash{}) {
builtInConf := params.GetBuiltInChainConfig(stored)
if builtInConf != nil {
return builtInConf, stored, nil
}
storedcfg := rawdb.ReadChainConfig(db, stored)
if storedcfg != nil {
return storedcfg, stored, nil
@ -431,71 +435,15 @@ func LoadChainConfig(db ethdb.Database, genesis *Genesis) (*params.ChainConfig,
return params.BSCChainConfig, params.BSCGenesisHash, nil
}
// For any block in g.Config which is nil but the same block in defaultConfig is not
// set the block in genesis config to the block in defaultConfig.
// Reflection is used to avoid a long series of if statements with hardcoded block names.
func (g *Genesis) setDefaultBlockValues(defaultConfig *params.ChainConfig) {
// Regex to match block names
blockRegex := regexp.MustCompile(`.*Block$`)
// Get reflect values
gConfigElem := reflect.ValueOf(g.Config).Elem()
defaultConfigElem := reflect.ValueOf(defaultConfig).Elem()
// Iterate over fields in config
for i := 0; i < gConfigElem.NumField(); i++ {
gConfigField := gConfigElem.Field(i)
defaultConfigField := defaultConfigElem.Field(i)
fieldName := gConfigElem.Type().Field(i).Name
// Use the regex to check if the field is a Block field
if gConfigField.Kind() == reflect.Ptr && blockRegex.MatchString(fieldName) {
if gConfigField.IsNil() {
gConfigField.Set(defaultConfigField)
}
}
}
}
// Hard fork block height specified in config.toml has higher priority, but
// if it is not specified in config.toml, use the default height in code.
func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig {
var defaultConfig *params.ChainConfig
switch {
case ghash == params.MainnetGenesisHash:
defaultConfig = params.MainnetChainConfig
case ghash == params.BSCGenesisHash:
defaultConfig = params.BSCChainConfig
case ghash == params.ChapelGenesisHash:
defaultConfig = params.ChapelChainConfig
case ghash == params.RialtoGenesisHash:
defaultConfig = params.RialtoChainConfig
default:
conf := params.GetBuiltInChainConfig(ghash)
if conf != nil {
return conf
}
if g != nil {
// it could be a custom config for QA test, just return
return g.Config
return g.Config // it could be a custom config for QA test, just return
}
defaultConfig = params.AllEthashProtocolChanges
}
if g == nil || g.Config == nil {
return defaultConfig
}
g.setDefaultBlockValues(defaultConfig)
// BSC Parlia set up
if g.Config.Parlia == nil {
g.Config.Parlia = defaultConfig.Parlia
} else {
if g.Config.Parlia.Period == 0 {
g.Config.Parlia.Period = defaultConfig.Parlia.Period
}
if g.Config.Parlia.Epoch == 0 {
g.Config.Parlia.Epoch = defaultConfig.Parlia.Epoch
}
}
return g.Config
return params.AllEthashProtocolChanges
}
// ToBlock returns the genesis block according to genesis specification.
@ -608,6 +556,22 @@ func DefaultGenesisBlock() *Genesis {
}
}
// DefaultBSCGenesisBlock returns the BSC mainnet genesis block.
func DefaultBSCGenesisBlock() *Genesis {
alloc := decodePrealloc(bscMainnetAllocData)
return &Genesis{
Config: params.BSCChainConfig,
Nonce: 0,
ExtraData: hexutil.MustDecode("0x00000000000000000000000000000000000000000000000000000000000000002a7cdd959bfe8d9487b2a43b33565295a698f7e26488aa4d1955ee33403f8ccb1d4de5fb97c7ade29ef9f4360c606c7ab4db26b016007d3ad0ab86a0ee01c3b1283aa067c58eab4709f85e99d46de5fe685b1ded8013785d6623cc18d214320b6bb6475978f3adfc719c99674c072166708589033e2d9afec2be4ec20253b8642161bc3f444f53679c1f3d472f7be8361c80a4c1e7e9aaf001d0877f1cfde218ce2fd7544e0b2cc94692d4a704debef7bcb61328b8f7166496996a7da21cf1f1b04d9b3e26a3d0772d4c407bbe49438ed859fe965b140dcf1aab71a96bbad7cf34b5fa511d8e963dbba288b1960e75d64430b3230294d12c6ab2aac5c2cd68e80b16b581ea0a6e3c511bbd10f4519ece37dc24887e11b55d7ae2f5b9e386cd1b50a4550696d957cb4900f03a82012708dafc9e1b880fd083b32182b869be8e0922b81f8e175ffde54d797fe11eb03f9e3bf75f1d68bf0b8b6fb4e317a0f9d6f03eaf8ce6675bc60d8c4d90829ce8f72d0163c1d5cf348a862d55063035e7a025f4da968de7e4d7e4004197917f4070f1d6caa02bbebaebb5d7e581e4b66559e635f805ff0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
GasLimit: 40000000,
Difficulty: big.NewInt(1),
Mixhash: common.Hash(hexutil.MustDecode("0x0000000000000000000000000000000000000000000000000000000000000000")),
Coinbase: common.HexToAddress("0xffffFFFfFFffffffffffffffFfFFFfffFFFfFFfE"),
Timestamp: 0x5e9da7ce,
Alloc: alloc,
}
}
// DeveloperGenesisBlock returns the 'geth --dev' genesis block.
func DeveloperGenesisBlock(gasLimit uint64, faucet common.Address) *Genesis {
// Override the default period to the user requested one
@ -635,13 +599,34 @@ func DeveloperGenesisBlock(gasLimit uint64, faucet common.Address) *Genesis {
}
func decodePrealloc(data string) GenesisAlloc {
var p []struct{ Addr, Balance *big.Int }
var p []struct {
Addr *big.Int
Balance *big.Int
Misc *struct {
Nonce uint64
Code []byte
Slots []struct {
Key common.Hash
Val common.Hash
}
} `rlp:"optional"`
}
if err := rlp.NewStream(strings.NewReader(data), 0).Decode(&p); err != nil {
panic(err)
}
ga := make(GenesisAlloc, len(p))
for _, account := range p {
ga[common.BigToAddress(account.Addr)] = GenesisAccount{Balance: account.Balance}
acc := GenesisAccount{Balance: account.Balance}
if account.Misc != nil {
acc.Nonce = account.Misc.Nonce
acc.Code = account.Misc.Code
acc.Storage = make(map[common.Hash]common.Hash)
for _, slot := range account.Misc.Slots {
acc.Storage[slot.Key] = slot.Val
}
}
ga[common.BigToAddress(account.Addr)] = acc
}
return ga
}

File diff suppressed because one or more lines are too long

@ -71,8 +71,8 @@ func testSetupGenesis(t *testing.T, scheme string) {
fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
return SetupGenesisBlock(db, trie.NewDatabase(db, newDbConfig(scheme)), nil)
},
wantHash: params.MainnetGenesisHash,
wantConfig: params.MainnetChainConfig,
wantHash: params.BSCGenesisHash,
wantConfig: params.BSCChainConfig,
},
{
name: "mainnet block in DB, genesis == nil",
@ -241,12 +241,12 @@ func TestConfigOrDefault(t *testing.T) {
gHash := params.BSCGenesisHash
config := defaultGenesis.configOrDefault(gHash)
if config.ChainID.Cmp(params.MainnetChainConfig.ChainID) != 0 {
if config.ChainID.Cmp(params.BSCChainConfig.ChainID) != 0 {
t.Errorf("ChainID of resulting config should be %v, but is %v instead", params.BSCChainConfig.ChainID, config.ChainID)
}
if config.HomesteadBlock.Cmp(params.MainnetChainConfig.HomesteadBlock) != 0 {
t.Errorf("resulting config should have HomesteadBlock = %v, but instead is %v", params.MainnetChainConfig, config.HomesteadBlock)
if config.HomesteadBlock.Cmp(params.BSCChainConfig.HomesteadBlock) != 0 {
t.Errorf("resulting config should have HomesteadBlock = %v, but instead is %v", params.BSCChainConfig, config.HomesteadBlock)
}
if config.PlanckBlock == nil {
@ -258,34 +258,6 @@ func TestConfigOrDefault(t *testing.T) {
}
}
func TestSetDefaultBlockValues(t *testing.T) {
genesis := &Genesis{Config: &params.ChainConfig{ChainID: big.NewInt(66), HomesteadBlock: big.NewInt(11)}}
genesis.setDefaultBlockValues(params.BSCChainConfig)
// Make sure the non-nil block was not modified
if genesis.Config.HomesteadBlock.Cmp(big.NewInt(11)) != 0 {
t.Errorf("Homestead block should not have been modified. HomesteadBlock = %v", genesis.Config.HomesteadBlock)
}
// Spot check a few blocks
if genesis.Config.NielsBlock.Cmp(params.BSCChainConfig.NielsBlock) != 0 {
t.Errorf("Niels block not matching: in genesis = %v , in defaultConfig = %v", genesis.Config.NielsBlock, params.BSCChainConfig.NielsBlock)
}
if genesis.Config.NanoBlock.Cmp(params.BSCChainConfig.NanoBlock) != 0 {
t.Errorf("Nano block not matching: in genesis = %v , in defaultConfig = %v", genesis.Config.NanoBlock, params.BSCChainConfig.NanoBlock)
}
if genesis.Config.PlanckBlock.Cmp(params.BSCChainConfig.PlanckBlock) != 0 {
t.Errorf("Nano block not matching: in genesis = %v , in defaultConfig = %v", genesis.Config.PlanckBlock, params.BSCChainConfig.PlanckBlock)
}
// Lastly make sure non-block fields such as ChainID have not been modified
if genesis.Config.ChainID.Cmp(big.NewInt(66)) != 0 {
t.Errorf("ChainID should not have been modified. ChainID = %v", genesis.Config.ChainID)
}
}
func newDbConfig(scheme string) *trie.Config {
if scheme == rawdb.HashScheme {
return trie.HashDefaults

@ -32,24 +32,51 @@ import (
"os"
"strconv"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/rlp"
"golang.org/x/exp/slices"
)
type allocItem struct{ Addr, Balance *big.Int }
type allocItem struct {
Addr *big.Int
Balance *big.Int
Misc *allocItemMisc `rlp:"optional"`
}
type allocItemMisc struct {
Nonce uint64
Code []byte
Slots []allocItemStorageItem
}
type allocItemStorageItem struct {
Key common.Hash
Val common.Hash
}
func makelist(g *core.Genesis) []allocItem {
items := make([]allocItem, 0, len(g.Alloc))
for addr, account := range g.Alloc {
var misc *allocItemMisc
if len(account.Storage) > 0 || len(account.Code) > 0 || account.Nonce != 0 {
panic(fmt.Sprintf("can't encode account %x", addr))
misc = &allocItemMisc{
Nonce: account.Nonce,
Code: account.Code,
Slots: make([]allocItemStorageItem, 0, len(account.Storage)),
}
for key, val := range account.Storage {
misc.Slots = append(misc.Slots, allocItemStorageItem{key, val})
}
slices.SortFunc(misc.Slots, func(a, b allocItemStorageItem) int {
return a.Key.Cmp(b.Key)
})
}
bigAddr := new(big.Int).SetBytes(addr.Bytes())
items = append(items, allocItem{bigAddr, account.Balance})
items = append(items, allocItem{bigAddr, account.Balance, misc})
}
slices.SortFunc(items, func(a, b allocItem) bool {
return a.Addr.Cmp(b.Addr) < 0
slices.SortFunc(items, func(a, b allocItem) int {
return a.Addr.Cmp(b.Addr)
})
return items
}

@ -32,6 +32,7 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb/leveldb"
"github.com/ethereum/go-ethereum/ethdb/memorydb"
"github.com/ethereum/go-ethereum/ethdb/pebble"
"github.com/ethereum/go-ethereum/log"
)
@ -425,6 +426,16 @@ func NewLevelDBDatabaseWithFreezer(file string, cache int, handles int, ancient
return frdb, nil
}
// NewPebbleDBDatabase creates a persistent key-value database without a freezer
// moving immutable chain segments into cold storage.
func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) {
db, err := pebble.New(file, cache, handles, namespace, readonly)
if err != nil {
return nil, err
}
return NewDatabase(db), nil
}
const (
dbPebble = "pebble"
dbLeveldb = "leveldb"
@ -480,12 +491,8 @@ func openKeyValueDatabase(o OpenOptions) (ethdb.Database, error) {
return nil, fmt.Errorf("db.engine choice was %v but found pre-existing %v database in specified data directory", o.Type, existingDb)
}
if o.Type == dbPebble || existingDb == dbPebble {
if PebbleEnabled {
log.Info("Using pebble as the backing database")
return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly)
} else {
return nil, errors.New("db.engine 'pebble' not supported on this platform")
}
}
if o.Type == dbLeveldb || existingDb == dbLeveldb {
log.Info("Using leveldb as the backing database")
@ -493,10 +500,8 @@ func openKeyValueDatabase(o OpenOptions) (ethdb.Database, error) {
}
// No pre-existing database, no user-requested one either. Default to Pebble
// on supported platforms and LevelDB on anything else.
// if PebbleEnabled {
// log.Info("Defaulting to pebble as the backing database")
// return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly)
// }
log.Info("Defaulting to leveldb as the backing database")
return NewLevelDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly)
}

@ -1,37 +0,0 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>
//go:build (arm64 || amd64) && !openbsd
package rawdb
import (
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb/pebble"
)
// Pebble is unsuported on 32bit architecture
const PebbleEnabled = true
// NewPebbleDBDatabase creates a persistent key-value database without a freezer
// moving immutable chain segments into cold storage.
func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) {
db, err := pebble.New(file, cache, handles, namespace, readonly)
if err != nil {
return nil, err
}
return NewDatabase(db), nil
}

@ -1,34 +0,0 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build !((arm64 || amd64) && !openbsd)
package rawdb
import (
"errors"
"github.com/ethereum/go-ethereum/ethdb"
)
// Pebble is unsuported on 32bit architecture
const PebbleEnabled = false
// NewPebbleDBDatabase creates a persistent key-value database without a freezer
// moving immutable chain segments into cold storage.
func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) {
return nil, errors.New("pebble is not supported on this platform")
}

@ -22,6 +22,7 @@ import (
"sync"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
)
const tmpSuffix = ".tmp"
@ -240,6 +241,7 @@ func cleanup(path string) error {
}
for _, name := range names {
if name == filepath.Base(path)+tmpSuffix {
log.Info("Removed leftover freezer directory", "name", name)
return os.RemoveAll(filepath.Join(parent, name))
}
}

@ -265,6 +265,12 @@ func (t *freezerTable) repair() error {
t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
lastIndex.unmarshalBinary(buffer)
}
// Print an error log if the index is corrupted due to an incorrect
// last index item. While it is theoretically possible to have a zero offset
// by storing all zero-size items, it is highly unlikely to occur in practice.
if lastIndex.offset == 0 && offsetsSize/indexEntrySize > 1 {
log.Error("Corrupted index file detected", "lastOffset", lastIndex.offset, "items", offsetsSize/indexEntrySize-1)
}
if t.readonly {
t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForReadOnly)
} else {
@ -357,7 +363,7 @@ func (t *freezerTable) repair() error {
return err
}
if verbose {
t.logger.Info("Chain freezer table opened", "items", t.items.Load(), "size", t.headBytes)
t.logger.Info("Chain freezer table opened", "items", t.items.Load(), "deleted", t.itemOffset.Load(), "hidden", t.itemHidden.Load(), "tailId", t.tailId, "headId", t.headId, "size", t.headBytes)
} else {
t.logger.Debug("Chain freezer table opened", "items", t.items.Load(), "size", common.StorageSize(t.headBytes))
}
@ -530,6 +536,10 @@ func (t *freezerTable) truncateTail(items uint64) error {
if err := t.meta.Sync(); err != nil {
return err
}
// Close the index file before shortening it.
if err := t.index.Close(); err != nil {
return err
}
// Truncate the deleted index entries from the index file.
err = copyFrom(t.index.Name(), t.index.Name(), indexEntrySize*(newDeleted-deleted+1), func(f *os.File) error {
tailIndex := indexEntry{
@ -543,13 +553,14 @@ func (t *freezerTable) truncateTail(items uint64) error {
return err
}
// Reopen the modified index file to load the changes
if err := t.index.Close(); err != nil {
return err
}
t.index, err = openFreezerFileForAppend(t.index.Name())
if err != nil {
return err
}
// Sync the file to ensure changes are flushed to disk
if err := t.index.Sync(); err != nil {
return err
}
// Release any files before the current tail
t.tailId = newTailId
t.itemOffset.Store(newDeleted)
@ -782,7 +793,7 @@ func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []i
return fmt.Errorf("missing data file %d", fileId)
}
if _, err := dataFile.ReadAt(output[len(output)-length:], int64(start)); err != nil {
return err
return fmt.Errorf("%w, fileid: %d, start: %d, length: %d", err, fileId, start, length)
}
return nil
}

@ -365,21 +365,15 @@ func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, accou
}
func stackTrieGenerate(db ethdb.KeyValueWriter, scheme string, owner common.Hash, in chan trieKV, out chan common.Hash) {
var nodeWriter trie.NodeWriteFunc
options := trie.NewStackTrieOptions()
if db != nil {
nodeWriter = func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
rawdb.WriteTrieNode(db, owner, path, hash, blob, scheme)
})
}
}
t := trie.NewStackTrieWithOwner(nodeWriter, owner)
t := trie.NewStackTrie(options)
for leaf := range in {
t.Update(leaf.key[:], leaf.value)
}
var root common.Hash
if db == nil {
root = t.Hash()
} else {
root, _ = t.Commit()
}
out <- root
out <- t.Commit()
}

@ -1363,10 +1363,12 @@ func (s *StateDB) fastDeleteStorage(addrHash common.Hash, root common.Hash) (boo
nodes = trienode.NewNodeSet(addrHash)
slots = make(map[common.Hash][]byte)
)
stack := trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
options := trie.NewStackTrieOptions()
options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
nodes.AddNode(path, trienode.NewDeleted())
size += common.StorageSize(len(path))
})
stack := trie.NewStackTrie(options)
for iter.Next() {
if size > storageDeleteLimit {
return true, size, nil, nil, nil

@ -138,7 +138,7 @@ func TestStateProcessorErrors(t *testing.T) {
)
defer blockchain.Stop()
bigNumber := new(big.Int).SetBytes(common.FromHex("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"))
bigNumber := new(big.Int).SetBytes(common.MaxHash.Bytes())
tooBigNumber := new(big.Int).Set(bigNumber)
tooBigNumber.Add(tooBigNumber, common.Big1)
for i, tt := range []struct {

File diff suppressed because one or more lines are too long

@ -15,6 +15,8 @@ import (
"github.com/ethereum/go-ethereum/metrics"
)
const blocksNumberSinceMining = 5 // the number of blocks to wait before voting, counting from when the validator begins mining
var votesManagerCounter = metrics.NewRegisteredCounter("votesManager/local", nil)
// Backend wraps all methods required for voting.
@ -95,6 +97,7 @@ func (voteManager *VoteManager) loop() {
dlEventCh := events.Chan()
startVote := true
blockCountSinceMining := 0
var once sync.Once
for {
select {
@ -120,9 +123,15 @@ func (voteManager *VoteManager) loop() {
continue
}
if !voteManager.eth.IsMining() {
blockCountSinceMining = 0
log.Debug("skip voting because mining is disabled, continue")
continue
}
blockCountSinceMining++
if blockCountSinceMining <= blocksNumberSinceMining {
log.Debug("skip voting", "blockCountSinceMining", blockCountSinceMining, "blocksNumberSinceMining", blocksNumberSinceMining)
continue
}
if cHead.Block == nil {
log.Debug("cHead.Block is nil, continue")

@ -190,7 +190,7 @@ func testVotePool(t *testing.T, isValidRules bool) {
if _, err := chain.InsertChain(bs); err != nil {
panic(err)
}
for i := 0; i < 10; i++ {
for i := 0; i < 10+blocksNumberSinceMining; i++ {
bs, _ = core.GenerateChain(params.TestChainConfig, bs[len(bs)-1], ethash.NewFaker(), db, 1, nil)
if _, err := chain.InsertChain(bs); err != nil {
panic(err)

@ -56,10 +56,10 @@ var LightClientGPO = gasprice.Config{
IgnorePrice: gasprice.DefaultIgnorePrice,
}
// Defaults contains default settings for use on the Ethereum main net.
// Defaults contains default settings for use on the BSC main net.
var Defaults = Config{
SyncMode: downloader.SnapSync,
NetworkId: 1,
NetworkId: 56,
TxLookupLimit: 2350000,
TransactionHistory: 2350000,
StateHistory: params.FullImmutabilityThreshold,

@ -367,7 +367,7 @@ func ServiceGetStorageRangesQuery(chain *core.BlockChain, req *GetStorageRangesP
if len(req.Origin) > 0 {
origin, req.Origin = common.BytesToHash(req.Origin), nil
}
var limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
var limit = common.MaxHash
if len(req.Limit) > 0 {
limit, req.Limit = common.BytesToHash(req.Limit), nil
}

@ -26,4 +26,32 @@ var (
IngressRegistrationErrorMeter = metrics.NewRegisteredMeter(ingressRegistrationErrorName, nil)
EgressRegistrationErrorMeter = metrics.NewRegisteredMeter(egressRegistrationErrorName, nil)
// deletionGauge is the metric to track how many trie node deletions
// are performed in total during the sync process.
deletionGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/delete", nil)
// lookupGauge is the metric to track how many trie node lookups are
// performed to determine if a node needs to be deleted.
lookupGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/lookup", nil)
// boundaryAccountNodesGauge is the metric to track how many boundary trie
// nodes in account trie are met.
boundaryAccountNodesGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/boundary/account", nil)
// boundaryStorageNodesGauge is the metric to track how many boundary trie
// nodes in storage tries are met.
boundaryStorageNodesGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/boundary/storage", nil)
// smallStorageGauge is the metric to track how many storages are small enough
// to be retrieved in one or two requests.
smallStorageGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/storage/small", nil)
// largeStorageGauge is the metric to track how many storages are large enough
// to be retrieved concurrently.
largeStorageGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/storage/large", nil)
// skipStorageHealingGauge is the metric to track how many storages are retrieved
// in multiple requests but healing is not necessary.
skipStorageHealingGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/storage/noheal", nil)
)

@ -67,7 +67,7 @@ func (r *hashRange) End() common.Hash {
// If the end overflows (non divisible range), return a shorter interval
next, overflow := new(uint256.Int).AddOverflow(r.current, r.step)
if overflow {
return common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
return common.MaxHash
}
return next.SubUint64(next, 1).Bytes32()
}

@ -45,7 +45,7 @@ func TestHashRanges(t *testing.T) {
common.HexToHash("0x3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
common.HexToHash("0xbfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
common.MaxHash,
},
},
// Split a divisible part of the hash range up into 2 chunks
@ -58,7 +58,7 @@ func TestHashRanges(t *testing.T) {
},
ends: []common.Hash{
common.HexToHash("0x8fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
common.MaxHash,
},
},
// Split the entire hash range into a non divisible 3 chunks
@ -73,7 +73,7 @@ func TestHashRanges(t *testing.T) {
ends: []common.Hash{
common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555555"),
common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
common.MaxHash,
},
},
// Split a part of hash range into a non divisible 3 chunks
@ -88,7 +88,7 @@ func TestHashRanges(t *testing.T) {
ends: []common.Hash{
common.HexToHash("0x6aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
common.HexToHash("0xb555555555555555555555555555555555555555555555555555555555555555"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
common.MaxHash,
},
},
// Split a part of hash range into a non divisible 3 chunks, but with a
@ -108,7 +108,7 @@ func TestHashRanges(t *testing.T) {
ends: []common.Hash{
common.HexToHash("0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff5"),
common.HexToHash("0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
common.MaxHash,
},
},
}

@ -717,6 +717,19 @@ func (s *Syncer) Sync(root common.Hash, cancel chan struct{}) error {
}
}
// cleanPath is used to remove the dangling nodes in the stackTrie.
func (s *Syncer) cleanPath(batch ethdb.Batch, owner common.Hash, path []byte) {
if owner == (common.Hash{}) && rawdb.ExistsAccountTrieNode(s.db, path) {
rawdb.DeleteAccountTrieNode(batch, path)
deletionGauge.Inc(1)
}
if owner != (common.Hash{}) && rawdb.ExistsStorageTrieNode(s.db, owner, path) {
rawdb.DeleteStorageTrieNode(batch, owner, path)
deletionGauge.Inc(1)
}
lookupGauge.Inc(1)
}
// loadSyncStatus retrieves a previously aborted sync status from the database,
// or generates a fresh one if none is available.
func (s *Syncer) loadSyncStatus() {
@ -739,9 +752,22 @@ func (s *Syncer) loadSyncStatus() {
s.accountBytes += common.StorageSize(len(key) + len(value))
},
}
task.genTrie = trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
rawdb.WriteTrieNode(task.genBatch, owner, path, hash, val, s.scheme)
options := trie.NewStackTrieOptions()
options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
rawdb.WriteTrieNode(task.genBatch, common.Hash{}, path, hash, blob, s.scheme)
})
if s.scheme == rawdb.PathScheme {
// Configure the dangling node cleaner and also filter out boundary nodes
// only in the context of the path scheme. Deletion is forbidden in the
// hash scheme, as it can disrupt state completeness.
options = options.WithCleaner(func(path []byte) {
s.cleanPath(task.genBatch, common.Hash{}, path)
})
// Skip the left boundary if it's not the first range.
// Skip the right boundary if it's not the last range.
options = options.WithSkipBoundary(task.Next != (common.Hash{}), task.Last != common.MaxHash, boundaryAccountNodesGauge)
}
task.genTrie = trie.NewStackTrie(options)
for accountHash, subtasks := range task.SubTasks {
for _, subtask := range subtasks {
subtask := subtask // closure for subtask.genBatch in the stacktrie writer callback
@ -752,9 +778,23 @@ func (s *Syncer) loadSyncStatus() {
s.storageBytes += common.StorageSize(len(key) + len(value))
},
}
subtask.genTrie = trie.NewStackTrieWithOwner(func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
rawdb.WriteTrieNode(subtask.genBatch, owner, path, hash, val, s.scheme)
}, accountHash)
owner := accountHash // local assignment for stacktrie writer closure
options := trie.NewStackTrieOptions()
options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
rawdb.WriteTrieNode(subtask.genBatch, owner, path, hash, blob, s.scheme)
})
if s.scheme == rawdb.PathScheme {
// Configure the dangling node cleaner and also filter out boundary nodes
// only in the context of the path scheme. Deletion is forbidden in the
// hash scheme, as it can disrupt state completeness.
options = options.WithCleaner(func(path []byte) {
s.cleanPath(subtask.genBatch, owner, path)
})
// Skip the left boundary if it's not the first range.
// Skip the right boundary if it's not the last range.
options = options.WithSkipBoundary(subtask.Next != common.Hash{}, subtask.Last != common.MaxHash, boundaryStorageNodesGauge)
}
subtask.genTrie = trie.NewStackTrie(options)
}
}
}
@ -798,7 +838,7 @@ func (s *Syncer) loadSyncStatus() {
last := common.BigToHash(new(big.Int).Add(next.Big(), step))
if i == accountConcurrency-1 {
// Make sure we don't overflow if the step is not a proper divisor
last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
last = common.MaxHash
}
batch := ethdb.HookedBatch{
Batch: s.db.NewBatch(),
@ -806,14 +846,27 @@ func (s *Syncer) loadSyncStatus() {
s.accountBytes += common.StorageSize(len(key) + len(value))
},
}
options := trie.NewStackTrieOptions()
options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
rawdb.WriteTrieNode(batch, common.Hash{}, path, hash, blob, s.scheme)
})
if s.scheme == rawdb.PathScheme {
// Configure the dangling node cleaner and also filter out boundary nodes
// only in the context of the path scheme. Deletion is forbidden in the
// hash scheme, as it can disrupt state completeness.
options = options.WithCleaner(func(path []byte) {
s.cleanPath(batch, common.Hash{}, path)
})
// Skip the left boundary if it's not the first range.
// Skip the right boundary if it's not the last range.
options = options.WithSkipBoundary(next != common.Hash{}, last != common.MaxHash, boundaryAccountNodesGauge)
}
s.tasks = append(s.tasks, &accountTask{
Next: next,
Last: last,
SubTasks: make(map[common.Hash][]*storageTask),
genBatch: batch,
genTrie: trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
rawdb.WriteTrieNode(batch, owner, path, hash, val, s.scheme)
}),
genTrie: trie.NewStackTrie(options),
})
log.Debug("Created account sync task", "from", next, "last", last)
next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
@ -1877,7 +1930,7 @@ func (s *Syncer) processAccountResponse(res *accountResponse) {
return
}
// Some accounts are incomplete, leave as is for the storage and contract
// task assigners to pick up and fill.
// task assigners to pick up and fill
}
// processBytecodeResponse integrates an already validated bytecode response
@ -1965,6 +2018,7 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
if res.subTask == nil && res.mainTask.needState[j] && (i < len(res.hashes)-1 || !res.cont) {
res.mainTask.needState[j] = false
res.mainTask.pend--
smallStorageGauge.Inc(1)
}
// If the last contract was chunked, mark it as needing healing
// to avoid writing it out to disk prematurely.
@ -2000,7 +2054,11 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
log.Debug("Chunked large contract", "initiators", len(keys), "tail", lastKey, "chunks", chunks)
}
r := newHashRange(lastKey, chunks)
if chunks == 1 {
smallStorageGauge.Inc(1)
} else {
largeStorageGauge.Inc(1)
}
// Our first task is the one that was just filled by this response.
batch := ethdb.HookedBatch{
Batch: s.db.NewBatch(),
@ -2008,14 +2066,25 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
s.storageBytes += common.StorageSize(len(key) + len(value))
},
}
owner := account // local assignment for stacktrie writer closure
options := trie.NewStackTrieOptions()
options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
rawdb.WriteTrieNode(batch, owner, path, hash, blob, s.scheme)
})
if s.scheme == rawdb.PathScheme {
options = options.WithCleaner(func(path []byte) {
s.cleanPath(batch, owner, path)
})
// Keep the left boundary as it's the first range.
// Skip the right boundary if it's not the last range.
options = options.WithSkipBoundary(false, r.End() != common.MaxHash, boundaryStorageNodesGauge)
}
tasks = append(tasks, &storageTask{
Next: common.Hash{},
Last: r.End(),
root: acc.Root,
genBatch: batch,
genTrie: trie.NewStackTrieWithOwner(func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
rawdb.WriteTrieNode(batch, owner, path, hash, val, s.scheme)
}, account),
genTrie: trie.NewStackTrie(options),
})
for r.Next() {
batch := ethdb.HookedBatch{
@ -2024,14 +2093,27 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
s.storageBytes += common.StorageSize(len(key) + len(value))
},
}
options := trie.NewStackTrieOptions()
options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
rawdb.WriteTrieNode(batch, owner, path, hash, blob, s.scheme)
})
if s.scheme == rawdb.PathScheme {
// Configure the dangling node cleaner and also filter out boundary nodes
// only in the context of the path scheme. Deletion is forbidden in the
// hash scheme, as it can disrupt state completeness.
options = options.WithCleaner(func(path []byte) {
s.cleanPath(batch, owner, path)
})
// Skip the left boundary as it's not the first range
// Skip the right boundary if it's not the last range.
options = options.WithSkipBoundary(true, r.End() != common.MaxHash, boundaryStorageNodesGauge)
}
tasks = append(tasks, &storageTask{
Next: r.Start(),
Last: r.End(),
root: acc.Root,
genBatch: batch,
genTrie: trie.NewStackTrieWithOwner(func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
rawdb.WriteTrieNode(batch, owner, path, hash, val, s.scheme)
}, account),
genTrie: trie.NewStackTrie(options),
})
}
for _, task := range tasks {
@ -2076,9 +2158,23 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
slots += len(res.hashes[i])
if i < len(res.hashes)-1 || res.subTask == nil {
tr := trie.NewStackTrieWithOwner(func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
rawdb.WriteTrieNode(batch, owner, path, hash, val, s.scheme)
}, account)
// no need to make local reassignment of account: this closure does not outlive the loop
options := trie.NewStackTrieOptions()
options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
rawdb.WriteTrieNode(batch, account, path, hash, blob, s.scheme)
})
if s.scheme == rawdb.PathScheme {
// Configure the dangling node cleaner only in the context of the
// path scheme. Deletion is forbidden in the hash scheme, as it can
// disrupt state completeness.
//
// Notably, boundary nodes can be also kept because the whole storage
// trie is complete.
options = options.WithCleaner(func(path []byte) {
s.cleanPath(batch, account, path)
})
}
tr := trie.NewStackTrie(options)
for j := 0; j < len(res.hashes[i]); j++ {
tr.Update(res.hashes[i][j][:], res.slots[i][j])
}
@ -2100,18 +2196,25 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
// Large contracts could have generated new trie nodes, flush them to disk
if res.subTask != nil {
if res.subTask.done {
if root, err := res.subTask.genTrie.Commit(); err != nil {
log.Error("Failed to commit stack slots", "err", err)
} else if root == res.subTask.root {
// If the chunk's root is an overflown but full delivery, clear the heal request
root := res.subTask.genTrie.Commit()
if err := res.subTask.genBatch.Write(); err != nil {
log.Error("Failed to persist stack slots", "err", err)
}
res.subTask.genBatch.Reset()
// If the chunk's root is an overflown but full delivery,
// clear the heal request.
accountHash := res.accounts[len(res.accounts)-1]
if root == res.subTask.root && rawdb.HasStorageTrieNode(s.db, accountHash, nil, root) {
for i, account := range res.mainTask.res.hashes {
if account == res.accounts[len(res.accounts)-1] {
if account == accountHash {
res.mainTask.needHeal[i] = false
skipStorageHealingGauge.Inc(1)
}
}
}
}
if res.subTask.genBatch.ValueSize() > ethdb.IdealBatchSize || res.subTask.done {
if res.subTask.genBatch.ValueSize() > ethdb.IdealBatchSize {
if err := res.subTask.genBatch.Write(); err != nil {
log.Error("Failed to persist stack slots", "err", err)
}
@ -2318,9 +2421,7 @@ func (s *Syncer) forwardAccountTask(task *accountTask) {
// flush after finalizing task.done. It's fine even if we crash and lose this
// write as it will only cause more data to be downloaded during heal.
if task.done {
if _, err := task.genTrie.Commit(); err != nil {
log.Error("Failed to commit stack account", "err", err)
}
task.genTrie.Commit()
}
if task.genBatch.ValueSize() > ethdb.IdealBatchSize || task.done {
if err := task.genBatch.Write(); err != nil {
@ -2625,7 +2726,7 @@ func (s *Syncer) OnStorage(peer SyncPeer, id uint64, hashes [][]common.Hash, slo
// the requested data. For storage range queries that means the state being
// retrieved was either already pruned remotely, or the peer is not yet
// synced to our head.
if len(hashes) == 0 {
if len(hashes) == 0 && len(proof) == 0 {
logger.Debug("Peer rejected storage request")
s.statelessPeers[peer.ID()] = struct{}{}
s.lock.Unlock()
@ -2637,6 +2738,13 @@ func (s *Syncer) OnStorage(peer SyncPeer, id uint64, hashes [][]common.Hash, slo
// Reconstruct the partial tries from the response and verify them
var cont bool
// If a proof was attached while the response is empty, it indicates that the
// requested range specified with 'origin' is empty. Construct an empty state
// response locally to finalize the range.
if len(hashes) == 0 && len(proof) > 0 {
hashes = append(hashes, []common.Hash{})
slots = append(slots, [][]byte{})
}
for i := 0; i < len(hashes); i++ {
// Convert the keys and proofs into an internal format
keys := make([][]byte, len(hashes[i]))

@ -22,6 +22,7 @@ import (
"encoding/binary"
"fmt"
"math/big"
mrand "math/rand"
"sync"
"testing"
"time"
@ -35,6 +36,7 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/testutil"
"github.com/ethereum/go-ethereum/trie/triedb/pathdb"
"github.com/ethereum/go-ethereum/trie/trienode"
"golang.org/x/crypto/sha3"
@ -254,7 +256,7 @@ func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, orig
func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) {
var size uint64
if limit == (common.Hash{}) {
limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
limit = common.MaxHash
}
for _, entry := range t.accountValues {
if size > cap {
@ -319,7 +321,7 @@ func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []comm
if len(origin) > 0 {
originHash = common.BytesToHash(origin)
}
var limitHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
var limitHash = common.MaxHash
if len(limit) > 0 {
limitHash = common.BytesToHash(limit)
}
@ -762,7 +764,7 @@ func testSyncWithStorage(t *testing.T, scheme string) {
})
}
)
nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 3, 3000, true, false)
sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 3, 3000, true, false, false)
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
@ -772,7 +774,7 @@ func testSyncWithStorage(t *testing.T, scheme string) {
source.storageValues = storageElems
return source
}
syncer := setupSyncer(nodeScheme, mkSource("sourceA"))
syncer := setupSyncer(scheme, mkSource("sourceA"))
done := checkStall(t, term)
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
@ -799,7 +801,7 @@ func testMultiSyncManyUseless(t *testing.T, scheme string) {
})
}
)
nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false)
sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)
mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
source := newTestPeer(name, t, term)
@ -821,7 +823,7 @@ func testMultiSyncManyUseless(t *testing.T, scheme string) {
}
syncer := setupSyncer(
nodeScheme,
scheme,
mkSource("full", true, true, true),
mkSource("noAccounts", false, true, true),
mkSource("noStorage", true, false, true),
@ -853,7 +855,7 @@ func testMultiSyncManyUselessWithLowTimeout(t *testing.T, scheme string) {
})
}
)
nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false)
sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)
mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
source := newTestPeer(name, t, term)
@ -875,7 +877,7 @@ func testMultiSyncManyUselessWithLowTimeout(t *testing.T, scheme string) {
}
syncer := setupSyncer(
nodeScheme,
scheme,
mkSource("full", true, true, true),
mkSource("noAccounts", false, true, true),
mkSource("noStorage", true, false, true),
@ -912,7 +914,7 @@ func testMultiSyncManyUnresponsive(t *testing.T, scheme string) {
})
}
)
nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false)
sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)
mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
source := newTestPeer(name, t, term)
@ -934,7 +936,7 @@ func testMultiSyncManyUnresponsive(t *testing.T, scheme string) {
}
syncer := setupSyncer(
nodeScheme,
scheme,
mkSource("full", true, true, true),
mkSource("noAccounts", false, true, true),
mkSource("noStorage", true, false, true),
@ -1215,7 +1217,7 @@ func testSyncBoundaryStorageTrie(t *testing.T, scheme string) {
})
}
)
nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 10, 1000, false, true)
sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 10, 1000, false, true, false)
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
@ -1226,7 +1228,7 @@ func testSyncBoundaryStorageTrie(t *testing.T, scheme string) {
return source
}
syncer := setupSyncer(
nodeScheme,
scheme,
mkSource("peer-a"),
mkSource("peer-b"),
)
@ -1257,7 +1259,7 @@ func testSyncWithStorageAndOneCappedPeer(t *testing.T, scheme string) {
})
}
)
nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 300, 1000, false, false)
sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 300, 1000, false, false, false)
mkSource := func(name string, slow bool) *testPeer {
source := newTestPeer(name, t, term)
@ -1273,7 +1275,7 @@ func testSyncWithStorageAndOneCappedPeer(t *testing.T, scheme string) {
}
syncer := setupSyncer(
nodeScheme,
scheme,
mkSource("nice-a", false),
mkSource("slow", true),
)
@ -1304,7 +1306,7 @@ func testSyncWithStorageAndCorruptPeer(t *testing.T, scheme string) {
})
}
)
nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false)
sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)
mkSource := func(name string, handler storageHandlerFunc) *testPeer {
source := newTestPeer(name, t, term)
@ -1317,7 +1319,7 @@ func testSyncWithStorageAndCorruptPeer(t *testing.T, scheme string) {
}
syncer := setupSyncer(
nodeScheme,
scheme,
mkSource("nice-a", defaultStorageRequestHandler),
mkSource("nice-b", defaultStorageRequestHandler),
mkSource("nice-c", defaultStorageRequestHandler),
@ -1348,7 +1350,7 @@ func testSyncWithStorageAndNonProvingPeer(t *testing.T, scheme string) {
})
}
)
nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false)
sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)
mkSource := func(name string, handler storageHandlerFunc) *testPeer {
source := newTestPeer(name, t, term)
@ -1360,7 +1362,7 @@ func testSyncWithStorageAndNonProvingPeer(t *testing.T, scheme string) {
return source
}
syncer := setupSyncer(
nodeScheme,
scheme,
mkSource("nice-a", defaultStorageRequestHandler),
mkSource("nice-b", defaultStorageRequestHandler),
mkSource("nice-c", defaultStorageRequestHandler),
@ -1413,6 +1415,45 @@ func testSyncWithStorageMisbehavingProve(t *testing.T, scheme string) {
verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncWithUnevenStorage tests sync where the storage slots are distributed
// unevenly across the trie, leaving a few ranges empty.
func TestSyncWithUnevenStorage(t *testing.T) {
t.Parallel()
testSyncWithUnevenStorage(t, rawdb.HashScheme)
testSyncWithUnevenStorage(t, rawdb.PathScheme)
}
func testSyncWithUnevenStorage(t *testing.T, scheme string) {
var (
once sync.Once
cancel = make(chan struct{})
term = func() {
once.Do(func() {
close(cancel)
})
}
)
accountTrie, accounts, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 3, 256, false, false, true)
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
source.accountTrie = accountTrie.Copy()
source.accountValues = accounts
source.setStorageTries(storageTries)
source.storageValues = storageElems
source.storageRequestHandler = func(t *testPeer, reqId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
return defaultStorageRequestHandler(t, reqId, root, accounts, origin, limit, 128) // retrieve storage in large mode
}
return source
}
syncer := setupSyncer(scheme, mkSource("source"))
if err := syncer.Sync(accountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
verifyTrie(scheme, syncer.db, accountTrie.Hash(), t)
}
type kv struct {
k, v []byte
}
@ -1511,7 +1552,7 @@ func makeBoundaryAccountTrie(scheme string, n int) (string, *trie.Trie, []*kv) {
for i := 0; i < accountConcurrency; i++ {
last := common.BigToHash(new(big.Int).Add(next.Big(), step))
if i == accountConcurrency-1 {
last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
last = common.MaxHash
}
boundaries = append(boundaries, last)
next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
@ -1608,7 +1649,7 @@ func makeAccountTrieWithStorageWithUniqueStorage(scheme string, accounts, slots
}
// makeAccountTrieWithStorage spits out a trie, along with the leaves
func makeAccountTrieWithStorage(scheme string, accounts, slots int, code, boundary bool) (string, *trie.Trie, []*kv, map[common.Hash]*trie.Trie, map[common.Hash][]*kv) {
func makeAccountTrieWithStorage(scheme string, accounts, slots int, code, boundary bool, uneven bool) (*trie.Trie, []*kv, map[common.Hash]*trie.Trie, map[common.Hash][]*kv) {
var (
db = trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
accTrie = trie.NewEmpty(db)
@ -1633,6 +1674,8 @@ func makeAccountTrieWithStorage(scheme string, accounts, slots int, code, bounda
)
if boundary {
stRoot, stNodes, stEntries = makeBoundaryStorageTrie(common.BytesToHash(key), slots, db)
} else if uneven {
stRoot, stNodes, stEntries = makeUnevenStorageTrie(common.BytesToHash(key), slots, db)
} else {
stRoot, stNodes, stEntries = makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), 0, db)
}
@ -1675,7 +1718,7 @@ func makeAccountTrieWithStorage(scheme string, accounts, slots int, code, bounda
}
storageTries[common.BytesToHash(key)] = trie
}
return db.Scheme(), accTrie, entries, storageTries, storageEntries
return accTrie, entries, storageTries, storageEntries
}
// makeStorageTrieWithSeed fills a storage trie with n items, returning the
@ -1721,7 +1764,7 @@ func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (commo
for i := 0; i < accountConcurrency; i++ {
last := common.BigToHash(new(big.Int).Add(next.Big(), step))
if i == accountConcurrency-1 {
last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
last = common.MaxHash
}
boundaries = append(boundaries, last)
next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
@ -1752,6 +1795,38 @@ func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (commo
return root, nodes, entries
}
// makeUnevenStorageTrie constructs a storage trie with states distributed
// unevenly across different ranges.
func makeUnevenStorageTrie(owner common.Hash, slots int, db *trie.Database) (common.Hash, *trienode.NodeSet, []*kv) {
var (
entries []*kv
tr, _ = trie.New(trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash), db)
chosen = make(map[byte]struct{})
)
for i := 0; i < 3; i++ {
var n int
for {
n = mrand.Intn(15) // the last range is set empty deliberately
if _, ok := chosen[byte(n)]; ok {
continue
}
chosen[byte(n)] = struct{}{}
break
}
for j := 0; j < slots/3; j++ {
key := append([]byte{byte(n)}, testutil.RandBytes(31)...)
val, _ := rlp.EncodeToBytes(testutil.RandBytes(32))
elem := &kv{key, val}
tr.MustUpdate(elem.k, elem.v)
entries = append(entries, elem)
}
}
slices.SortFunc(entries, (*kv).cmp)
root, nodes, _ := tr.Commit(false)
return root, nodes, entries
}
func verifyTrie(scheme string, db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
t.Helper()
triedb := trie.NewDatabase(rawdb.NewDatabase(db), newDbConfig(scheme))

@ -110,6 +110,16 @@ func (ec *Client) PeerCount(ctx context.Context) (uint64, error) {
return uint64(result), err
}
// BlockReceipts returns the receipts of a given block number or hash.
func (ec *Client) BlockReceipts(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) ([]*types.Receipt, error) {
var r []*types.Receipt
err := ec.c.CallContext(ctx, &r, "eth_getBlockReceipts", blockNrOrHash.String())
if err == nil && r == nil {
return nil, ethereum.NotFound
}
return r, err
}
type rpcBlock struct {
Hash common.Hash `json:"hash"`
Transactions []rpcTransaction `json:"transactions"`

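The new ethclient helper fetches every receipt of a block in one round trip. A minimal, hedged usage sketch follows (the endpoint URL and error handling are illustrative, not part of the change):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/ethclient"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := ethclient.Dial("http://localhost:8545") // assumed local node
	if err != nil {
		log.Fatal(err)
	}
	// One call replaces a per-transaction TransactionReceipt lookup.
	receipts, err := client.BlockReceipts(context.Background(),
		rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber))
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range receipts {
		fmt.Println(r.TxHash, r.Status)
	}
}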
@ -14,8 +14,6 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build (arm64 || amd64) && !openbsd
// Package pebble implements the key-value database layer based on pebble.
package pebble
@ -48,6 +46,9 @@ const (
// metricsGatheringInterval specifies the interval to retrieve pebble database
// compaction, io and pause stats to report to the user.
metricsGatheringInterval = 3 * time.Second
// numLevels is the level number of pebble sst files
numLevels = 7
)
// Database is a persistent key-value store based on the pebble storage engine.
@ -141,8 +142,15 @@ func New(file string, cache int, handles int, namespace string, readonly bool) (
// The max memtable size is limited by the uint32 offsets stored in
// internal/arenaskl.node, DeferredBatchOp, and flushableBatchEntry.
// Taken from https://github.com/cockroachdb/pebble/blob/master/open.go#L38
maxMemTableSize := 4<<30 - 1 // Capped by 4 GB
//
// - MaxUint32 on 64-bit platforms;
// - MaxInt on 32-bit platforms.
//
// It is used when slices are limited to Uint32 on 64-bit platforms (the
// length limit for slices is naturally MaxInt on 32-bit platforms).
//
// Taken from https://github.com/cockroachdb/pebble/blob/master/internal/constants/constants.go
maxMemTableSize := (1<<31)<<(^uint(0)>>63) - 1
// Two memory tables is configured which is identical to leveldb,
// including a frozen memory table and another live one.
@ -191,6 +199,7 @@ func New(file string, cache int, handles int, namespace string, readonly bool) (
WriteStallBegin: db.onWriteStallBegin,
WriteStallEnd: db.onWriteStallEnd,
},
Levels: make([]pebble.LevelOptions, numLevels),
Logger: panicLogger{}, // TODO(karalabe): Delete when this is upstreamed in Pebble
}

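For reference, a hedged sketch of how the platform-dependent memtable cap evaluates; the expression is copied from the hunk above, and the printed value is what it yields on each word size:

package main

import "fmt"

func main() {
	// ^uint(0)>>63 is 1 on 64-bit platforms and 0 on 32-bit platforms, so the
	// constant below is MaxUint32 (4294967295) or MaxInt32 (2147483647).
	maxMemTableSize := (1<<31)<<(^uint(0)>>63) - 1
	fmt.Println(maxMemTableSize)
}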
@ -14,8 +14,6 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build (arm64 || amd64) && !openbsd
package pebble
import (

@ -964,6 +964,34 @@ func (s *BlockChainAPI) GetStorageAt(ctx context.Context, address common.Address
return res[:], state.Error()
}
// GetBlockReceipts returns the block receipts for the given block hash or number or tag.
func (s *BlockChainAPI) GetBlockReceipts(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) ([]map[string]interface{}, error) {
block, err := s.b.BlockByNumberOrHash(ctx, blockNrOrHash)
if block == nil || err != nil {
// When the block doesn't exist, the RPC method should return JSON null
// as per specification.
return nil, nil
}
receipts, err := s.b.GetReceipts(ctx, block.Hash())
if err != nil {
return nil, err
}
txs := block.Transactions()
if len(txs) != len(receipts) {
return nil, fmt.Errorf("receipts length mismatch: %d vs %d", len(txs), len(receipts))
}
// Derive the sender.
signer := types.MakeSigner(s.b.ChainConfig(), block.Number(), block.Time())
result := make([]map[string]interface{}, len(receipts))
for i, receipt := range receipts {
result[i] = marshalReceipt(receipt, block.Hash(), block.NumberU64(), signer, txs[i], i)
}
return result, nil
}
// OverrideAccount indicates the overriding fields of account during the execution
// of a message call.
// Note, state and stateDiff can't be specified at the same time. If state is
@ -2123,13 +2151,18 @@ func (s *TransactionAPI) GetTransactionReceipt(ctx context.Context, hash common.
// Derive the sender.
signer := types.MakeSigner(s.b.ChainConfig(), header.Number, header.Time)
return marshalReceipt(receipt, blockHash, blockNumber, signer, tx, int(index)), nil
}
// marshalReceipt marshals a transaction receipt into a JSON object.
func marshalReceipt(receipt *types.Receipt, blockHash common.Hash, blockNumber uint64, signer types.Signer, tx *types.Transaction, txIndex int) map[string]interface{} {
from, _ := types.Sender(signer, tx)
fields := map[string]interface{}{
"blockHash": blockHash,
"blockNumber": hexutil.Uint64(blockNumber),
"transactionHash": hash,
"transactionIndex": hexutil.Uint64(index),
"transactionHash": tx.Hash(),
"transactionIndex": hexutil.Uint64(txIndex),
"from": from,
"to": tx.To(),
"gasUsed": hexutil.Uint64(receipt.GasUsed),
@ -2155,7 +2188,7 @@ func (s *TransactionAPI) GetTransactionReceipt(ctx context.Context, hash common.
if receipt.ContractAddress != (common.Address{}) {
fields["contractAddress"] = receipt.ContractAddress
}
return fields, nil
return fields
}
// sign is a helper function that signs a transaction with the private key of the given address.

@ -1781,9 +1781,7 @@ func TestRPCGetBlockOrHeader(t *testing.T) {
}
*/
func TestRPCGetTransactionReceipt(t *testing.T) {
t.Parallel()
func setupReceiptBackend(t *testing.T, genBlocks int) (*testBackend, []common.Hash) {
// Initialize test accounts
var (
acc1Key, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
@ -1809,7 +1807,6 @@ func TestRPCGetTransactionReceipt(t *testing.T) {
contract: {Balance: big.NewInt(params.Ether), Code: common.FromHex("0x608060405234801561001057600080fd5b506004361061002b5760003560e01c8063a9059cbb14610030575b600080fd5b61004a6004803603810190610045919061016a565b610060565b60405161005791906101c5565b60405180910390f35b60008273ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef846040516100bf91906101ef565b60405180910390a36001905092915050565b600080fd5b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000610101826100d6565b9050919050565b610111816100f6565b811461011c57600080fd5b50565b60008135905061012e81610108565b92915050565b6000819050919050565b61014781610134565b811461015257600080fd5b50565b6000813590506101648161013e565b92915050565b60008060408385031215610181576101806100d1565b5b600061018f8582860161011f565b92505060206101a085828601610155565b9150509250929050565b60008115159050919050565b6101bf816101aa565b82525050565b60006020820190506101da60008301846101b6565b92915050565b6101e981610134565b82525050565b600060208201905061020460008301846101e0565b9291505056fea2646970667358221220b469033f4b77b9565ee84e0a2f04d496b18160d26034d54f9487e57788fd36d564736f6c63430008120033")},
},
}
genBlocks = 5
signer = types.LatestSignerForChainID(params.TestChainConfig.ChainID)
txHashes = make([]common.Hash, genBlocks)
gasPrice = big.NewInt(3e9) // 3Gwei
@ -1854,17 +1851,17 @@ func TestRPCGetTransactionReceipt(t *testing.T) {
txHashes[i] = tx.Hash()
}
})
api := NewTransactionAPI(backend, new(AddrLocker))
blockHashes := make([]common.Hash, genBlocks+1)
ctx := context.Background()
for i := 0; i <= genBlocks; i++ {
header, err := backend.HeaderByNumber(ctx, rpc.BlockNumber(i))
if err != nil {
t.Errorf("failed to get block: %d err: %v", i, err)
}
blockHashes[i] = header.Hash()
return backend, txHashes
}
func TestRPCGetTransactionReceipt(t *testing.T) {
t.Parallel()
var (
backend, txHashes = setupReceiptBackend(t, 5)
api = NewTransactionAPI(backend, new(AddrLocker))
)
var testSuite = []struct {
txHash common.Hash
want string
@ -2016,3 +2013,102 @@ func TestRPCGetTransactionReceipt(t *testing.T) {
require.JSONEqf(t, want, have, "test %d: json not match, want: %s, have: %s", i, want, have)
}
}
func TestRPCGetBlockReceipts(t *testing.T) {
t.Parallel()
var (
genBlocks = 5
backend, _ = setupReceiptBackend(t, genBlocks)
api = NewBlockChainAPI(backend)
)
blockHashes := make([]common.Hash, genBlocks+1)
ctx := context.Background()
for i := 0; i <= genBlocks; i++ {
header, err := backend.HeaderByNumber(ctx, rpc.BlockNumber(i))
if err != nil {
t.Errorf("failed to get block: %d err: %v", i, err)
}
blockHashes[i] = header.Hash()
}
var testSuite = []struct {
test rpc.BlockNumberOrHash
want string
}{
// 0. block without any txs(hash)
{
test: rpc.BlockNumberOrHashWithHash(blockHashes[0], false),
want: `[]`,
},
// 1. block without any txs(number)
{
test: rpc.BlockNumberOrHashWithNumber(0),
want: `[]`,
},
// 2. earliest tag
{
test: rpc.BlockNumberOrHashWithNumber(rpc.EarliestBlockNumber),
want: `[]`,
},
// 3. latest tag
{
test: rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber),
want: `[{"blockHash":"0xde2f10e5c44cb6158aa7fbc70e98da70f51ee72f29c7a28fc30bf37e992655f7","blockNumber":"0x5","contractAddress":"0xfdaa97661a584d977b4d3abb5370766ff5b86a18","cumulativeGasUsed":"0xe01a","effectiveGasPrice":"0xb2d05e00","from":"0x703c4b2bd70c169f5717101caee543299fc946c7","gasUsed":"0xe01a","logs":[],"logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","status":"0x1","to":null,"transactionHash":"0x1b420e5f43f9620364d175d798acbd61d6f76ed8ea8ed7e0f93e4332ab8399b2","transactionIndex":"0x0","type":"0x1"}]`,
},
// 4. block with legacy transfer tx(hash)
{
test: rpc.BlockNumberOrHashWithHash(blockHashes[1], false),
want: `[{"blockHash":"0xcf5e82a62028debbeecba9a6a7cbeaed67b431800a4250a60943101a611d179a","blockNumber":"0x1","contractAddress":null,"cumulativeGasUsed":"0x5208","effectiveGasPrice":"0xb2d05e00","from":"0x703c4b2bd70c169f5717101caee543299fc946c7","gasUsed":"0x5208","logs":[],"logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","status":"0x1","to":"0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e","transactionHash":"0xf875edfe4579e2a5e1ad45c4d802c1e5abbca19561398e9c58c41dabd86a3aa6","transactionIndex":"0x0","type":"0x0"}]`,
},
// 5. block with contract create tx(number)
{
test: rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(2)),
want: `[{"blockHash":"0x739ed5e516e6b2b0fa16f22a335a371a1fac616bc394b7454fdf07a0fd10db30","blockNumber":"0x2","contractAddress":"0xae9bea628c4ce503dcfd7e305cab4e29e7476592","cumulativeGasUsed":"0xcf4e","effectiveGasPrice":"0xb2d05e00","from":"0x703c4b2bd70c169f5717101caee543299fc946c7","gasUsed":"0xcf4e","logs":[],"logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","status":"0x1","to":null,"transactionHash":"0xd7a2d56946b13872c0064d0af803fa7b2a7f6be74023cf9ce6337c0dc5b06813","transactionIndex":"0x0","type":"0x0"}]`,
},
// 6. block with legacy contract call tx(hash)
{
test: rpc.BlockNumberOrHashWithHash(blockHashes[3], false),
want: `[{"blockHash":"0x07545649b5df1cd84de57265fa5acdb473a1a033bf51c43d61c6183b13487f19","blockNumber":"0x3","contractAddress":null,"cumulativeGasUsed":"0x5e28","effectiveGasPrice":"0xb2d05e00","from":"0x703c4b2bd70c169f5717101caee543299fc946c7","gasUsed":"0x5e28","logs":[{"address":"0x0000000000000000000000000000000000031ec7","topics":["0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef","0x000000000000000000000000703c4b2bd70c169f5717101caee543299fc946c7","0x0000000000000000000000000000000000000000000000000000000000000003"],"data":"0x000000000000000000000000000000000000000000000000000000000000000d","blockNumber":"0x3","transactionHash":"0x2c660ba194f0e2de5bbb4e2f38a15fe9f263dd5e5c524d45be4834755b2c2a8c","transactionIndex":"0x0","blockHash":"0x07545649b5df1cd84de57265fa5acdb473a1a033bf51c43d61c6183b13487f19","logIndex":"0x0","removed":false}],"logsBloom":"0x00000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000800000000000000008000000000000000000000000000000000020000000080000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000400000000002000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000","status":"0x1","to":"0x0000000000000000000000000000000000031ec7","transactionHash":"0x2c660ba194f0e2de5bbb4e2f38a15fe9f263dd5e5c524d45be4834755b2c2a8c","transactionIndex":"0x0","type":"0x0"}]`,
},
// 7. block with dynamic fee tx(number)
{
test: rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(4)),
want: `[{"blockHash":"0x2a78cbbc361402d352baf99afcd8d57b34df182ed44819ca6bf0a1ceedc94a1e","blockNumber":"0x4","contractAddress":null,"cumulativeGasUsed":"0x538d","effectiveGasPrice":"0xb2d05e00","from":"0x703c4b2bd70c169f5717101caee543299fc946c7","gasUsed":"0x538d","logs":[],"logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","status":"0x0","to":"0x0000000000000000000000000000000000031ec7","transactionHash":"0x395e5326a196a53e961b8f406b706049288d8f92e467f95677ced981fa3a40ce","transactionIndex":"0x0","type":"0x2"}]`,
},
// 8. block is empty
{
test: rpc.BlockNumberOrHashWithHash(common.Hash{}, false),
want: `null`,
},
// 9. block is not found
{
test: rpc.BlockNumberOrHashWithHash(common.HexToHash("deadbeef"), false),
want: `null`,
},
// 10. block is not found
{
test: rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(genBlocks + 1)),
want: `null`,
},
}
for i, tt := range testSuite {
var (
result interface{}
err error
)
result, err = api.GetBlockReceipts(context.Background(), tt.test)
if err != nil {
t.Errorf("test %d: want no error, have %v", i, err)
continue
}
data, err := json.Marshal(result)
if err != nil {
t.Errorf("test %d: json marshal error", i)
continue
}
want, have := tt.want, string(data)
require.JSONEqf(t, want, have, "test %d: json not match, want: %s, have: %s", i, want, have)
}
}

@ -611,6 +611,11 @@ web3._extend({
params: 4,
inputFormatter: [web3._extend.formatters.inputCallFormatter, web3._extend.formatters.inputDefaultBlockNumberFormatter, null, null],
}),
new web3._extend.Method({
name: 'getBlockReceipts',
call: 'eth_getBlockReceipts',
params: 1,
}),
],
properties: [
new web3._extend.Property({

@ -17,15 +17,18 @@ type TimeTicker struct {
// NewTimeTicker creates a TimeTicker that notifies based on rotateHours parameter.
// if rotateHours is 1 and current time is 11:32 it means that the ticker will tick at 12:00
// if rotateHours is 5 and current time is 09:12 means that the ticker will tick at 11:00
func NewTimeTicker(rotateHours int) *TimeTicker {
// if rotateHours is 2 and current time is 09:12, it means that the ticker will tick at 11:00
// in particular, if rotateHours is 0, no rotation is performed
func NewTimeTicker(rotateHours uint) *TimeTicker {
ch := make(chan time.Time)
tt := TimeTicker{
stop: make(chan struct{}),
C: ch,
}
if rotateHours > 0 {
tt.startTicker(ch, rotateHours)
}
return &tt
}
@ -34,7 +37,7 @@ func (tt *TimeTicker) Stop() {
tt.stop <- struct{}{}
}
func (tt *TimeTicker) startTicker(ch chan time.Time, rotateHours int) {
func (tt *TimeTicker) startTicker(ch chan time.Time, rotateHours uint) {
go func() {
nextRotationHour := getNextRotationHour(time.Now(), rotateHours)
ticker := time.NewTicker(time.Second)
@ -53,7 +56,7 @@ func (tt *TimeTicker) startTicker(ch chan time.Time, rotateHours int) {
}()
}
func getNextRotationHour(now time.Time, delta int) int {
func getNextRotationHour(now time.Time, delta uint) int {
return now.Add(time.Hour * time.Duration(delta)).Hour()
}
@ -68,7 +71,7 @@ type AsyncFileWriter struct {
timeTicker *TimeTicker
}
func NewAsyncFileWriter(filePath string, maxBytesSize int64, rotateHours int) *AsyncFileWriter {
func NewAsyncFileWriter(filePath string, maxBytesSize int64, rotateHours uint) *AsyncFileWriter {
absFilePath, err := filepath.Abs(filePath)
if err != nil {
panic(fmt.Sprintf("get file path of logger error. filePath=%s, err=%s", filePath, err))

@ -29,7 +29,7 @@ func TestWriterHourly(t *testing.T) {
func TestGetNextRotationHour(t *testing.T) {
tcs := []struct {
now time.Time
delta int
delta uint
expectedHour int
}{
{
@ -54,7 +54,7 @@ func TestGetNextRotationHour(t *testing.T) {
},
}
test := func(now time.Time, delta, expectedHour int) func(*testing.T) {
test := func(now time.Time, delta uint, expectedHour int) func(*testing.T) {
return func(t *testing.T) {
got := getNextRotationHour(now, delta)
if got != expectedHour {

@ -75,7 +75,7 @@ func FileHandler(path string, fmtr Format) (Handler, error) {
// RotatingFileHandler returns a handler which writes log records to file chunks
// at the given path. When a file's size reaches the limit, the handler creates
// a new file named after the timestamp of the first log record it will contain.
func RotatingFileHandler(filePath string, limit uint, formatter Format, rotateHours int) (Handler, error) {
func RotatingFileHandler(filePath string, limit uint, formatter Format, rotateHours uint) (Handler, error) {
if _, err := os.Stat(path.Dir(filePath)); os.IsNotExist(err) {
err := os.MkdirAll(path.Dir(filePath), 0755)
if err != nil {

@ -290,7 +290,7 @@ func (c Ctx) toArray() []interface{} {
return arr
}
func NewFileLvlHandler(logPath string, maxBytesSize uint, level string, rotateHours int) Handler {
func NewFileLvlHandler(logPath string, maxBytesSize uint, level string, rotateHours uint) Handler {
rfh, err := RotatingFileHandler(logPath, maxBytesSize, LogfmtFormat(), rotateHours)
if err != nil {
panic(err)

@ -512,7 +512,7 @@ type LogConfig struct {
FilePath *string `toml:",omitempty"`
MaxBytesSize *uint `toml:",omitempty"`
Level *string `toml:",omitempty"`
RotateHours int `toml:",omitempty"`
RotateHours *uint `toml:",omitempty"`
// TermTimeFormat is the time format used for console logging.
TermTimeFormat *string `toml:",omitempty"`

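With RotateHours now a *uint, leaving it nil keeps the backwards-compatible default of one hour, while values above 23 are rejected at startup. A hypothetical configuration sketch (field values are illustrative; only the fields shown in the hunks above are assumed):

rotate := uint(6)           // rotate every 6 hours; a nil pointer falls back to 1
maxBytes := uint(100 << 20) // 100 MiB per log chunk
level := "info"
filePath := "bsc.log"
logCfg := LogConfig{
	FilePath:     &filePath,
	MaxBytesSize: &maxBytes,
	Level:        &level,
	RotateHours:  &rotate,
}
_ = logCfg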
@ -109,16 +109,16 @@ func New(conf *Config) (*Node, error) {
logFilePath = path.Join(*conf.LogConfig.FileRoot, *conf.LogConfig.FilePath)
}
if conf.LogConfig.RotateHours > 24 {
return nil, errors.New("Config.LogConfig.RotateHours cannot be greater than 24")
rotateHours := uint(1) // To maintain backwards compatibility, if RotateHours is not set, then it defaults to 1
if conf.LogConfig.RotateHours != nil {
if *conf.LogConfig.RotateHours > 23 {
return nil, errors.New("Config.LogConfig.RotateHours cannot be greater than 23")
}
// To maintain backwards compatibility, if RotateHours is not set or set to a negative value, then it defaults to 1
if conf.LogConfig.RotateHours < 1 {
conf.LogConfig.RotateHours = 1
rotateHours = *conf.LogConfig.RotateHours
}
log.Root().SetHandler(log.NewFileLvlHandler(logFilePath, *conf.LogConfig.MaxBytesSize, *conf.LogConfig.Level, conf.LogConfig.RotateHours))
log.Root().SetHandler(log.NewFileLvlHandler(logFilePath, *conf.LogConfig.MaxBytesSize, *conf.LogConfig.Level, rotateHours))
}
}
if conf.Logger == nil {

@ -427,6 +427,21 @@ var (
TestRules = TestChainConfig.Rules(new(big.Int), false, 0)
)
func GetBuiltInChainConfig(ghash common.Hash) *ChainConfig {
switch ghash {
case MainnetGenesisHash:
return MainnetChainConfig
case BSCGenesisHash:
return BSCChainConfig
case ChapelGenesisHash:
return ChapelChainConfig
case RialtoGenesisHash:
return RialtoChainConfig
default:
return nil
}
}
// NetworkNames are user friendly names to use in the chain spec banner.
var NetworkNames = map[string]string{
MainnetChainConfig.ChainID.String(): "mainnet",
@ -811,6 +826,15 @@ func (c *ChainConfig) IsShanghai(num *big.Int, time uint64) bool {
return c.IsLondon(num) && isTimestampForked(c.ShanghaiTime, time)
}
// IsOnShanghai reports whether the Shanghai fork is crossed exactly at the current block,
// i.e. the previous block was before the fork time and the current block is at or after it.
func (c *ChainConfig) IsOnShanghai(currentBlockNumber *big.Int, lastBlockTime uint64, currentBlockTime uint64) bool {
lastBlockNumber := new(big.Int)
if currentBlockNumber.Cmp(big.NewInt(1)) >= 0 {
lastBlockNumber.Sub(currentBlockNumber, big.NewInt(1))
}
return !c.IsShanghai(lastBlockNumber, lastBlockTime) && c.IsShanghai(currentBlockNumber, currentBlockTime)
}
// IsKepler returns whether time is either equal to the kepler fork time or greater.
func (c *ChainConfig) IsKepler(num *big.Int, time uint64) bool {
return c.IsLondon(num) && isTimestampForked(c.KeplerTime, time)
@ -903,7 +927,6 @@ func (c *ChainConfig) CheckConfigForkOrder() error {
{name: "platoBlock", block: c.PlatoBlock},
{name: "hertzBlock", block: c.HertzBlock},
{name: "hertzfixBlock", block: c.HertzfixBlock},
{name: "shanghaiTime", timestamp: c.ShanghaiTime},
{name: "keplerTime", timestamp: c.KeplerTime},
{name: "feynmanTime", timestamp: c.FeynmanTime},
{name: "cancunTime", timestamp: c.CancunTime, optional: true},

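A hedged worked example of IsOnShanghai: only the block that first crosses ShanghaiTime reports true, all later blocks report false. The config literal is illustrative only and assumes the params, math/big and fmt packages are imported:

shanghai := uint64(1700000000)
cfg := &params.ChainConfig{
	ChainID:      big.NewInt(97),
	LondonBlock:  big.NewInt(0), // IsShanghai also requires the London fork to be active
	ShanghaiTime: &shanghai,
}
// Parent block at t=1699999999, current block at t=1700000005: the fork is crossed here.
fmt.Println(cfg.IsOnShanghai(big.NewInt(100), 1699999999, 1700000005)) // true
// The next block is already past the fork, so the helper reports false.
fmt.Println(cfg.IsOnShanghai(big.NewInt(101), 1700000005, 1700000008)) // false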
@ -23,7 +23,7 @@ import (
const (
VersionMajor = 1 // Major version component of the current release
VersionMinor = 3 // Minor version component of the current release
VersionPatch = 5 // Patch version component of the current release
VersionPatch = 7 // Patch version component of the current release
VersionMeta = "" // Version metadata to append to the version string
)

@ -21,7 +21,6 @@ import (
"encoding/json"
"fmt"
"math"
"strconv"
"strings"
"github.com/ethereum/go-ethereum/common"
@ -221,7 +220,7 @@ func (bnh *BlockNumberOrHash) Number() (BlockNumber, bool) {
func (bnh *BlockNumberOrHash) String() string {
if bnh.BlockNumber != nil {
return strconv.Itoa(int(*bnh.BlockNumber))
return bnh.BlockNumber.String()
}
if bnh.BlockHash != nil {
return bnh.BlockHash.String()

@ -153,3 +153,24 @@ func TestBlockNumberOrHash_WithNumber_MarshalAndUnmarshal(t *testing.T) {
})
}
}
func TestBlockNumberOrHash_StringAndUnmarshal(t *testing.T) {
tests := []BlockNumberOrHash{
BlockNumberOrHashWithNumber(math.MaxInt64),
BlockNumberOrHashWithNumber(PendingBlockNumber),
BlockNumberOrHashWithNumber(LatestBlockNumber),
BlockNumberOrHashWithNumber(EarliestBlockNumber),
BlockNumberOrHashWithNumber(32),
BlockNumberOrHashWithHash(common.Hash{0xaa}, false),
}
for _, want := range tests {
marshalled, _ := json.Marshal(want.String())
var have BlockNumberOrHash
if err := json.Unmarshal(marshalled, &have); err != nil {
t.Fatalf("cannot unmarshal (%v): %v", string(marshalled), err)
}
if !reflect.DeepEqual(want, have) {
t.Fatalf("wrong result: have %v, want %v", have, want)
}
}
}

@ -140,9 +140,11 @@ func (f *fuzzer) fuzz() int {
trieA = trie.NewEmpty(dbA)
spongeB = &spongeDb{sponge: sha3.NewLegacyKeccak256()}
dbB = trie.NewDatabase(rawdb.NewDatabase(spongeB), nil)
trieB = trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
rawdb.WriteTrieNode(spongeB, owner, path, hash, blob, dbB.Scheme())
options = trie.NewStackTrieOptions().WithWriter(func(path []byte, hash common.Hash, blob []byte) {
rawdb.WriteTrieNode(spongeB, common.Hash{}, path, hash, blob, dbB.Scheme())
})
trieB = trie.NewStackTrie(options)
vals []kv
useful bool
maxElements = 10000
@ -205,21 +207,19 @@ func (f *fuzzer) fuzz() int {
// Ensure all the nodes are persisted correctly
var (
nodeset = make(map[string][]byte) // path -> blob
trieC = trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
optionsC = trie.NewStackTrieOptions().WithWriter(func(path []byte, hash common.Hash, blob []byte) {
if crypto.Keccak256Hash(blob) != hash {
panic("invalid node blob")
}
if owner != (common.Hash{}) {
panic("invalid node owner")
}
nodeset[string(path)] = common.CopyBytes(blob)
})
trieC = trie.NewStackTrie(optionsC)
checked int
)
for _, kv := range vals {
trieC.MustUpdate(kv.k, kv.v)
}
rootC, _ := trieC.Commit()
rootC := trieC.Commit()
if rootA != rootC {
panic(fmt.Sprintf("roots differ: (trie) %x != %x (stacktrie)", rootA, rootC))
}

@ -51,9 +51,8 @@ func hexToCompact(hex []byte) []byte {
return buf
}
// hexToCompactInPlace places the compact key in input buffer, returning the length
// needed for the representation
func hexToCompactInPlace(hex []byte) int {
// hexToCompactInPlace places the compact key in input buffer, returning the compacted key.
func hexToCompactInPlace(hex []byte) []byte {
var (
hexLen = len(hex) // length of the hex input
firstByte = byte(0)
@ -77,7 +76,7 @@ func hexToCompactInPlace(hex []byte) int {
hex[bi] = hex[ni]<<4 | hex[ni+1]
}
hex[0] = firstByte
return binLen
return hex[:binLen]
}
func compactToHex(compact []byte) []byte {

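A small worked example of the in-place compaction (the helper is unexported, so this only runs from within the trie package): key 0x1234 expands to the hex nibbles [1 2 3 4 16], which compact to 0x201234 while reusing the input buffer:

hexKey := keybytesToHex([]byte{0x12, 0x34}) // [1 2 3 4 16], terminator included
compact := hexToCompactInPlace(hexKey)      // writes into hexKey's backing array
fmt.Printf("%x\n", compact)                 // 201234 (0x20 marks an even-length leaf key)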
@ -86,8 +86,7 @@ func TestHexToCompactInPlace(t *testing.T) {
} {
hexBytes, _ := hex.DecodeString(key)
exp := hexToCompact(hexBytes)
sz := hexToCompactInPlace(hexBytes)
got := hexBytes[:sz]
got := hexToCompactInPlace(hexBytes)
if !bytes.Equal(exp, got) {
t.Fatalf("test %d: encoding err\ninp %v\ngot %x\nexp %x\n", i, key, got, exp)
}
@ -102,8 +101,7 @@ func TestHexToCompactInPlaceRandom(t *testing.T) {
hexBytes := keybytesToHex(key)
hexOrig := []byte(string(hexBytes))
exp := hexToCompact(hexBytes)
sz := hexToCompactInPlace(hexBytes)
got := hexBytes[:sz]
got := hexToCompactInPlace(hexBytes)
if !bytes.Equal(exp, got) {
t.Fatalf("encoding err \ncpt %x\nhex %x\ngot %x\nexp %x\n",
@ -119,6 +117,13 @@ func BenchmarkHexToCompact(b *testing.B) {
}
}
func BenchmarkHexToCompactInPlace(b *testing.B) {
testBytes := []byte{0, 15, 1, 12, 11, 8, 16 /*term*/}
for i := 0; i < b.N; i++ {
hexToCompactInPlace(testBytes)
}
}
func BenchmarkCompactToHex(b *testing.B) {
testBytes := []byte{0, 15, 1, 12, 11, 8, 16 /*term*/}
for i := 0; i < b.N; i++ {

@ -250,7 +250,7 @@ func TestRangeProofWithNonExistentProof(t *testing.T) {
// Special case, two edge proofs for two edge key.
proof := memorydb.New()
first := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000").Bytes()
last := common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").Bytes()
last := common.MaxHash.Bytes()
if err := trie.Prove(first, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
@ -451,7 +451,7 @@ func TestAllElementsProof(t *testing.T) {
// Even with non-existent edge proofs, it should still work.
proof = memorydb.New()
first := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000").Bytes()
last := common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").Bytes()
last := common.MaxHash.Bytes()
if err := trie.Prove(first, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
@ -517,7 +517,7 @@ func TestReverseSingleSideRangeProof(t *testing.T) {
if err := trie.Prove(entries[pos].k, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
last := common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
last := common.MaxHash
if err := trie.Prove(last.Bytes(), proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
@ -728,7 +728,7 @@ func TestHasRightElement(t *testing.T) {
}
}
if c.end == -1 {
lastKey, end = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").Bytes(), len(entries)
lastKey, end = common.MaxHash.Bytes(), len(entries)
if err := trie.Prove(lastKey, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}

@ -17,183 +17,146 @@
package trie
import (
"bufio"
"bytes"
"encoding/gob"
"errors"
"io"
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
)
var ErrCommitDisabled = errors.New("no database for committing")
var (
stPool = sync.Pool{New: func() any { return new(stNode) }}
_ = types.TrieHasher((*StackTrie)(nil))
)
var stPool = sync.Pool{
New: func() interface{} {
return NewStackTrie(nil)
},
// StackTrieOptions contains the configured options for manipulating the stackTrie.
type StackTrieOptions struct {
Writer func(path []byte, hash common.Hash, blob []byte) // The function to commit the dirty nodes
Cleaner func(path []byte) // The function to clean up dangling nodes
SkipLeftBoundary bool // Flag whether the nodes on the left boundary are skipped for committing
SkipRightBoundary bool // Flag whether the nodes on the right boundary are skipped for committing
boundaryGauge metrics.Gauge // Gauge to track how many boundary nodes are met
}
// NodeWriteFunc is used to provide all information of a dirty node for committing
// so that callers can flush nodes into database with desired scheme.
type NodeWriteFunc = func(owner common.Hash, path []byte, hash common.Hash, blob []byte)
// NewStackTrieOptions initializes an empty options for stackTrie.
func NewStackTrieOptions() *StackTrieOptions { return &StackTrieOptions{} }
func stackTrieFromPool(writeFn NodeWriteFunc, owner common.Hash) *StackTrie {
st := stPool.Get().(*StackTrie)
st.owner = owner
st.writeFn = writeFn
return st
// WithWriter configures trie node writer within the options.
func (o *StackTrieOptions) WithWriter(writer func(path []byte, hash common.Hash, blob []byte)) *StackTrieOptions {
o.Writer = writer
return o
}
func returnToPool(st *StackTrie) {
st.Reset()
stPool.Put(st)
// WithCleaner configures the cleaner in the option for removing dangling nodes.
func (o *StackTrieOptions) WithCleaner(cleaner func(path []byte)) *StackTrieOptions {
o.Cleaner = cleaner
return o
}
// WithSkipBoundary configures whether the left and right boundary nodes are
// skipped when committing, along with a gauge metric to track how many
// boundary nodes are encountered.
func (o *StackTrieOptions) WithSkipBoundary(skipLeft, skipRight bool, gauge metrics.Gauge) *StackTrieOptions {
o.SkipLeftBoundary = skipLeft
o.SkipRightBoundary = skipRight
o.boundaryGauge = gauge
return o
}
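A minimal usage sketch of the options-based constructor, mirroring the snap sync call sites earlier in this change (batch, scheme, keys and vals are assumed to exist in scope; keys must be fed in ascending order):

options := trie.NewStackTrieOptions()
options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
	rawdb.WriteTrieNode(batch, common.Hash{}, path, hash, blob, scheme)
})
// WithCleaner and WithSkipBoundary are only meaningful for the path scheme,
// where partially generated ranges must not leave dangling or boundary nodes on disk.
tr := trie.NewStackTrie(options)
for i := range keys {
	tr.MustUpdate(keys[i], vals[i])
}
root := tr.Commit() // Commit now returns the root hash directly, without an error
_ = root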
// StackTrie is a trie implementation that expects keys to be inserted
// in order. Once it determines that a subtree will no longer be inserted
// into, it will hash it and free up the memory it uses.
type StackTrie struct {
owner common.Hash // the owner of the trie
nodeType uint8 // node type (as in branch, ext, leaf)
val []byte // value contained by this node if it's a leaf
key []byte // key chunk covered by this (leaf|ext) node
children [16]*StackTrie // list of children (for branch and exts)
writeFn NodeWriteFunc // function for committing nodes, can be nil
options *StackTrieOptions
root *stNode
h *hasher
first []byte // The (hex-encoded without terminator) key of first inserted entry, tracked as left boundary.
last []byte // The (hex-encoded without terminator) key of last inserted entry, tracked as right boundary.
}
// NewStackTrie allocates and initializes an empty trie.
func NewStackTrie(writeFn NodeWriteFunc) *StackTrie {
func NewStackTrie(options *StackTrieOptions) *StackTrie {
if options == nil {
options = NewStackTrieOptions()
}
return &StackTrie{
nodeType: emptyNode,
writeFn: writeFn,
options: options,
root: stPool.Get().(*stNode),
h: newHasher(false),
}
}
// NewStackTrieWithOwner allocates and initializes an empty trie, but with
// the additional owner field.
func NewStackTrieWithOwner(writeFn NodeWriteFunc, owner common.Hash) *StackTrie {
return &StackTrie{
owner: owner,
nodeType: emptyNode,
writeFn: writeFn,
}
// Update inserts a (key, value) pair into the stack trie.
func (t *StackTrie) Update(key, value []byte) error {
k := keybytesToHex(key)
if len(value) == 0 {
panic("deletion not supported")
}
k = k[:len(k)-1] // chop the termination flag
// NewFromBinary initialises a serialized stacktrie with the given db.
func NewFromBinary(data []byte, writeFn NodeWriteFunc) (*StackTrie, error) {
var st StackTrie
if err := st.UnmarshalBinary(data); err != nil {
return nil, err
// track the first and last inserted entries.
if t.first == nil {
t.first = append([]byte{}, k...)
}
// If a database is used, we need to recursively add it to every child
if writeFn != nil {
st.setWriter(writeFn)
}
return &st, nil
}
// MarshalBinary implements encoding.BinaryMarshaler
func (st *StackTrie) MarshalBinary() (data []byte, err error) {
var (
b bytes.Buffer
w = bufio.NewWriter(&b)
)
if err := gob.NewEncoder(w).Encode(struct {
Owner common.Hash
NodeType uint8
Val []byte
Key []byte
}{
st.owner,
st.nodeType,
st.val,
st.key,
}); err != nil {
return nil, err
}
for _, child := range st.children {
if child == nil {
w.WriteByte(0)
continue
}
w.WriteByte(1)
if childData, err := child.MarshalBinary(); err != nil {
return nil, err
if t.last == nil {
t.last = append([]byte{}, k...) // allocate key slice
} else {
w.Write(childData)
}
}
w.Flush()
return b.Bytes(), nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler
func (st *StackTrie) UnmarshalBinary(data []byte) error {
r := bytes.NewReader(data)
return st.unmarshalBinary(r)
}
func (st *StackTrie) unmarshalBinary(r io.Reader) error {
var dec struct {
Owner common.Hash
NodeType uint8
Val []byte
Key []byte
}
if err := gob.NewDecoder(r).Decode(&dec); err != nil {
return err
}
st.owner = dec.Owner
st.nodeType = dec.NodeType
st.val = dec.Val
st.key = dec.Key
var hasChild = make([]byte, 1)
for i := range st.children {
if _, err := r.Read(hasChild); err != nil {
return err
} else if hasChild[0] == 0 {
continue
}
var child StackTrie
if err := child.unmarshalBinary(r); err != nil {
return err
}
st.children[i] = &child
t.last = append(t.last[:0], k...) // reuse key slice
}
t.insert(t.root, k, value, nil)
return nil
}
func (st *StackTrie) setWriter(writeFn NodeWriteFunc) {
st.writeFn = writeFn
for _, child := range st.children {
if child != nil {
child.setWriter(writeFn)
}
// MustUpdate is a wrapper of Update and will omit any encountered error but
// just print out an error message.
func (t *StackTrie) MustUpdate(key, value []byte) {
if err := t.Update(key, value); err != nil {
log.Error("Unhandled trie error in StackTrie.Update", "err", err)
}
}
func newLeaf(owner common.Hash, key, val []byte, writeFn NodeWriteFunc) *StackTrie {
st := stackTrieFromPool(writeFn, owner)
st.nodeType = leafNode
// Reset resets the stack trie object to empty state.
func (t *StackTrie) Reset() {
t.options = NewStackTrieOptions()
t.root = stPool.Get().(*stNode)
t.first = nil
t.last = nil
}
// stNode represents a node within a StackTrie
type stNode struct {
typ uint8 // node type (as in branch, ext, leaf)
key []byte // key chunk covered by this (leaf|ext) node
val []byte // value contained by this node if it's a leaf
children [16]*stNode // list of children (for branch and exts)
}
// newLeaf constructs a leaf node with provided node key and value. The key
// will be deep-copied in the function and safe to modify afterwards, but
// value is not.
func newLeaf(key, val []byte) *stNode {
st := stPool.Get().(*stNode)
st.typ = leafNode
st.key = append(st.key, key...)
st.val = val
return st
}
func newExt(owner common.Hash, key []byte, child *StackTrie, writeFn NodeWriteFunc) *StackTrie {
st := stackTrieFromPool(writeFn, owner)
st.nodeType = extNode
// newExt constructs an extension node with provided node key and child. The
// key will be deep-copied in the function and safe to modify afterwards.
func newExt(key []byte, child *stNode) *stNode {
st := stPool.Get().(*stNode)
st.typ = extNode
st.key = append(st.key, key...)
st.children[0] = child
return st
}
// List all values that StackTrie#nodeType can hold
// List all values that stNode#typ can hold
const (
emptyNode = iota
branchNode
@ -202,59 +165,40 @@ const (
hashedNode
)
// Update inserts a (key, value) pair into the stack trie.
func (st *StackTrie) Update(key, value []byte) error {
k := keybytesToHex(key)
if len(value) == 0 {
panic("deletion not supported")
func (n *stNode) reset() *stNode {
n.key = n.key[:0]
n.val = nil
for i := range n.children {
n.children[i] = nil
}
st.insert(k[:len(k)-1], value, nil)
return nil
}
// MustUpdate is a wrapper of Update and will omit any encountered error but
// just print out an error message.
func (st *StackTrie) MustUpdate(key, value []byte) {
if err := st.Update(key, value); err != nil {
log.Error("Unhandled trie error in StackTrie.Update", "err", err)
}
}
func (st *StackTrie) Reset() {
st.owner = common.Hash{}
st.writeFn = nil
st.key = st.key[:0]
st.val = nil
for i := range st.children {
st.children[i] = nil
}
st.nodeType = emptyNode
n.typ = emptyNode
return n
}
// Helper function that, given a full key, determines the index
// at which the chunk pointed by st.keyOffset is different from
// the same chunk in the full key.
func (st *StackTrie) getDiffIndex(key []byte) int {
for idx, nibble := range st.key {
func (n *stNode) getDiffIndex(key []byte) int {
for idx, nibble := range n.key {
if nibble != key[idx] {
return idx
}
}
return len(st.key)
return len(n.key)
}
// Helper function that inserts a (key, value) pair into
// the trie.
func (st *StackTrie) insert(key, value []byte, prefix []byte) {
switch st.nodeType {
func (t *StackTrie) insert(st *stNode, key, value []byte, path []byte) {
switch st.typ {
case branchNode: /* Branch */
idx := int(key[0])
// Unresolve elder siblings
for i := idx - 1; i >= 0; i-- {
if st.children[i] != nil {
if st.children[i].nodeType != hashedNode {
st.children[i].hash(append(prefix, byte(i)))
if st.children[i].typ != hashedNode {
t.hash(st.children[i], append(path, byte(i)))
}
break
}
@ -262,9 +206,9 @@ func (st *StackTrie) insert(key, value []byte, prefix []byte) {
// Add new child
if st.children[idx] == nil {
st.children[idx] = newLeaf(st.owner, key[1:], value, st.writeFn)
st.children[idx] = newLeaf(key[1:], value)
} else {
st.children[idx].insert(key[1:], value, append(prefix, key[0]))
t.insert(st.children[idx], key[1:], value, append(path, key[0]))
}
case extNode: /* Ext */
@ -279,46 +223,46 @@ func (st *StackTrie) insert(key, value []byte, prefix []byte) {
if diffidx == len(st.key) {
// Ext key and key segment are identical, recurse into
// the child node.
st.children[0].insert(key[diffidx:], value, append(prefix, key[:diffidx]...))
t.insert(st.children[0], key[diffidx:], value, append(path, key[:diffidx]...))
return
}
// Save the original part. Depending if the break is
// at the extension's last byte or not, create an
// intermediate extension or use the extension's child
// node directly.
var n *StackTrie
var n *stNode
if diffidx < len(st.key)-1 {
// Break on the non-last byte, insert an intermediate
// extension. The path prefix of the newly-inserted
// extension should also contain the different byte.
n = newExt(st.owner, st.key[diffidx+1:], st.children[0], st.writeFn)
n.hash(append(prefix, st.key[:diffidx+1]...))
n = newExt(st.key[diffidx+1:], st.children[0])
t.hash(n, append(path, st.key[:diffidx+1]...))
} else {
// Break on the last byte, no need to insert
// an extension node: reuse the current node.
// The path prefix of the original part should
// still be same.
n = st.children[0]
n.hash(append(prefix, st.key...))
t.hash(n, append(path, st.key...))
}
var p *StackTrie
var p *stNode
if diffidx == 0 {
// the break is on the first byte, so
// the current node is converted into
// a branch node.
st.children[0] = nil
p = st
st.nodeType = branchNode
st.typ = branchNode
} else {
// the common prefix is at least one byte
// long, insert a new intermediate branch
// node.
st.children[0] = stackTrieFromPool(st.writeFn, st.owner)
st.children[0].nodeType = branchNode
st.children[0] = stPool.Get().(*stNode)
st.children[0].typ = branchNode
p = st.children[0]
}
// Create a leaf for the inserted part
o := newLeaf(st.owner, key[diffidx+1:], value, st.writeFn)
o := newLeaf(key[diffidx+1:], value)
// Insert both child leaves where they belong:
origIdx := st.key[diffidx]
@ -344,18 +288,18 @@ func (st *StackTrie) insert(key, value []byte, prefix []byte) {
// Check if the split occurs at the first nibble of the
// chunk. In that case, no prefix extnode is necessary.
// Otherwise, create that
var p *StackTrie
var p *stNode
if diffidx == 0 {
// Convert current leaf into a branch
st.nodeType = branchNode
st.typ = branchNode
p = st
st.children[0] = nil
} else {
// Convert current node into an ext,
// and insert a child branch node.
st.nodeType = extNode
st.children[0] = NewStackTrieWithOwner(st.writeFn, st.owner)
st.children[0].nodeType = branchNode
st.typ = extNode
st.children[0] = stPool.Get().(*stNode)
st.children[0].typ = branchNode
p = st.children[0]
}
@ -363,11 +307,11 @@ func (st *StackTrie) insert(key, value []byte, prefix []byte) {
// value and another containing the new value. The child leaf
// is hashed directly in order to free up some memory.
origIdx := st.key[diffidx]
p.children[origIdx] = newLeaf(st.owner, st.key[diffidx+1:], st.val, st.writeFn)
p.children[origIdx].hash(append(prefix, st.key[:diffidx+1]...))
p.children[origIdx] = newLeaf(st.key[diffidx+1:], st.val)
t.hash(p.children[origIdx], append(path, st.key[:diffidx+1]...))
newIdx := key[diffidx]
p.children[newIdx] = newLeaf(st.owner, key[diffidx+1:], value, st.writeFn)
p.children[newIdx] = newLeaf(key[diffidx+1:], value)
// Finally, cut off the key part that has been passed
// over to the children.
@ -375,7 +319,7 @@ func (st *StackTrie) insert(key, value []byte, prefix []byte) {
st.val = nil
case emptyNode: /* Empty */
st.nodeType = leafNode
st.typ = leafNode
st.key = key
st.val = value
@ -398,25 +342,19 @@ func (st *StackTrie) insert(key, value []byte, prefix []byte) {
// - And the 'st.type' will be 'hashedNode' AGAIN
//
// This method also sets 'st.type' to hashedNode, and clears 'st.key'.
func (st *StackTrie) hash(path []byte) {
h := newHasher(false)
defer returnHasherToPool(h)
st.hashRec(h, path)
}
func (st *StackTrie) hashRec(hasher *hasher, path []byte) {
// The switch below sets this to the RLP-encoding of this node.
var encodedNode []byte
switch st.nodeType {
func (t *StackTrie) hash(st *stNode, path []byte) {
var (
blob []byte // RLP-encoded node blob
internal [][]byte // List of node paths covered by the extension node
)
switch st.typ {
case hashedNode:
return
case emptyNode:
st.val = types.EmptyRootHash.Bytes()
st.key = st.key[:0]
st.nodeType = hashedNode
st.typ = hashedNode
return
case branchNode:
@ -426,109 +364,113 @@ func (st *StackTrie) hashRec(hasher *hasher, path []byte) {
nodes.Children[i] = nilValueNode
continue
}
child.hashRec(hasher, append(path, byte(i)))
t.hash(child, append(path, byte(i)))
if len(child.val) < 32 {
nodes.Children[i] = rawNode(child.val)
} else {
nodes.Children[i] = hashNode(child.val)
}
// Release child back to pool.
st.children[i] = nil
returnToPool(child)
stPool.Put(child.reset()) // Release child back to pool.
}
nodes.encode(hasher.encbuf)
encodedNode = hasher.encodedBytes()
nodes.encode(t.h.encbuf)
blob = t.h.encodedBytes()
case extNode:
st.children[0].hashRec(hasher, append(path, st.key...))
// recursively hash and commit child as the first step
t.hash(st.children[0], append(path, st.key...))
n := shortNode{Key: hexToCompact(st.key)}
// Collect the paths of internal nodes between the shortNode and its **on-disk**
// child. This is essential for the path scheme to avoid leaving dangling
// nodes within the range of this internal path on disk, which would break
// the guarantee for state healing.
if len(st.children[0].val) >= 32 && t.options.Cleaner != nil {
for i := 1; i < len(st.key); i++ {
internal = append(internal, append(path, st.key[:i]...))
}
}
// encode the extension node
n := shortNode{Key: hexToCompactInPlace(st.key)}
if len(st.children[0].val) < 32 {
n.Val = rawNode(st.children[0].val)
} else {
n.Val = hashNode(st.children[0].val)
}
n.encode(t.h.encbuf)
blob = t.h.encodedBytes()
n.encode(hasher.encbuf)
encodedNode = hasher.encodedBytes()
// Release child back to pool.
returnToPool(st.children[0])
stPool.Put(st.children[0].reset()) // Release child back to pool.
st.children[0] = nil
case leafNode:
st.key = append(st.key, byte(16))
n := shortNode{Key: hexToCompact(st.key), Val: valueNode(st.val)}
n := shortNode{Key: hexToCompactInPlace(st.key), Val: valueNode(st.val)}
n.encode(hasher.encbuf)
encodedNode = hasher.encodedBytes()
n.encode(t.h.encbuf)
blob = t.h.encodedBytes()
default:
panic("invalid node type")
}
st.nodeType = hashedNode
st.typ = hashedNode
st.key = st.key[:0]
if len(encodedNode) < 32 {
st.val = common.CopyBytes(encodedNode)
// Skip committing the non-root node if the size is smaller than 32 bytes.
if len(blob) < 32 && len(path) > 0 {
st.val = common.CopyBytes(blob)
return
}
// Write the hash to the 'val'. We allocate a new val here to not mutate
// input values
st.val = hasher.hashData(encodedNode)
if st.writeFn != nil {
st.writeFn(st.owner, path, common.BytesToHash(st.val), encodedNode)
// input values.
st.val = t.h.hashData(blob)
// Short circuit if the stack trie is not configured for writing.
if t.options.Writer == nil {
return
}
// Skip committing if the node is on the left boundary and stackTrie is
// configured to filter the boundary.
if t.options.SkipLeftBoundary && bytes.HasPrefix(t.first, path) {
if t.options.boundaryGauge != nil {
t.options.boundaryGauge.Inc(1)
}
return
}
// Skip committing if the node is on the right boundary and stackTrie is
// configured to filter the boundary.
if t.options.SkipRightBoundary && bytes.HasPrefix(t.last, path) {
if t.options.boundaryGauge != nil {
t.options.boundaryGauge.Inc(1)
}
return
}
// Clean up the internal dangling nodes covered by the extension node.
// This should be done before writing the node to adhere to the committing
// order from bottom to top.
for _, path := range internal {
t.options.Cleaner(path)
}
t.options.Writer(path, common.BytesToHash(st.val), blob)
}
// Hash returns the hash of the current node.
func (st *StackTrie) Hash() (h common.Hash) {
hasher := newHasher(false)
defer returnHasherToPool(hasher)
st.hashRec(hasher, nil)
if len(st.val) == 32 {
copy(h[:], st.val)
return h
}
// If the node's RLP isn't 32 bytes long, the node will not
// be hashed, and instead contain the rlp-encoding of the
// node. For the top level node, we need to force the hashing.
hasher.sha.Reset()
hasher.sha.Write(st.val)
hasher.sha.Read(h[:])
return h
}
// Commit will firstly hash the entire trie if it's still not hashed
// and then commit all nodes to the associated database. Actually most
// of the trie nodes MAY have been committed already. The main purpose
// here is to commit the root node.
// Hash will firstly hash the entire trie if it's still not hashed and then commit
// all nodes to the associated database. Actually most of the trie nodes have been
// committed already. The main purpose here is to commit the nodes on right boundary.
//
// The associated database is expected, otherwise the whole commit
// functionality should be disabled.
func (st *StackTrie) Commit() (h common.Hash, err error) {
if st.writeFn == nil {
return common.Hash{}, ErrCommitDisabled
// For stack trie, Hash and Commit are functionally identical.
func (t *StackTrie) Hash() common.Hash {
n := t.root
t.hash(n, nil)
return common.BytesToHash(n.val)
}
hasher := newHasher(false)
defer returnHasherToPool(hasher)
st.hashRec(hasher, nil)
if len(st.val) == 32 {
copy(h[:], st.val)
return h, nil
}
// If the node's RLP isn't 32 bytes long, the node will not
// be hashed (and committed), and instead contain the rlp-encoding of the
// node. For the top level node, we need to force the hashing+commit.
hasher.sha.Reset()
hasher.sha.Write(st.val)
hasher.sha.Read(h[:])
st.writeFn(st.owner, nil, h, st.val)
return h, nil
// Commit will firstly hash the entire trie if it's still not hashed and then commit
// all nodes to the associated database. Actually most of the trie nodes have been
// committed already. The main purpose here is to commit the nodes on right boundary.
//
// For stack trie, Hash and Commit are functionally identical.
func (t *StackTrie) Commit() common.Hash {
return t.Hash()
}
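Taken together, the hunks above replace the per-node writeFn/owner plumbing with an options struct and fold hashing into the trie object itself. Below is a minimal, illustrative sketch (not part of the diff) of how a caller might drive the reworked API, assuming the exported names behave as they are used in this changeset:

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/trie"
)

func main() {
    // Collect every committed node, keyed by its hex path.
    nodes := make(map[string]common.Hash)
    options := trie.NewStackTrieOptions().WithWriter(func(path []byte, hash common.Hash, blob []byte) {
        nodes[string(path)] = hash
    })
    st := trie.NewStackTrie(options)

    // Stack tries require keys to be inserted in ascending order.
    st.MustUpdate(common.FromHex("0x01"), []byte("a"))
    st.MustUpdate(common.FromHex("0x02"), []byte("b"))

    // Hash and Commit are functionally identical for the stack trie.
    root := st.Commit()
    fmt.Println("root:", root, "nodes written:", len(nodes))
}

The writer callback receives the node path, hash and RLP blob, and it is also the point where the boundary filter and the cleaner hook above take effect.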

@ -19,11 +19,14 @@ package trie
import (
"bytes"
"math/big"
"math/rand"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/trie/testutil"
"golang.org/x/exp/slices"
)
func TestStackTrieInsertAndHash(t *testing.T) {
@ -166,12 +169,11 @@ func TestStackTrieInsertAndHash(t *testing.T) {
{"13aa", "x___________________________3", "ff0dc70ce2e5db90ee42a4c2ad12139596b890e90eb4e16526ab38fa465b35cf"},
},
}
st := NewStackTrie(nil)
for i, test := range tests {
// The StackTrie does not allow Insert(), Hash(), Insert(), ...
// so we will create a new trie for every sequence length of inserts.
for l := 1; l <= len(test); l++ {
st.Reset()
st := NewStackTrie(nil)
for j := 0; j < l; j++ {
kv := &test[j]
if err := st.Update(common.FromHex(kv.K), []byte(kv.V)); err != nil {
@ -346,47 +348,86 @@ func TestStacktrieNotModifyValues(t *testing.T) {
}
}
// TestStacktrieSerialization tests that the stacktrie works well if we
// serialize/unserialize it a lot
func TestStacktrieSerialization(t *testing.T) {
func buildPartialTree(entries []*kv, t *testing.T) map[string]common.Hash {
var (
st = NewStackTrie(nil)
nt = NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
keyB = big.NewInt(1)
keyDelta = big.NewInt(1)
vals [][]byte
keys [][]byte
options = NewStackTrieOptions()
nodes = make(map[string]common.Hash)
)
getValue := func(i int) []byte {
if i%2 == 0 { // large
return crypto.Keccak256(big.NewInt(int64(i)).Bytes())
} else { // small
return big.NewInt(int64(i)).Bytes()
var (
first int
last = len(entries) - 1
noLeft bool
noRight bool
)
// Enter split mode if there are at least two elements
if rand.Intn(5) != 0 {
for {
first = rand.Intn(len(entries))
last = rand.Intn(len(entries))
if first <= last {
break
}
}
for i := 0; i < 10; i++ {
vals = append(vals, getValue(i))
keys = append(keys, common.BigToHash(keyB).Bytes())
keyB = keyB.Add(keyB, keyDelta)
keyDelta.Add(keyDelta, common.Big1)
if first != 0 {
noLeft = true
}
for i, k := range keys {
nt.Update(k, common.CopyBytes(vals[i]))
if last != len(entries)-1 {
noRight = true
}
}
options = options.WithSkipBoundary(noLeft, noRight, nil)
options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
nodes[string(path)] = hash
})
tr := NewStackTrie(options)
for i := first; i <= last; i++ {
tr.MustUpdate(entries[i].k, entries[i].v)
}
tr.Commit()
return nodes
}
for i, k := range keys {
blob, err := st.MarshalBinary()
if err != nil {
t.Fatal(err)
func TestPartialStackTrie(t *testing.T) {
for round := 0; round < 100; round++ {
var (
n = rand.Intn(100) + 1
entries []*kv
)
for i := 0; i < n; i++ {
var val []byte
if rand.Intn(3) == 0 {
val = testutil.RandBytes(3)
} else {
val = testutil.RandBytes(32)
}
entries = append(entries, &kv{
k: testutil.RandBytes(32),
v: val,
})
}
slices.SortFunc(entries, (*kv).cmp)
var (
nodes = make(map[string]common.Hash)
options = NewStackTrieOptions().WithWriter(func(path []byte, hash common.Hash, blob []byte) {
nodes[string(path)] = hash
})
)
tr := NewStackTrie(options)
for i := 0; i < len(entries); i++ {
tr.MustUpdate(entries[i].k, entries[i].v)
}
tr.Commit()
for j := 0; j < 100; j++ {
for path, hash := range buildPartialTree(entries, t) {
if nodes[path] != hash {
t.Errorf("%v, want %x, got %x", []byte(path), nodes[path], hash)
}
newSt, err := NewFromBinary(blob, nil)
if err != nil {
t.Fatal(err)
}
st = newSt
st.Update(k, common.CopyBytes(vals[i]))
}
if have, want := st.Hash(), nt.Hash(); have != want {
t.Fatalf("have %#x want %#x", have, want)
}
}
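The new buildPartialTree/TestPartialStackTrie pair exercises the boundary-filtering options introduced above. The following rough sketch, assuming WithSkipBoundary keeps the (skipLeft, skipRight, gauge) signature used in buildPartialTree, hashes only an interior key range:

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/trie"
)

func main() {
    nodes := make(map[string]common.Hash)
    options := trie.NewStackTrieOptions().
        WithSkipBoundary(true, true, nil). // drop nodes on both boundaries; no metrics gauge
        WithWriter(func(path []byte, hash common.Hash, blob []byte) {
            nodes[string(path)] = hash // only interior nodes of the range arrive here
        })
    st := trie.NewStackTrie(options)

    // A contiguous, ascending slice of some larger key space (made-up data).
    for i := byte(4); i < 8; i++ {
        st.MustUpdate([]byte{0x00, i}, []byte{0xff, i})
    }
    fmt.Println("partial root:", st.Commit(), "interior nodes:", len(nodes))
}

Boundary nodes are still hashed, so the returned root stays consistent; they are simply never handed to the writer, which is what the partial-tree test relies on.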

@ -51,6 +51,18 @@ var (
// lookupGauge is the metric to track how many trie node lookups are
// performed to determine if node needs to be deleted.
lookupGauge = metrics.NewRegisteredGauge("trie/sync/lookup", nil)
// accountNodeSyncedGauge is the metric to track how many account trie
// nodes are written during the sync.
accountNodeSyncedGauge = metrics.NewRegisteredGauge("trie/sync/nodes/account", nil)
// storageNodeSyncedGauge is the metric to track how many storage trie
// nodes are written during the sync.
storageNodeSyncedGauge = metrics.NewRegisteredGauge("trie/sync/nodes/storage", nil)
// codeSyncedGauge is the metric to track how many contract codes are
// written during the sync.
codeSyncedGauge = metrics.NewRegisteredGauge("trie/sync/codes", nil)
)
// SyncPath is a path tuple identifying a particular trie node either in a single
@ -362,10 +374,22 @@ func (s *Sync) ProcessNode(result NodeSyncResult) error {
// storage, returning any occurred error.
func (s *Sync) Commit(dbw ethdb.Batch) error {
// Flush the pending node writes into database batch.
var (
account int
storage int
)
for path, value := range s.membatch.nodes {
owner, inner := ResolvePath([]byte(path))
if owner == (common.Hash{}) {
account += 1
} else {
storage += 1
}
rawdb.WriteTrieNode(dbw, owner, inner, s.membatch.hashes[path], value, s.scheme)
}
accountNodeSyncedGauge.Inc(int64(account))
storageNodeSyncedGauge.Inc(int64(storage))
// Flush the pending node deletes into the database batch.
// Please note that each written and deleted node has a
// unique path, ensuring no duplication occurs.
@ -377,6 +401,8 @@ func (s *Sync) Commit(dbw ethdb.Batch) error {
for hash, value := range s.membatch.codes {
rawdb.WriteCode(dbw, hash, value)
}
codeSyncedGauge.Inc(int64(len(s.membatch.codes)))
s.membatch = newSyncMemBatch() // reset the batch
return nil
}
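The Commit changes split the flushed trie nodes into account and storage counts before bumping the new trie/sync/nodes/account and trie/sync/nodes/storage gauges. A small sketch of the same classification, assuming ResolvePath keeps the (owner, inner path) return used above and is callable from outside the package:

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/trie"
)

// classify mirrors the owner check in Commit: a zero owner hash identifies an
// account-trie node, anything else is a node of the storage trie rooted under
// that account.
func classify(path []byte) string {
    owner, _ := trie.ResolvePath(path)
    if owner == (common.Hash{}) {
        return "account trie node"
    }
    return "storage trie node of " + owner.Hex()
}

func main() {
    // A short, ownerless path resolves to the account trie.
    fmt.Println(classify([]byte{0x01, 0x02}))
}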

@ -908,9 +908,12 @@ func TestCommitSequenceStackTrie(t *testing.T) {
trie := NewEmpty(db)
// Another sponge is used for the stacktrie commits
stackTrieSponge := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "b"}
stTrie := NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
rawdb.WriteTrieNode(stackTrieSponge, owner, path, hash, blob, db.Scheme())
options := NewStackTrieOptions()
options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
rawdb.WriteTrieNode(stackTrieSponge, common.Hash{}, path, hash, blob, db.Scheme())
})
stTrie := NewStackTrie(options)
// Fill the trie with elements
for i := 0; i < count; i++ {
// For the stack trie, we need to do inserts in proper order
@ -933,10 +936,7 @@ func TestCommitSequenceStackTrie(t *testing.T) {
db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
db.Commit(root, false)
// And flush stacktrie -> disk
stRoot, err := stTrie.Commit()
if err != nil {
t.Fatalf("Failed to commit stack trie %v", err)
}
stRoot := stTrie.Commit()
if stRoot != root {
t.Fatalf("root wrong, got %x exp %x", stRoot, root)
}
@ -967,9 +967,12 @@ func TestCommitSequenceSmallRoot(t *testing.T) {
trie := NewEmpty(db)
// Another sponge is used for the stacktrie commits
stackTrieSponge := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "b"}
stTrie := NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
rawdb.WriteTrieNode(stackTrieSponge, owner, path, hash, blob, db.Scheme())
options := NewStackTrieOptions()
options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
rawdb.WriteTrieNode(stackTrieSponge, common.Hash{}, path, hash, blob, db.Scheme())
})
stTrie := NewStackTrie(options)
// Add a single small-element to the trie(s)
key := make([]byte, 5)
key[0] = 1
@ -981,10 +984,7 @@ func TestCommitSequenceSmallRoot(t *testing.T) {
db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
db.Commit(root, false)
// And flush stacktrie -> disk
stRoot, err := stTrie.Commit()
if err != nil {
t.Fatalf("Failed to commit stack trie %v", err)
}
stRoot := stTrie.Commit()
if stRoot != root {
t.Fatalf("root wrong, got %x exp %x", stRoot, root)
}
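Both commit-sequence tests now build their stack tries through the options writer and replace the owner argument with a fixed common.Hash{}. The sketch below shows the same wiring against an ordinary key-value store; the memory database and the hash scheme are assumptions made for the example, not something the diff prescribes:

package main

import (
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/trie"
)

func main() {
    db := rawdb.NewMemoryDatabase()
    options := trie.NewStackTrieOptions().WithWriter(func(path []byte, hash common.Hash, blob []byte) {
        // Persist every committed node under the empty owner, i.e. as an
        // account-trie node, just like the sponge-backed tests above.
        rawdb.WriteTrieNode(db, common.Hash{}, path, hash, blob, rawdb.HashScheme)
    })
    st := trie.NewStackTrie(options)
    st.MustUpdate([]byte{0x01}, []byte("value"))
    _ = st.Commit()
}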

@ -571,7 +571,16 @@ func truncateFromHead(db ethdb.Batcher, freezer *rawdb.ResettableFreezer, nhead
if err != nil {
return 0, err
}
if ohead <= nhead {
otail, err := freezer.Tail()
if err != nil {
return 0, err
}
// Ensure that the truncation target falls within the specified range.
if ohead < nhead || nhead < otail {
return 0, fmt.Errorf("out of range, tail: %d, head: %d, target: %d", otail, ohead, nhead)
}
// Short circuit if nothing to truncate.
if ohead == nhead {
return 0, nil
}
// Load the meta objects in range [nhead+1, ohead]
@ -600,11 +609,20 @@ func truncateFromHead(db ethdb.Batcher, freezer *rawdb.ResettableFreezer, nhead
// truncateFromTail removes the extra state histories from the tail with the given
// parameters. It returns the number of items removed from the tail.
func truncateFromTail(db ethdb.Batcher, freezer *rawdb.ResettableFreezer, ntail uint64) (int, error) {
ohead, err := freezer.Ancients()
if err != nil {
return 0, err
}
otail, err := freezer.Tail()
if err != nil {
return 0, err
}
if otail >= ntail {
// Ensure that the truncation target falls within the specified range.
if otail > ntail || ntail > ohead {
return 0, fmt.Errorf("out of range, tail: %d, head: %d, target: %d", otail, ohead, ntail)
}
// Short circuit if nothing to truncate.
if otail == ntail {
return 0, nil
}
// Load the meta objects in range [otail+1, ntail]

@ -224,6 +224,50 @@ func TestTruncateTailHistories(t *testing.T) {
}
}
func TestTruncateOutOfRange(t *testing.T) {
var (
hs = makeHistories(10)
db = rawdb.NewMemoryDatabase()
freezer, _ = openFreezer(t.TempDir(), false)
)
defer freezer.Close()
for i := 0; i < len(hs); i++ {
accountData, storageData, accountIndex, storageIndex := hs[i].encode()
rawdb.WriteStateHistory(freezer, uint64(i+1), hs[i].meta.encode(), accountIndex, storageIndex, accountData, storageData)
rawdb.WriteStateID(db, hs[i].meta.root, uint64(i+1))
}
truncateFromTail(db, freezer, uint64(len(hs)/2))
// Ensure out-of-range truncations are rejected correctly.
head, _ := freezer.Ancients()
tail, _ := freezer.Tail()
cases := []struct {
mode int
target uint64
expErr error
}{
{0, head, nil}, // nothing to delete
{0, head + 1, fmt.Errorf("out of range, tail: %d, head: %d, target: %d", tail, head, head+1)},
{0, tail - 1, fmt.Errorf("out of range, tail: %d, head: %d, target: %d", tail, head, tail-1)},
{1, tail, nil}, // nothing to delete
{1, head + 1, fmt.Errorf("out of range, tail: %d, head: %d, target: %d", tail, head, head+1)},
{1, tail - 1, fmt.Errorf("out of range, tail: %d, head: %d, target: %d", tail, head, tail-1)},
}
for _, c := range cases {
var gotErr error
if c.mode == 0 {
_, gotErr = truncateFromHead(db, freezer, c.target)
} else {
_, gotErr = truncateFromTail(db, freezer, c.target)
}
if !reflect.DeepEqual(gotErr, c.expErr) {
t.Errorf("Unexpected error, want: %v, got: %v", c.expErr, gotErr)
}
}
}
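TestTruncateOutOfRange pins down the new contract of both truncation helpers: the target must lie inside the freezer's current [tail, head] window, truncating the head to the current head (or the tail to the current tail) is a no-op, and anything outside the window is rejected. A self-contained sketch of just that bounds check (an illustrative helper, not code from the diff):

package main

import "fmt"

// validateTruncationTarget mirrors the range check shared by truncateFromHead
// and truncateFromTail: the target id must fall inside [tail, head].
func validateTruncationTarget(tail, head, target uint64) error {
    if target < tail || target > head {
        return fmt.Errorf("out of range, tail: %d, head: %d, target: %d", tail, head, target)
    }
    return nil
}

func main() {
    // Mirroring the test setup above: ten histories written, the first five
    // pruned from the tail, so tail=5 and head=10.
    fmt.Println(validateTruncationTarget(5, 10, 11)) // rejected
    fmt.Println(validateTruncationTarget(5, 10, 10)) // accepted; head truncation is then a no-op
}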
// openFreezer initializes the freezer instance for storing state histories.
func openFreezer(datadir string, readOnly bool) (*rawdb.ResettableFreezer, error) {
return rawdb.NewStateFreezer(datadir, readOnly, 0)