commit
4566ac7659
22
CHANGELOG.md
22
CHANGELOG.md
@ -1,4 +1,26 @@
|
||||
# Changelog
|
||||
## v1.4.9
|
||||
### FEATURE
|
||||
* [\#2463](https://github.com/bnb-chain/bsc/pull/2463) utils: add check_blobtx.js
|
||||
* [\#2470](https://github.com/bnb-chain/bsc/pull/2470) jsutils: faucet successful requests within blocks
|
||||
* [\#2467](https://github.com/bnb-chain/bsc/pull/2467) internal/ethapi: add optional parameter for blobSidecars
|
||||
|
||||
### IMPROVEMENT
|
||||
* [\#2462](https://github.com/bnb-chain/bsc/pull/2462) cmd/utils: add a flag to change breathe block interval for testing
|
||||
* [\#2497](https://github.com/bnb-chain/bsc/pull/2497) params/config: add Bohr hardfork
|
||||
* [\#2479](https://github.com/bnb-chain/bsc/pull/2479) dev: ensure consistency in BPS bundle result
|
||||
|
||||
### BUGFIX
|
||||
* [\#2461](https://github.com/bnb-chain/bsc/pull/2461) eth/handler: check lists in body before broadcast blocks
|
||||
* [\#2455](https://github.com/bnb-chain/bsc/pull/2455) cmd: fix memory leak when big dataset
|
||||
* [\#2466](https://github.com/bnb-chain/bsc/pull/2466) sync: fix some sync issues caused by prune-block.
|
||||
* [\#2475](https://github.com/bnb-chain/bsc/pull/2475) fix: move mev op to MinerAPI & add command to console
|
||||
* [\#2473](https://github.com/bnb-chain/bsc/pull/2473) fix: limit the gas price of the mev bid
|
||||
* [\#2484](https://github.com/bnb-chain/bsc/pull/2484) fix: fix inspect database error
|
||||
* [\#2481](https://github.com/bnb-chain/bsc/pull/2481) fix: keep 9W blocks in ancient db when prune block
|
||||
* [\#2495](https://github.com/bnb-chain/bsc/pull/2495) fix: add an empty freeze db
|
||||
* [\#2507](https://github.com/bnb-chain/bsc/pull/2507) fix: waiting for the last simulation before pick best bid
|
||||
|
||||
## v1.4.8
|
||||
### FEATURE
|
||||
* [\#2483](https://github.com/bnb-chain/bsc/pull/2483) core/vm: add secp256r1 into PrecompiledContractsHaber
|
||||
|
@ -36,7 +36,7 @@ To combine DPoS and PoA for consensus, BNB Smart Chain implement a novel consens
|
||||
2. Validators take turns to produce blocks in a PoA manner, similar to Ethereum's Clique consensus engine.
|
||||
3. Validator set are elected in and out based on a staking based governance on BNB Beacon Chain.
|
||||
4. The validator set change is relayed via a cross-chain communication mechanism.
|
||||
5. Parlia consensus engine will interact with a set of [system contracts](https://docs.bnbchain.org/docs/learn/system-contract) to achieve liveness slash, revenue distributing and validator set renewing func.
|
||||
5. Parlia consensus engine will interact with a set of [system contracts](https://docs.bnbchain.org/bnb-smart-chain/staking/overview/#system-contracts) to achieve liveness slash, revenue distributing and validator set renewing func.
|
||||
|
||||
|
||||
### Light Client of BNB Beacon Chain
|
||||
@ -183,7 +183,7 @@ This tool is optional and if you leave it out you can always attach to an alread
|
||||
|
||||
#### 7. More
|
||||
|
||||
More details about [running a node](https://docs.bnbchain.org/docs/validator/fullnode) and [becoming a validator](https://docs.bnbchain.org/docs/validator/create-val)
|
||||
More details about [running a node](https://docs.bnbchain.org/bnb-smart-chain/developers/node_operators/full_node/) and [becoming a validator](https://docs.bnbchain.org/bnb-smart-chain/validator/create-val/)
|
||||
|
||||
*Note: Although some internal protective measures prevent transactions from
|
||||
crossing over between the main network and test network, you should always
|
||||
|
@ -64,6 +64,7 @@ var (
|
||||
utils.CachePreimagesFlag,
|
||||
utils.OverrideCancun,
|
||||
utils.OverrideHaber,
|
||||
utils.OverrideBohr,
|
||||
utils.OverrideVerkle,
|
||||
}, utils.DatabaseFlags),
|
||||
Description: `
|
||||
@ -261,6 +262,10 @@ func initGenesis(ctx *cli.Context) error {
|
||||
v := ctx.Uint64(utils.OverrideHaber.Name)
|
||||
overrides.OverrideHaber = &v
|
||||
}
|
||||
if ctx.IsSet(utils.OverrideBohr.Name) {
|
||||
v := ctx.Uint64(utils.OverrideBohr.Name)
|
||||
overrides.OverrideBohr = &v
|
||||
}
|
||||
if ctx.IsSet(utils.OverrideVerkle.Name) {
|
||||
v := ctx.Uint64(utils.OverrideVerkle.Name)
|
||||
overrides.OverrideVerkle = &v
|
||||
|
@ -193,6 +193,10 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
|
||||
v := ctx.Uint64(utils.OverrideHaber.Name)
|
||||
cfg.Eth.OverrideHaber = &v
|
||||
}
|
||||
if ctx.IsSet(utils.OverrideBohr.Name) {
|
||||
v := ctx.Uint64(utils.OverrideBohr.Name)
|
||||
cfg.Eth.OverrideBohr = &v
|
||||
}
|
||||
if ctx.IsSet(utils.OverrideVerkle.Name) {
|
||||
v := ctx.Uint64(utils.OverrideVerkle.Name)
|
||||
cfg.Eth.OverrideVerkle = &v
|
||||
|
@ -106,12 +106,12 @@ Remove blockchain and state databases`,
|
||||
dbInspectTrieCmd = &cli.Command{
|
||||
Action: inspectTrie,
|
||||
Name: "inspect-trie",
|
||||
ArgsUsage: "<blocknum> <jobnum>",
|
||||
ArgsUsage: "<blocknum> <jobnum> <topn>",
|
||||
Flags: []cli.Flag{
|
||||
utils.DataDirFlag,
|
||||
utils.SyncModeFlag,
|
||||
},
|
||||
Usage: "Inspect the MPT tree of the account and contract.",
|
||||
Usage: "Inspect the MPT tree of the account and contract. 'blocknum' can be latest/snapshot/number. 'topn' means output the top N storage tries info ranked by the total number of TrieNodes",
|
||||
Description: `This commands iterates the entrie WorldState.`,
|
||||
}
|
||||
dbCheckStateContentCmd = &cli.Command{
|
||||
@ -386,6 +386,7 @@ func inspectTrie(ctx *cli.Context) error {
|
||||
blockNumber uint64
|
||||
trieRootHash common.Hash
|
||||
jobnum uint64
|
||||
topN uint64
|
||||
)
|
||||
|
||||
stack, _ := makeConfigNode(ctx)
|
||||
@ -405,24 +406,37 @@ func inspectTrie(ctx *cli.Context) error {
|
||||
var err error
|
||||
blockNumber, err = strconv.ParseUint(ctx.Args().Get(0), 10, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to Parse blocknum, Args[0]: %v, err: %v", ctx.Args().Get(0), err)
|
||||
return fmt.Errorf("failed to parse blocknum, Args[0]: %v, err: %v", ctx.Args().Get(0), err)
|
||||
}
|
||||
}
|
||||
|
||||
if ctx.NArg() == 1 {
|
||||
jobnum = 1000
|
||||
topN = 10
|
||||
} else if ctx.NArg() == 2 {
|
||||
var err error
|
||||
jobnum, err = strconv.ParseUint(ctx.Args().Get(1), 10, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse jobnum, Args[1]: %v, err: %v", ctx.Args().Get(1), err)
|
||||
}
|
||||
topN = 10
|
||||
} else {
|
||||
var err error
|
||||
jobnum, err = strconv.ParseUint(ctx.Args().Get(1), 10, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to Parse jobnum, Args[1]: %v, err: %v", ctx.Args().Get(1), err)
|
||||
return fmt.Errorf("failed to parse jobnum, Args[1]: %v, err: %v", ctx.Args().Get(1), err)
|
||||
}
|
||||
|
||||
topN, err = strconv.ParseUint(ctx.Args().Get(2), 10, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse topn, Args[2]: %v, err: %v", ctx.Args().Get(2), err)
|
||||
}
|
||||
}
|
||||
|
||||
if blockNumber != math.MaxUint64 {
|
||||
headerBlockHash = rawdb.ReadCanonicalHash(db, blockNumber)
|
||||
if headerBlockHash == (common.Hash{}) {
|
||||
return errors.New("ReadHeadBlockHash empry hash")
|
||||
return errors.New("ReadHeadBlockHash empty hash")
|
||||
}
|
||||
blockHeader := rawdb.ReadHeader(db, headerBlockHash, blockNumber)
|
||||
trieRootHash = blockHeader.Root
|
||||
@ -437,6 +451,7 @@ func inspectTrie(ctx *cli.Context) error {
|
||||
if dbScheme == rawdb.PathScheme {
|
||||
config = &triedb.Config{
|
||||
PathDB: utils.PathDBConfigAddJournalFilePath(stack, pathdb.ReadOnly),
|
||||
Cache: 0,
|
||||
}
|
||||
} else if dbScheme == rawdb.HashScheme {
|
||||
config = triedb.HashDefaults
|
||||
@ -448,7 +463,7 @@ func inspectTrie(ctx *cli.Context) error {
|
||||
fmt.Printf("fail to new trie tree, err: %v, rootHash: %v\n", err, trieRootHash.String())
|
||||
return err
|
||||
}
|
||||
theInspect, err := trie.NewInspector(theTrie, triedb, trieRootHash, blockNumber, jobnum)
|
||||
theInspect, err := trie.NewInspector(theTrie, triedb, trieRootHash, blockNumber, jobnum, int(topN))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -493,7 +508,7 @@ func ancientInspect(ctx *cli.Context) error {
|
||||
stack, _ := makeConfigNode(ctx)
|
||||
defer stack.Close()
|
||||
|
||||
db := utils.MakeChainDatabase(ctx, stack, true, true)
|
||||
db := utils.MakeChainDatabase(ctx, stack, true, false)
|
||||
defer db.Close()
|
||||
return rawdb.AncientInspect(db)
|
||||
}
|
||||
|
@ -74,6 +74,7 @@ var (
|
||||
utils.RialtoHash,
|
||||
utils.OverrideCancun,
|
||||
utils.OverrideHaber,
|
||||
utils.OverrideBohr,
|
||||
utils.OverrideVerkle,
|
||||
utils.OverrideFullImmutabilityThreshold,
|
||||
utils.OverrideMinBlocksForBlobRequests,
|
||||
|
@ -75,7 +75,7 @@ func NewLevelDBDatabaseWithFreezer(file string, cache int, handles int, ancient
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
frdb, err := rawdb.NewDatabaseWithFreezer(kvdb, ancient, namespace, readonly, disableFreeze, isLastOffset, pruneAncientData)
|
||||
frdb, err := rawdb.NewDatabaseWithFreezer(kvdb, ancient, namespace, readonly, disableFreeze, isLastOffset, pruneAncientData, false)
|
||||
if err != nil {
|
||||
kvdb.Close()
|
||||
return nil, err
|
||||
@ -178,11 +178,10 @@ func BlockchainCreator(t *testing.T, chaindbPath, AncientPath string, blockRemai
|
||||
|
||||
// Force run a freeze cycle
|
||||
type freezer interface {
|
||||
Freeze() error
|
||||
Freeze(threshold uint64) error
|
||||
Ancients() (uint64, error)
|
||||
}
|
||||
blockchain.SetFinalized(blocks[len(blocks)-1].Header())
|
||||
db.(freezer).Freeze()
|
||||
db.(freezer).Freeze(10)
|
||||
|
||||
frozen, err := db.Ancients()
|
||||
//make sure there're frozen items
|
||||
|
@ -43,9 +43,11 @@ import (
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/metrics"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ethereum/go-ethereum/trie"
|
||||
"github.com/ethereum/go-ethereum/triedb"
|
||||
"github.com/ethereum/go-ethereum/triedb/pathdb"
|
||||
cli "github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
@ -245,7 +247,16 @@ func accessDb(ctx *cli.Context, stack *node.Node) (ethdb.Database, error) {
|
||||
NoBuild: true,
|
||||
AsyncBuild: false,
|
||||
}
|
||||
snaptree, err := snapshot.New(snapconfig, chaindb, triedb.NewDatabase(chaindb, nil), headBlock.Root(), TriesInMemory, false)
|
||||
dbScheme := rawdb.ReadStateScheme(chaindb)
|
||||
var config *triedb.Config
|
||||
if dbScheme == rawdb.PathScheme {
|
||||
config = &triedb.Config{
|
||||
PathDB: utils.PathDBConfigAddJournalFilePath(stack, pathdb.ReadOnly),
|
||||
}
|
||||
} else if dbScheme == rawdb.HashScheme {
|
||||
config = triedb.HashDefaults
|
||||
}
|
||||
snaptree, err := snapshot.New(snapconfig, chaindb, triedb.NewDatabase(chaindb, config), headBlock.Root(), TriesInMemory, false)
|
||||
if err != nil {
|
||||
log.Error("snaptree error", "err", err)
|
||||
return nil, err // The relevant snapshot(s) might not exist
|
||||
@ -333,6 +344,9 @@ func pruneBlock(ctx *cli.Context) error {
|
||||
stack, config = makeConfigNode(ctx)
|
||||
defer stack.Close()
|
||||
blockAmountReserved = ctx.Uint64(utils.BlockAmountReserved.Name)
|
||||
if blockAmountReserved < params.FullImmutabilityThreshold {
|
||||
return fmt.Errorf("block-amount-reserved must be greater than or equal to %d", params.FullImmutabilityThreshold)
|
||||
}
|
||||
chaindb, err = accessDb(ctx, stack)
|
||||
if err != nil {
|
||||
return err
|
||||
|
51
cmd/jsutils/check_blobtx.js
Normal file
51
cmd/jsutils/check_blobtx.js
Normal file
@ -0,0 +1,51 @@
|
||||
import { ethers } from "ethers";
import program from "commander";

// Scan a block range and report every blob-carrying (EIP-4844, type-3) transaction.
// depends on ethjs v6.11.0+ for 4844, https://github.com/ethers-io/ethers.js/releases/tag/v6.11.0
// BSC testnet enabled 4844 on block: 39539137
// Usage:
//    nvm use 20
//    node check_blobtx.js --rpc https://data-seed-prebsc-1-s1.binance.org:8545 --startNum 39539137
//    node check_blobtx.js --rpc https://data-seed-prebsc-1-s1.binance.org:8545 --startNum 39539137 --endNum 40345994
program.option("--rpc <Rpc>", "Rpc Server URL");
program.option("--startNum <Num>", "start block", 0);
program.option("--endNum <Num>", "end block", 0);
program.parse(process.argv);

const provider = new ethers.JsonRpcProvider(program.rpc);
const main = async () => {
    var startBlock = parseInt(program.startNum)
    var endBlock = parseInt(program.endNum)
    if (isNaN(endBlock) || isNaN(startBlock) || startBlock == 0) {
        // Report the real flag name (--endNum, not --end) so users can correct their input.
        console.error("invalid input, --startNum", program.startNum, "--endNum", program.endNum)
        return
    }
    // if --endNum is not specified, set it to the latest block number.
    if (endBlock == 0) {
        endBlock = await provider.getBlockNumber();
    }
    if (startBlock > endBlock) {
        console.error("invalid input, startBlock:",startBlock, " endBlock:", endBlock);
        return
    }

    for (let i = startBlock; i <= endBlock; i++) {
        let blockData = await provider.getBlock(i);
        console.log("startBlock:",startBlock, "endBlock:", endBlock, "curBlock", i, "blobGasUsed", blockData.blobGasUsed);
        // A block that consumed no blob gas cannot contain blob transactions; skip it.
        if (blockData.blobGasUsed == 0) {
            continue
        }
        for (let txIndex = 0; txIndex <= blockData.transactions.length - 1; txIndex++) {
            let txHash = blockData.transactions[txIndex]
            let txData = await provider.getTransaction(txHash);
            // Transaction type 3 == EIP-4844 blob transaction.
            if (txData.type == 3) {
                console.log("BlobTx in block:",i, " txIndex:", txIndex, " txHash:", txHash);
            }
        }
    }
};
main().then(() => process.exit(0))
    .catch((error) => {
        console.error(error);
        process.exit(1);
    });
|
49
cmd/jsutils/faucet_request.js
Normal file
49
cmd/jsutils/faucet_request.js
Normal file
@ -0,0 +1,49 @@
|
||||
import { ethers } from "ethers";
import program from "commander";

// Estimate how many faucet requests succeeded within a block range by dividing
// the faucet account's balance drop by the fixed per-request payout (0.3 BNB).
// Usage:
//    node faucet_request.js --rpc localhost:8545 --startNum 39539137
//    node faucet_request.js --rpc localhost:8545 --startNum 39539137 --endNum 40345994

// node faucet_request.js --rpc https://data-seed-prebsc-1-s1.bnbchain.org:8545 --startNum 39539137 --endNum 40345994
program.option("--rpc <Rpc>", "Rpc Server URL");
program.option("--startNum <Num>", "start block", 0);
program.option("--endNum <Num>", "end block", 0);
program.parse(process.argv);

const provider = new ethers.JsonRpcProvider(program.rpc);
const main = async () => {
    var startBlock = parseInt(program.startNum)
    var endBlock = parseInt(program.endNum)
    if (isNaN(endBlock) || isNaN(startBlock) || startBlock == 0) {
        // Report the real flag name (--endNum, not --end) so users can correct their input.
        console.error("invalid input, --startNum", program.startNum, "--endNum", program.endNum)
        return
    }
    // if --endNum is not specified, set it to the latest block number.
    if (endBlock == 0) {
        endBlock = await provider.getBlockNumber();
    }
    if (startBlock > endBlock) {
        console.error("invalid input, startBlock:",startBlock, " endBlock:", endBlock);
        return
    }

    // Balance of the faucet account at the two range boundaries (wei, BigInt).
    let startBalance = await provider.getBalance("0xaa25Aa7a19f9c426E07dee59b12f944f4d9f1DD3", startBlock)
    let endBalance = await provider.getBalance("0xaa25Aa7a19f9c426E07dee59b12f944f4d9f1DD3", endBlock)
    const faucetAmount = BigInt(0.3 * 10**18); // Convert 0.3 ether to wei as a BigInt
    // BigInt division truncates, so this undercounts by at most one request.
    const numFaucetRequest = (startBalance - endBalance) / faucetAmount;

    // Convert BigInt to ether
    const startBalanceEth = Number(startBalance) / 10**18;
    const endBalanceEth = Number(endBalance) / 10**18;

    console.log(`Start Balance: ${startBalanceEth} ETH`);
    console.log(`End Balance: ${endBalanceEth} ETH`);

    console.log("successful faucet request: ",numFaucetRequest);
};
main().then(() => process.exit(0))
    .catch((error) => {
        console.error(error);
        process.exit(1);
    });
|
@ -315,6 +315,11 @@ var (
|
||||
Usage: "Manually specify the Haber fork timestamp, overriding the bundled setting",
|
||||
Category: flags.EthCategory,
|
||||
}
|
||||
OverrideBohr = &cli.Uint64Flag{
|
||||
Name: "override.bohr",
|
||||
Usage: "Manually specify the Bohr fork timestamp, overriding the bundled setting",
|
||||
Category: flags.EthCategory,
|
||||
}
|
||||
OverrideVerkle = &cli.Uint64Flag{
|
||||
Name: "override.verkle",
|
||||
Usage: "Manually specify the Verkle fork timestamp, overriding the bundled setting",
|
||||
@ -1082,6 +1087,7 @@ Please note that --` + MetricsHTTPFlag.Name + ` must be set to start the server.
|
||||
Name: "block-amount-reserved",
|
||||
Usage: "Sets the expected remained amount of blocks for offline block prune",
|
||||
Category: flags.BlockHistoryCategory,
|
||||
Value: params.FullImmutabilityThreshold,
|
||||
}
|
||||
|
||||
CheckSnapshotWithMPT = &cli.BoolFlag{
|
||||
|
@ -163,7 +163,7 @@ func TestHistoryImportAndExport(t *testing.T) {
|
||||
|
||||
// Now import Era.
|
||||
freezer := t.TempDir()
|
||||
db2, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false, false, false, false)
|
||||
db2, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false, false, false, false, false)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
@ -66,6 +66,31 @@ func NewBlockValidator(config *params.ChainConfig, blockchain *BlockChain, engin
|
||||
return validator
|
||||
}
|
||||
|
||||
// ValidateListsInBody validates that UncleHash, TxHash, and WithdrawalsHash
// correspond to the uncle, transaction, and withdrawal lists in the block body,
// respectively. Each hash is recomputed from the body list and compared against
// the header field, so a block whose body was tampered with or truncated in
// transit is rejected before further processing.
func ValidateListsInBody(block *types.Block) error {
	header := block.Header()
	// Recompute the uncle hash from the body's uncle list.
	if hash := types.CalcUncleHash(block.Uncles()); hash != header.UncleHash {
		return fmt.Errorf("uncle root hash mismatch (header value %x, calculated %x)", header.UncleHash, hash)
	}
	// Recompute the transaction trie root from the body's transaction list.
	if hash := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil)); hash != header.TxHash {
		return fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, header.TxHash)
	}
	// Withdrawals are present after the Shanghai fork.
	if header.WithdrawalsHash != nil {
		// Withdrawals list must be present in body after Shanghai.
		if block.Withdrawals() == nil {
			return errors.New("missing withdrawals in block body")
		}
		if hash := types.DeriveSha(block.Withdrawals(), trie.NewStackTrie(nil)); hash != *header.WithdrawalsHash {
			return fmt.Errorf("withdrawals root hash mismatch (header value %x, calculated %x)", *header.WithdrawalsHash, hash)
		}
	} else if block.Withdrawals() != nil { // Withdrawals turn into empty from nil when BlockBody has Sidecars
		// Withdrawals are not allowed prior to shanghai fork
		return errors.New("withdrawals present in block body")
	}
	return nil
}
|
||||
|
||||
// ValidateBody validates the given block's uncles and verifies the block
|
||||
// header's transaction and uncle roots. The headers are assumed to be already
|
||||
// validated at this point.
|
||||
@ -83,31 +108,12 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
|
||||
if err := v.engine.VerifyUncles(v.bc, block); err != nil {
|
||||
return err
|
||||
}
|
||||
if hash := types.CalcUncleHash(block.Uncles()); hash != header.UncleHash {
|
||||
return fmt.Errorf("uncle root hash mismatch (header value %x, calculated %x)", header.UncleHash, hash)
|
||||
}
|
||||
|
||||
validateFuns := []func() error{
|
||||
func() error {
|
||||
if hash := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil)); hash != header.TxHash {
|
||||
return fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, header.TxHash)
|
||||
}
|
||||
return nil
|
||||
return ValidateListsInBody(block)
|
||||
},
|
||||
func() error {
|
||||
// Withdrawals are present after the Shanghai fork.
|
||||
if header.WithdrawalsHash != nil {
|
||||
// Withdrawals list must be present in body after Shanghai.
|
||||
if block.Withdrawals() == nil {
|
||||
return errors.New("missing withdrawals in block body")
|
||||
}
|
||||
if hash := types.DeriveSha(block.Withdrawals(), trie.NewStackTrie(nil)); hash != *header.WithdrawalsHash {
|
||||
return fmt.Errorf("withdrawals root hash mismatch (header value %x, calculated %x)", *header.WithdrawalsHash, hash)
|
||||
}
|
||||
} else if block.Withdrawals() != nil { // Withdrawals turn into empty from nil when BlockBody has Sidecars
|
||||
// Withdrawals are not allowed prior to shanghai fork
|
||||
return errors.New("withdrawals present in block body")
|
||||
}
|
||||
// Blob transactions may be present after the Cancun fork.
|
||||
var blobs int
|
||||
for i, tx := range block.Transactions() {
|
||||
|
@ -511,3 +511,12 @@ func (bc *BlockChain) SubscribeBlockProcessingEvent(ch chan<- bool) event.Subscr
|
||||
func (bc *BlockChain) SubscribeFinalizedHeaderEvent(ch chan<- FinalizedHeaderEvent) event.Subscription {
|
||||
return bc.scope.Track(bc.finalizedHeaderFeed.Subscribe(ch))
|
||||
}
|
||||
|
||||
// AncientTail retrieves the tail of the ancient blocks — presumably the number
// of the first item still retained in the ancient (freezer) store, with items
// below it pruned. NOTE(review): semantics come from the underlying db.Tail();
// confirm against the ethdb AncientStore contract.
func (bc *BlockChain) AncientTail() (uint64, error) {
	tail, err := bc.db.Tail()
	if err != nil {
		return 0, err
	}
	return tail, nil
}
|
||||
|
@ -1832,14 +1832,10 @@ func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme s
|
||||
}
|
||||
// Force run a freeze cycle
|
||||
type freezer interface {
|
||||
Freeze() error
|
||||
Freeze(threshold uint64) error
|
||||
Ancients() (uint64, error)
|
||||
}
|
||||
if tt.freezeThreshold < uint64(tt.canonicalBlocks) {
|
||||
final := uint64(tt.canonicalBlocks) - tt.freezeThreshold
|
||||
chain.SetFinalized(canonblocks[int(final)-1].Header())
|
||||
}
|
||||
db.(freezer).Freeze()
|
||||
db.(freezer).Freeze(tt.freezeThreshold)
|
||||
|
||||
// Set the simulated pivot block
|
||||
if tt.pivotBlock != nil {
|
||||
|
@ -2045,14 +2045,10 @@ func testSetHeadWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme
|
||||
|
||||
// Force run a freeze cycle
|
||||
type freezer interface {
|
||||
Freeze() error
|
||||
Freeze(threshold uint64) error
|
||||
Ancients() (uint64, error)
|
||||
}
|
||||
if tt.freezeThreshold < uint64(tt.canonicalBlocks) {
|
||||
final := uint64(tt.canonicalBlocks) - tt.freezeThreshold
|
||||
chain.SetFinalized(canonblocks[int(final)-1].Header())
|
||||
}
|
||||
db.(freezer).Freeze()
|
||||
db.(freezer).Freeze(tt.freezeThreshold)
|
||||
|
||||
// Set the simulated pivot block
|
||||
if tt.pivotBlock != nil {
|
||||
|
@ -974,7 +974,7 @@ func testFastVsFullChains(t *testing.T, scheme string) {
|
||||
t.Fatalf("failed to insert receipt %d: %v", n, err)
|
||||
}
|
||||
// Freezer style fast import the chain.
|
||||
ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false)
|
||||
ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create temp freezer db: %v", err)
|
||||
}
|
||||
@ -1069,7 +1069,7 @@ func testLightVsFastVsFullChainHeads(t *testing.T, scheme string) {
|
||||
|
||||
// makeDb creates a db instance for testing.
|
||||
makeDb := func() ethdb.Database {
|
||||
db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false)
|
||||
db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create temp freezer db: %v", err)
|
||||
}
|
||||
@ -1957,7 +1957,7 @@ func testLargeReorgTrieGC(t *testing.T, scheme string) {
|
||||
competitor, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*TriesInMemory+1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{3}) })
|
||||
|
||||
// Import the shared chain and the original canonical one
|
||||
db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false)
|
||||
db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
|
||||
defer db.Close()
|
||||
|
||||
chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil)
|
||||
@ -2026,7 +2026,7 @@ func testBlockchainRecovery(t *testing.T, scheme string) {
|
||||
_, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), int(height), nil)
|
||||
|
||||
// Import the chain as a ancient-first node and ensure all pointers are updated
|
||||
ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false)
|
||||
ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create temp freezer db: %v", err)
|
||||
}
|
||||
@ -2097,7 +2097,7 @@ func testInsertReceiptChainRollback(t *testing.T, scheme string) {
|
||||
}
|
||||
|
||||
// Set up a BlockChain that uses the ancient store.
|
||||
ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false)
|
||||
ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create temp freezer db: %v", err)
|
||||
}
|
||||
@ -2167,7 +2167,7 @@ func testLowDiffLongChain(t *testing.T, scheme string) {
|
||||
})
|
||||
|
||||
// Import the canonical chain
|
||||
diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false)
|
||||
diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
|
||||
defer diskdb.Close()
|
||||
|
||||
chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil)
|
||||
@ -2384,7 +2384,7 @@ func testInsertKnownChainData(t *testing.T, typ string, scheme string) {
|
||||
b.OffsetTime(-9) // A higher difficulty
|
||||
})
|
||||
// Import the shared chain and the original canonical one
|
||||
chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false)
|
||||
chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create temp freezer db: %v", err)
|
||||
}
|
||||
@ -2555,7 +2555,7 @@ func testInsertKnownChainDataWithMerging(t *testing.T, typ string, mergeHeight i
|
||||
}
|
||||
})
|
||||
// Import the shared chain and the original canonical one
|
||||
chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false)
|
||||
chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create temp freezer db: %v", err)
|
||||
}
|
||||
@ -3858,7 +3858,7 @@ func testSetCanonical(t *testing.T, scheme string) {
|
||||
}
|
||||
gen.AddTx(tx)
|
||||
})
|
||||
diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false)
|
||||
diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
|
||||
defer diskdb.Close()
|
||||
|
||||
chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
|
||||
@ -4483,7 +4483,7 @@ func (c *mockParlia) CalcDifficulty(chain consensus.ChainHeaderReader, time uint
|
||||
func TestParliaBlobFeeReward(t *testing.T) {
|
||||
// Have N headers in the freezer
|
||||
frdir := t.TempDir()
|
||||
db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false, false)
|
||||
db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false, false, false)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create database with ancient backend")
|
||||
}
|
||||
|
@ -218,6 +218,7 @@ func (e *GenesisMismatchError) Error() string {
|
||||
type ChainOverrides struct {
|
||||
OverrideCancun *uint64
|
||||
OverrideHaber *uint64
|
||||
OverrideBohr *uint64
|
||||
OverrideVerkle *uint64
|
||||
}
|
||||
|
||||
@ -250,6 +251,9 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *triedb.Database, g
|
||||
if overrides != nil && overrides.OverrideHaber != nil {
|
||||
config.HaberTime = overrides.OverrideHaber
|
||||
}
|
||||
if overrides != nil && overrides.OverrideBohr != nil {
|
||||
config.BohrTime = overrides.OverrideBohr
|
||||
}
|
||||
if overrides != nil && overrides.OverrideVerkle != nil {
|
||||
config.VerkleTime = overrides.OverrideVerkle
|
||||
}
|
||||
|
@ -518,7 +518,7 @@ func checkBlobSidecarsRLP(have, want types.BlobSidecars) error {
|
||||
func TestAncientStorage(t *testing.T) {
|
||||
// Freezer style fast import the chain.
|
||||
frdir := t.TempDir()
|
||||
db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false, false, false, false)
|
||||
db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false, false, false, false, false)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create database with ancient backend")
|
||||
}
|
||||
@ -657,7 +657,7 @@ func TestHashesInRange(t *testing.T) {
|
||||
func BenchmarkWriteAncientBlocks(b *testing.B) {
|
||||
// Open freezer database.
|
||||
frdir := b.TempDir()
|
||||
db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false, false, false, false)
|
||||
db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false, false, false, false, false)
|
||||
if err != nil {
|
||||
b.Fatalf("failed to create database with ancient backend")
|
||||
}
|
||||
@ -1001,7 +1001,7 @@ func TestHeadersRLPStorage(t *testing.T) {
|
||||
// Have N headers in the freezer
|
||||
frdir := t.TempDir()
|
||||
|
||||
db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false, false, false, false)
|
||||
db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false, false, false, false, false)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create database with ancient backend")
|
||||
}
|
||||
|
@ -24,12 +24,12 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -51,25 +51,31 @@ var (
|
||||
// The background thread will keep moving ancient chain segments from key-value
|
||||
// database to flat files for saving space on live database.
|
||||
type chainFreezer struct {
|
||||
threshold atomic.Uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests)
|
||||
|
||||
*Freezer
|
||||
quit chan struct{}
|
||||
wg sync.WaitGroup
|
||||
trigger chan chan struct{} // Manual blocking freeze trigger, test determinism
|
||||
|
||||
freezeEnv atomic.Value
|
||||
|
||||
multiDatabase bool
|
||||
}
|
||||
|
||||
// newChainFreezer initializes the freezer for ancient chain data.
|
||||
func newChainFreezer(datadir string, namespace string, readonly bool, offset uint64) (*chainFreezer, error) {
|
||||
func newChainFreezer(datadir string, namespace string, readonly bool, offset uint64, multiDatabase bool) (*chainFreezer, error) {
|
||||
freezer, err := NewChainFreezer(datadir, namespace, readonly, offset)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &chainFreezer{
|
||||
cf := chainFreezer{
|
||||
Freezer: freezer,
|
||||
quit: make(chan struct{}),
|
||||
trigger: make(chan chan struct{}),
|
||||
}, nil
|
||||
}
|
||||
cf.threshold.Store(params.FullImmutabilityThreshold)
|
||||
return &cf, nil
|
||||
}
|
||||
|
||||
// Close closes the chain freezer instance and terminates the background thread.
|
||||
@ -185,29 +191,101 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) {
|
||||
continue
|
||||
}
|
||||
|
||||
threshold, err := f.freezeThreshold(nfdb)
|
||||
if err != nil {
|
||||
backoff = true
|
||||
log.Debug("Current full block not old enough to freeze", "err", err)
|
||||
continue
|
||||
}
|
||||
frozen := f.frozen.Load()
|
||||
var (
|
||||
frozen uint64
|
||||
threshold uint64
|
||||
first uint64 // the first block to freeze
|
||||
last uint64 // the last block to freeze
|
||||
|
||||
// Short circuit if the blocks below threshold are already frozen.
|
||||
if frozen != 0 && frozen-1 >= threshold {
|
||||
backoff = true
|
||||
log.Debug("Ancient blocks frozen already", "threshold", threshold, "frozen", frozen)
|
||||
continue
|
||||
hash common.Hash
|
||||
number *uint64
|
||||
head *types.Header
|
||||
)
|
||||
|
||||
// use finalized block as the chain freeze indicator was used for multiDatabase feature, if multiDatabase is false, keep 9W blocks in db
|
||||
if f.multiDatabase {
|
||||
threshold, err = f.freezeThreshold(nfdb)
|
||||
if err != nil {
|
||||
backoff = true
|
||||
log.Debug("Current full block not old enough to freeze", "err", err)
|
||||
continue
|
||||
}
|
||||
frozen = f.frozen.Load()
|
||||
|
||||
// Short circuit if the blocks below threshold are already frozen.
|
||||
if frozen != 0 && frozen-1 >= threshold {
|
||||
backoff = true
|
||||
log.Debug("Ancient blocks frozen already", "threshold", threshold, "frozen", frozen)
|
||||
continue
|
||||
}
|
||||
|
||||
hash = ReadHeadBlockHash(nfdb)
|
||||
if hash == (common.Hash{}) {
|
||||
log.Debug("Current full block hash unavailable") // new chain, empty database
|
||||
backoff = true
|
||||
continue
|
||||
}
|
||||
number = ReadHeaderNumber(nfdb, hash)
|
||||
if number == nil {
|
||||
log.Error("Current full block number unavailable", "hash", hash)
|
||||
backoff = true
|
||||
continue
|
||||
}
|
||||
head = ReadHeader(nfdb, hash, *number)
|
||||
if head == nil {
|
||||
log.Error("Current full block unavailable", "number", *number, "hash", hash)
|
||||
backoff = true
|
||||
continue
|
||||
}
|
||||
|
||||
first = frozen
|
||||
last = threshold
|
||||
if last-first+1 > freezerBatchLimit {
|
||||
last = freezerBatchLimit + first - 1
|
||||
}
|
||||
} else {
|
||||
// Retrieve the freezing threshold.
|
||||
hash = ReadHeadBlockHash(nfdb)
|
||||
if hash == (common.Hash{}) {
|
||||
log.Debug("Current full block hash unavailable") // new chain, empty database
|
||||
backoff = true
|
||||
continue
|
||||
}
|
||||
number = ReadHeaderNumber(nfdb, hash)
|
||||
threshold = f.threshold.Load()
|
||||
frozen = f.frozen.Load()
|
||||
switch {
|
||||
case number == nil:
|
||||
log.Error("Current full block number unavailable", "hash", hash)
|
||||
backoff = true
|
||||
continue
|
||||
|
||||
case *number < threshold:
|
||||
log.Debug("Current full block not old enough to freeze", "number", *number, "hash", hash, "delay", threshold)
|
||||
backoff = true
|
||||
continue
|
||||
|
||||
case *number-threshold <= frozen:
|
||||
log.Debug("Ancient blocks frozen already", "number", *number, "hash", hash, "frozen", frozen)
|
||||
backoff = true
|
||||
continue
|
||||
}
|
||||
head = ReadHeader(nfdb, hash, *number)
|
||||
if head == nil {
|
||||
log.Error("Current full block unavailable", "number", *number, "hash", hash)
|
||||
backoff = true
|
||||
continue
|
||||
}
|
||||
first, _ = f.Ancients()
|
||||
last = *number - threshold
|
||||
if last-first > freezerBatchLimit {
|
||||
last = first + freezerBatchLimit
|
||||
}
|
||||
}
|
||||
// Seems we have data ready to be frozen, process in usable batches
|
||||
var (
|
||||
start = time.Now()
|
||||
first = frozen // the first block to freeze
|
||||
last = threshold // the last block to freeze
|
||||
)
|
||||
if last-first+1 > freezerBatchLimit {
|
||||
last = freezerBatchLimit + first - 1
|
||||
}
|
||||
|
||||
ancients, err := f.freezeRangeWithBlobs(nfdb, first, last)
|
||||
if err != nil {
|
||||
@ -295,24 +373,6 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) {
|
||||
log.Debug("Deep froze chain segment", context...)
|
||||
|
||||
env, _ := f.freezeEnv.Load().(*ethdb.FreezerEnv)
|
||||
hash := ReadHeadBlockHash(nfdb)
|
||||
if hash == (common.Hash{}) {
|
||||
log.Debug("Current full block hash unavailable") // new chain, empty database
|
||||
backoff = true
|
||||
continue
|
||||
}
|
||||
number := ReadHeaderNumber(nfdb, hash)
|
||||
if number == nil {
|
||||
log.Error("Current full block number unavailable", "hash", hash)
|
||||
backoff = true
|
||||
continue
|
||||
}
|
||||
head := ReadHeader(nfdb, hash, *number)
|
||||
if head == nil {
|
||||
log.Error("Current full block unavailable", "number", *number, "hash", hash)
|
||||
backoff = true
|
||||
continue
|
||||
}
|
||||
// try prune blob data after cancun fork
|
||||
if isCancun(env, head.Number, head.Time) {
|
||||
f.tryPruneBlobAncientTable(env, *number)
|
||||
|
@ -61,7 +61,7 @@ func (frdb *freezerdb) BlockStoreReader() ethdb.Reader {
|
||||
}
|
||||
|
||||
func (frdb *freezerdb) BlockStoreWriter() ethdb.Writer {
|
||||
//TODO implement me
|
||||
// TODO implement me
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
@ -138,13 +138,22 @@ func (frdb *freezerdb) SetBlockStore(block ethdb.Database) {
|
||||
frdb.blockStore = block
|
||||
}
|
||||
|
||||
func (frdb *freezerdb) HasSeparateBlockStore() bool {
|
||||
return frdb.blockStore != nil
|
||||
}
|
||||
|
||||
// Freeze is a helper method used for external testing to trigger and block until
|
||||
// a freeze cycle completes, without having to sleep for a minute to trigger the
|
||||
// automatic background run.
|
||||
func (frdb *freezerdb) Freeze() error {
|
||||
func (frdb *freezerdb) Freeze(threshold uint64) error {
|
||||
if frdb.AncientStore.(*chainFreezer).readonly {
|
||||
return errReadOnly
|
||||
}
|
||||
// Set the freezer threshold to a temporary value
|
||||
defer func(old uint64) {
|
||||
frdb.AncientStore.(*chainFreezer).threshold.Store(old)
|
||||
}(frdb.AncientStore.(*chainFreezer).threshold.Load())
|
||||
frdb.AncientStore.(*chainFreezer).threshold.Store(threshold)
|
||||
// Trigger a freeze cycle and block until it's done
|
||||
trigger := make(chan struct{}, 1)
|
||||
frdb.AncientStore.(*chainFreezer).trigger <- trigger
|
||||
@ -184,7 +193,7 @@ func (db *nofreezedb) Ancients() (uint64, error) {
|
||||
return 0, errNotSupported
|
||||
}
|
||||
|
||||
// Ancients returns an error as we don't have a backing chain freezer.
|
||||
// ItemAmountInAncient returns an error as we don't have a backing chain freezer.
|
||||
func (db *nofreezedb) ItemAmountInAncient() (uint64, error) {
|
||||
return 0, errNotSupported
|
||||
}
|
||||
@ -263,6 +272,10 @@ func (db *nofreezedb) SetBlockStore(block ethdb.Database) {
|
||||
db.blockStore = block
|
||||
}
|
||||
|
||||
func (db *nofreezedb) HasSeparateBlockStore() bool {
|
||||
return db.blockStore != nil
|
||||
}
|
||||
|
||||
func (db *nofreezedb) BlockStoreReader() ethdb.Reader {
|
||||
if db.blockStore != nil {
|
||||
return db.blockStore
|
||||
@ -318,6 +331,110 @@ func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
|
||||
return &nofreezedb{KeyValueStore: db}
|
||||
}
|
||||
|
||||
type emptyfreezedb struct {
|
||||
ethdb.KeyValueStore
|
||||
}
|
||||
|
||||
// HasAncient returns nil for pruned db that we don't have a backing chain freezer.
|
||||
func (db *emptyfreezedb) HasAncient(kind string, number uint64) (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Ancient returns nil for pruned db that we don't have a backing chain freezer.
|
||||
func (db *emptyfreezedb) Ancient(kind string, number uint64) ([]byte, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// AncientRange returns nil for pruned db that we don't have a backing chain freezer.
|
||||
func (db *emptyfreezedb) AncientRange(kind string, start, max, maxByteSize uint64) ([][]byte, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Ancients returns nil for pruned db that we don't have a backing chain freezer.
|
||||
func (db *emptyfreezedb) Ancients() (uint64, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// ItemAmountInAncient returns nil for pruned db that we don't have a backing chain freezer.
|
||||
func (db *emptyfreezedb) ItemAmountInAncient() (uint64, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// Tail returns nil for pruned db that we don't have a backing chain freezer.
|
||||
func (db *emptyfreezedb) Tail() (uint64, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// AncientSize returns nil for pruned db that we don't have a backing chain freezer.
|
||||
func (db *emptyfreezedb) AncientSize(kind string) (uint64, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// ModifyAncients returns nil for pruned db that we don't have a backing chain freezer.
|
||||
func (db *emptyfreezedb) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// TruncateHead returns nil for pruned db that we don't have a backing chain freezer.
|
||||
func (db *emptyfreezedb) TruncateHead(items uint64) (uint64, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// TruncateTail returns nil for pruned db that we don't have a backing chain freezer.
|
||||
func (db *emptyfreezedb) TruncateTail(items uint64) (uint64, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// TruncateTableTail returns nil for pruned db that we don't have a backing chain freezer.
|
||||
func (db *emptyfreezedb) TruncateTableTail(kind string, tail uint64) (uint64, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// ResetTable returns nil for pruned db that we don't have a backing chain freezer.
|
||||
func (db *emptyfreezedb) ResetTable(kind string, startAt uint64, onlyEmpty bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Sync returns nil for pruned db that we don't have a backing chain freezer.
|
||||
func (db *emptyfreezedb) Sync() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (db *emptyfreezedb) DiffStore() ethdb.KeyValueStore { return db }
|
||||
func (db *emptyfreezedb) SetDiffStore(diff ethdb.KeyValueStore) {}
|
||||
func (db *emptyfreezedb) StateStore() ethdb.Database { return db }
|
||||
func (db *emptyfreezedb) SetStateStore(state ethdb.Database) {}
|
||||
func (db *emptyfreezedb) StateStoreReader() ethdb.Reader { return db }
|
||||
func (db *emptyfreezedb) BlockStore() ethdb.Database { return db }
|
||||
func (db *emptyfreezedb) SetBlockStore(block ethdb.Database) {}
|
||||
func (db *emptyfreezedb) HasSeparateBlockStore() bool { return false }
|
||||
func (db *emptyfreezedb) BlockStoreReader() ethdb.Reader { return db }
|
||||
func (db *emptyfreezedb) BlockStoreWriter() ethdb.Writer { return db }
|
||||
func (db *emptyfreezedb) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err error) {
|
||||
return nil
|
||||
}
|
||||
func (db *emptyfreezedb) AncientOffSet() uint64 { return 0 }
|
||||
|
||||
// MigrateTable returns nil for pruned db that we don't have a backing chain freezer.
|
||||
func (db *emptyfreezedb) MigrateTable(kind string, convert convertLegacyFn) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// AncientDatadir returns nil for pruned db that we don't have a backing chain freezer.
|
||||
func (db *emptyfreezedb) AncientDatadir() (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
func (db *emptyfreezedb) SetupFreezerEnv(env *ethdb.FreezerEnv) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewEmptyFreezeDB is used for CLI such as `geth db inspect` in pruned db that we don't
|
||||
// have a backing chain freezer.
|
||||
// WARNING: it must be only used in the above case.
|
||||
func NewEmptyFreezeDB(db ethdb.KeyValueStore) ethdb.Database {
|
||||
return &emptyfreezedb{KeyValueStore: db}
|
||||
}
|
||||
|
||||
// NewFreezerDb only create a freezer without statedb.
|
||||
func NewFreezerDb(db ethdb.KeyValueStore, frz, namespace string, readonly bool, newOffSet uint64) (*Freezer, error) {
|
||||
// Create the idle freezer instance, this operation should be atomic to avoid mismatch between offset and acientDB.
|
||||
@ -358,7 +475,7 @@ func resolveChainFreezerDir(ancient string) string {
|
||||
// value data store with a freezer moving immutable chain segments into cold
|
||||
// storage. The passed ancient indicates the path of root ancient directory
|
||||
// where the chain freezer can be opened.
|
||||
func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace string, readonly, disableFreeze, isLastOffset, pruneAncientData bool) (ethdb.Database, error) {
|
||||
func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace string, readonly, disableFreeze, isLastOffset, pruneAncientData, multiDatabase bool) (ethdb.Database, error) {
|
||||
var offset uint64
|
||||
// The offset of ancientDB should be handled differently in different scenarios.
|
||||
if isLastOffset {
|
||||
@ -367,6 +484,12 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st
|
||||
offset = ReadOffSetOfCurrentAncientFreezer(db)
|
||||
}
|
||||
|
||||
// This case is used for someone who wants to execute geth db inspect CLI in a pruned db
|
||||
if !disableFreeze && readonly && ReadAncientType(db) == PruneFreezerType {
|
||||
log.Warn("Disk db is pruned, using an empty freezer db for CLI")
|
||||
return NewEmptyFreezeDB(db), nil
|
||||
}
|
||||
|
||||
if pruneAncientData && !disableFreeze && !readonly {
|
||||
frdb, err := newPrunedFreezer(resolveChainFreezerDir(ancient), db, offset)
|
||||
if err != nil {
|
||||
@ -394,7 +517,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st
|
||||
}
|
||||
|
||||
// Create the idle freezer instance
|
||||
frdb, err := newChainFreezer(resolveChainFreezerDir(ancient), namespace, readonly, offset)
|
||||
frdb, err := newChainFreezer(resolveChainFreezerDir(ancient), namespace, readonly, offset, multiDatabase)
|
||||
if err != nil {
|
||||
printChainMetadata(db)
|
||||
return nil, err
|
||||
@ -575,6 +698,8 @@ type OpenOptions struct {
|
||||
// Ephemeral means that filesystem sync operations should be avoided: data integrity in the face of
|
||||
// a crash is not important. This option should typically be used in tests.
|
||||
Ephemeral bool
|
||||
|
||||
MultiDataBase bool
|
||||
}
|
||||
|
||||
// openKeyValueDatabase opens a disk-based key-value database, e.g. leveldb or pebble.
|
||||
@ -619,13 +744,13 @@ func Open(o OpenOptions) (ethdb.Database, error) {
|
||||
}
|
||||
if ReadAncientType(kvdb) == PruneFreezerType {
|
||||
if !o.PruneAncientData {
|
||||
log.Warn("Disk db is pruned")
|
||||
log.Warn("NOTICE: You're opening a pruned disk db!")
|
||||
}
|
||||
}
|
||||
if len(o.AncientsDirectory) == 0 {
|
||||
return kvdb, nil
|
||||
}
|
||||
frdb, err := NewDatabaseWithFreezer(kvdb, o.AncientsDirectory, o.Namespace, o.ReadOnly, o.DisableFreeze, o.IsLastOffset, o.PruneAncientData)
|
||||
frdb, err := NewDatabaseWithFreezer(kvdb, o.AncientsDirectory, o.Namespace, o.ReadOnly, o.DisableFreeze, o.IsLastOffset, o.PruneAncientData, o.MultiDataBase)
|
||||
if err != nil {
|
||||
kvdb.Close()
|
||||
return nil, err
|
||||
@ -769,7 +894,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
|
||||
trieIter = db.StateStore().NewIterator(keyPrefix, nil)
|
||||
defer trieIter.Release()
|
||||
}
|
||||
if db.BlockStore() != db {
|
||||
if db.HasSeparateBlockStore() {
|
||||
blockIter = db.BlockStore().NewIterator(keyPrefix, nil)
|
||||
defer blockIter.Release()
|
||||
}
|
||||
|
@ -239,7 +239,7 @@ func (f *Freezer) Ancient(kind string, number uint64) ([]byte, error) {
|
||||
// - if maxBytes is not specified, 'count' items will be returned if they are present.
|
||||
func (f *Freezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) {
|
||||
if table := f.tables[kind]; table != nil {
|
||||
return table.RetrieveItems(start, count, maxBytes)
|
||||
return table.RetrieveItems(start-f.offset, count, maxBytes)
|
||||
}
|
||||
return nil, errUnknownTable
|
||||
}
|
||||
@ -252,7 +252,7 @@ func (f *Freezer) Ancients() (uint64, error) {
|
||||
func (f *Freezer) TableAncients(kind string) (uint64, error) {
|
||||
f.writeLock.RLock()
|
||||
defer f.writeLock.RUnlock()
|
||||
return f.tables[kind].items.Load(), nil
|
||||
return f.tables[kind].items.Load() + f.offset, nil
|
||||
}
|
||||
|
||||
// ItemAmountInAncient returns the actual length of current ancientDB.
|
||||
|
@ -43,6 +43,10 @@ func (t *table) SetBlockStore(block ethdb.Database) {
|
||||
panic("not implement")
|
||||
}
|
||||
|
||||
func (t *table) HasSeparateBlockStore() bool {
|
||||
panic("not implement")
|
||||
}
|
||||
|
||||
// NewTable returns a database object that prefixes all keys with a given string.
|
||||
func NewTable(db ethdb.Database, prefix string) ethdb.Database {
|
||||
return &table{
|
||||
|
@ -382,7 +382,7 @@ func (p *BlockPruner) backUpOldDb(name string, cache, handles int, namespace str
|
||||
log.Info("chainDB opened successfully")
|
||||
|
||||
// Get the number of items in old ancient db.
|
||||
itemsOfAncient, err := chainDb.ItemAmountInAncient()
|
||||
itemsOfAncient, err := chainDb.BlockStore().ItemAmountInAncient()
|
||||
log.Info("the number of items in ancientDB is ", "itemsOfAncient", itemsOfAncient)
|
||||
|
||||
// If we can't access the freezer or it's empty, abort.
|
||||
|
@ -212,7 +212,7 @@ func TestTxIndexer(t *testing.T) {
|
||||
}
|
||||
for _, c := range cases {
|
||||
frdir := t.TempDir()
|
||||
db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false, false)
|
||||
db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false, false, false)
|
||||
rawdb.WriteAncientBlocks(db, append([]*types.Block{gspec.ToBlock()}, blocks...), append([]types.Receipts{{}}, receipts...), big.NewInt(0))
|
||||
|
||||
// Index the initial blocks from ancient store
|
||||
|
@ -6,6 +6,8 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
mapset "github.com/deckarep/golang-set/v2"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
@ -40,6 +42,12 @@ func (b *BidArgs) ToBid(builder common.Address, signer Signer) (*Bid, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(b.RawBid.UnRevertible) > len(txs) {
|
||||
return nil, fmt.Errorf("expect NonRevertible no more than %d", len(txs))
|
||||
}
|
||||
unRevertibleHashes := mapset.NewThreadUnsafeSetWithSize[common.Hash](len(b.RawBid.UnRevertible))
|
||||
unRevertibleHashes.Append(b.RawBid.UnRevertible...)
|
||||
|
||||
if len(b.PayBidTx) != 0 {
|
||||
var payBidTx = new(Transaction)
|
||||
err = payBidTx.UnmarshalBinary(b.PayBidTx)
|
||||
@ -51,14 +59,15 @@ func (b *BidArgs) ToBid(builder common.Address, signer Signer) (*Bid, error) {
|
||||
}
|
||||
|
||||
bid := &Bid{
|
||||
Builder: builder,
|
||||
BlockNumber: b.RawBid.BlockNumber,
|
||||
ParentHash: b.RawBid.ParentHash,
|
||||
Txs: txs,
|
||||
GasUsed: b.RawBid.GasUsed + b.PayBidTxGasUsed,
|
||||
GasFee: b.RawBid.GasFee,
|
||||
BuilderFee: b.RawBid.BuilderFee,
|
||||
rawBid: *b.RawBid,
|
||||
Builder: builder,
|
||||
BlockNumber: b.RawBid.BlockNumber,
|
||||
ParentHash: b.RawBid.ParentHash,
|
||||
Txs: txs,
|
||||
UnRevertible: unRevertibleHashes,
|
||||
GasUsed: b.RawBid.GasUsed + b.PayBidTxGasUsed,
|
||||
GasFee: b.RawBid.GasFee,
|
||||
BuilderFee: b.RawBid.BuilderFee,
|
||||
rawBid: *b.RawBid,
|
||||
}
|
||||
|
||||
if bid.BuilderFee == nil {
|
||||
@ -70,12 +79,13 @@ func (b *BidArgs) ToBid(builder common.Address, signer Signer) (*Bid, error) {
|
||||
|
||||
// RawBid represents a raw bid from builder directly.
|
||||
type RawBid struct {
|
||||
BlockNumber uint64 `json:"blockNumber"`
|
||||
ParentHash common.Hash `json:"parentHash"`
|
||||
Txs []hexutil.Bytes `json:"txs"`
|
||||
GasUsed uint64 `json:"gasUsed"`
|
||||
GasFee *big.Int `json:"gasFee"`
|
||||
BuilderFee *big.Int `json:"builderFee"`
|
||||
BlockNumber uint64 `json:"blockNumber"`
|
||||
ParentHash common.Hash `json:"parentHash"`
|
||||
Txs []hexutil.Bytes `json:"txs"`
|
||||
UnRevertible []common.Hash `json:"unRevertible"`
|
||||
GasUsed uint64 `json:"gasUsed"`
|
||||
GasFee *big.Int `json:"gasFee"`
|
||||
BuilderFee *big.Int `json:"builderFee"`
|
||||
|
||||
hash atomic.Value
|
||||
}
|
||||
@ -154,13 +164,14 @@ func (b *RawBid) Hash() common.Hash {
|
||||
|
||||
// Bid represents a bid.
|
||||
type Bid struct {
|
||||
Builder common.Address
|
||||
BlockNumber uint64
|
||||
ParentHash common.Hash
|
||||
Txs Transactions
|
||||
GasUsed uint64
|
||||
GasFee *big.Int
|
||||
BuilderFee *big.Int
|
||||
Builder common.Address
|
||||
BlockNumber uint64
|
||||
ParentHash common.Hash
|
||||
Txs Transactions
|
||||
UnRevertible mapset.Set[common.Hash]
|
||||
GasUsed uint64
|
||||
GasFee *big.Int
|
||||
BuilderFee *big.Int
|
||||
|
||||
rawBid RawBid
|
||||
}
|
||||
|
@ -24,7 +24,6 @@ import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
@ -142,31 +141,3 @@ func (api *AdminAPI) ImportChain(file string) (bool, error) {
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// MevRunning returns true if the validator accept bids from builder
|
||||
func (api *AdminAPI) MevRunning() bool {
|
||||
return api.eth.APIBackend.MevRunning()
|
||||
}
|
||||
|
||||
// StartMev starts mev. It notifies the miner to start to receive bids.
|
||||
func (api *AdminAPI) StartMev() {
|
||||
api.eth.APIBackend.StartMev()
|
||||
}
|
||||
|
||||
// StopMev stops mev. It notifies the miner to stop receiving bids from this moment,
|
||||
// but the bids before this moment would still been taken into consideration by mev.
|
||||
func (api *AdminAPI) StopMev() {
|
||||
api.eth.APIBackend.StopMev()
|
||||
}
|
||||
|
||||
// AddBuilder adds a builder to the bid simulator.
|
||||
// url is the endpoint of the builder, for example, "https://mev-builder.amazonaws.com",
|
||||
// if validator is equipped with sentry, ignore the url.
|
||||
func (api *AdminAPI) AddBuilder(builder common.Address, url string) error {
|
||||
return api.eth.APIBackend.AddBuilder(builder, url)
|
||||
}
|
||||
|
||||
// RemoveBuilder removes a builder from the bid simulator.
|
||||
func (api *AdminAPI) RemoveBuilder(builder common.Address) error {
|
||||
return api.eth.APIBackend.RemoveBuilder(builder)
|
||||
}
|
||||
|
@ -89,3 +89,31 @@ func (api *MinerAPI) SetEtherbase(etherbase common.Address) bool {
|
||||
func (api *MinerAPI) SetRecommitInterval(interval int) {
|
||||
api.e.Miner().SetRecommitInterval(time.Duration(interval) * time.Millisecond)
|
||||
}
|
||||
|
||||
// MevRunning returns true if the validator accept bids from builder
|
||||
func (api *MinerAPI) MevRunning() bool {
|
||||
return api.e.APIBackend.MevRunning()
|
||||
}
|
||||
|
||||
// StartMev starts mev. It notifies the miner to start to receive bids.
|
||||
func (api *MinerAPI) StartMev() {
|
||||
api.e.APIBackend.StartMev()
|
||||
}
|
||||
|
||||
// StopMev stops mev. It notifies the miner to stop receiving bids from this moment,
|
||||
// but the bids before this moment would still been taken into consideration by mev.
|
||||
func (api *MinerAPI) StopMev() {
|
||||
api.e.APIBackend.StopMev()
|
||||
}
|
||||
|
||||
// AddBuilder adds a builder to the bid simulator.
|
||||
// url is the endpoint of the builder, for example, "https://mev-builder.amazonaws.com",
|
||||
// if validator is equipped with sentry, ignore the url.
|
||||
func (api *MinerAPI) AddBuilder(builder common.Address, url string) error {
|
||||
return api.e.APIBackend.AddBuilder(builder, url)
|
||||
}
|
||||
|
||||
// RemoveBuilder removes a builder from the bid simulator.
|
||||
func (api *MinerAPI) RemoveBuilder(builder common.Address) error {
|
||||
return api.e.APIBackend.RemoveBuilder(builder)
|
||||
}
|
||||
|
@ -187,6 +187,10 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
|
||||
chainConfig.HaberTime = config.OverrideHaber
|
||||
overrides.OverrideHaber = config.OverrideHaber
|
||||
}
|
||||
if config.OverrideBohr != nil {
|
||||
chainConfig.BohrTime = config.OverrideBohr
|
||||
overrides.OverrideBohr = config.OverrideBohr
|
||||
}
|
||||
if config.OverrideVerkle != nil {
|
||||
chainConfig.VerkleTime = config.OverrideVerkle
|
||||
overrides.OverrideVerkle = config.OverrideVerkle
|
||||
|
@ -209,6 +209,9 @@ type BlockChain interface {
|
||||
|
||||
// UpdateChasingHead update remote best chain head, used by DA check now.
|
||||
UpdateChasingHead(head *types.Header)
|
||||
|
||||
// AncientTail retrieves the tail the ancients blocks
|
||||
AncientTail() (uint64, error)
|
||||
}
|
||||
|
||||
type DownloadOption func(downloader *Downloader) *Downloader
|
||||
@ -797,6 +800,11 @@ func (d *Downloader) findAncestor(p *peerConnection, localHeight uint64, remoteH
|
||||
// We're above the max reorg threshold, find the earliest fork point
|
||||
floor = int64(localHeight - maxForkAncestry)
|
||||
}
|
||||
// if we have pruned too much history, reset the floor
|
||||
if tail, err := d.blockchain.AncientTail(); err == nil && tail > uint64(floor) {
|
||||
floor = int64(tail)
|
||||
}
|
||||
|
||||
// If we're doing a light sync, ensure the floor doesn't go below the CHT, as
|
||||
// all headers before that point will be missing.
|
||||
if mode == LightSync {
|
||||
|
@ -60,7 +60,7 @@ func newTester(t *testing.T) *downloadTester {
|
||||
// newTester creates a new downloader test mocker.
|
||||
func newTesterWithNotification(t *testing.T, success func()) *downloadTester {
|
||||
freezer := t.TempDir()
|
||||
db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false, false, false, false)
|
||||
db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false, false, false, false, false)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
@ -194,6 +194,9 @@ type Config struct {
|
||||
// OverrideHaber (TODO: remove after the fork)
|
||||
OverrideHaber *uint64 `toml:",omitempty"`
|
||||
|
||||
// OverrideBohr (TODO: remove after the fork)
|
||||
OverrideBohr *uint64 `toml:",omitempty"`
|
||||
|
||||
// OverrideVerkle (TODO: remove after the fork)
|
||||
OverrideVerkle *uint64 `toml:",omitempty"`
|
||||
|
||||
|
@ -72,6 +72,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
|
||||
RPCTxFeeCap float64
|
||||
OverrideCancun *uint64 `toml:",omitempty"`
|
||||
OverrideHaber *uint64 `toml:",omitempty"`
|
||||
OverrideBohr *uint64 `toml:",omitempty"`
|
||||
OverrideVerkle *uint64 `toml:",omitempty"`
|
||||
BlobExtraReserve uint64
|
||||
}
|
||||
@ -131,6 +132,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
|
||||
enc.RPCTxFeeCap = c.RPCTxFeeCap
|
||||
enc.OverrideCancun = c.OverrideCancun
|
||||
enc.OverrideHaber = c.OverrideHaber
|
||||
enc.OverrideBohr = c.OverrideBohr
|
||||
enc.OverrideVerkle = c.OverrideVerkle
|
||||
enc.BlobExtraReserve = c.BlobExtraReserve
|
||||
return &enc, nil
|
||||
@ -194,6 +196,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
|
||||
RPCTxFeeCap *float64
|
||||
OverrideCancun *uint64 `toml:",omitempty"`
|
||||
OverrideHaber *uint64 `toml:",omitempty"`
|
||||
OverrideBohr *uint64 `toml:",omitempty"`
|
||||
OverrideVerkle *uint64 `toml:",omitempty"`
|
||||
BlobExtraReserve *uint64
|
||||
}
|
||||
@ -366,6 +369,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
|
||||
if dec.OverrideHaber != nil {
|
||||
c.OverrideHaber = dec.OverrideHaber
|
||||
}
|
||||
if dec.OverrideBohr != nil {
|
||||
c.OverrideBohr = dec.OverrideBohr
|
||||
}
|
||||
if dec.OverrideVerkle != nil {
|
||||
c.OverrideVerkle = dec.OverrideVerkle
|
||||
}
|
||||
|
@ -158,7 +158,7 @@ func (f *fetcherTester) chainFinalizedHeight() uint64 {
|
||||
return f.blocks[f.hashes[len(f.hashes)-3]].NumberU64()
|
||||
}
|
||||
|
||||
// insertChain injects a new headers into the simulated chain.
|
||||
// insertHeaders injects a new headers into the simulated chain.
|
||||
func (f *fetcherTester) insertHeaders(headers []*types.Header) (int, error) {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
|
@ -320,26 +320,22 @@ func newHandler(config *handlerConfig) (*handler, error) {
|
||||
}
|
||||
|
||||
broadcastBlockWithCheck := func(block *types.Block, propagate bool) {
|
||||
// All the block fetcher activities should be disabled
|
||||
// after the transition. Print the warning log.
|
||||
if h.merger.PoSFinalized() {
|
||||
log.Warn("Unexpected validation activity", "hash", block.Hash(), "number", block.Number())
|
||||
return
|
||||
}
|
||||
// Reject all the PoS style headers in the first place. No matter
|
||||
// the chain has finished the transition or not, the PoS headers
|
||||
// should only come from the trusted consensus layer instead of
|
||||
// p2p network.
|
||||
if beacon, ok := h.chain.Engine().(*beacon.Beacon); ok {
|
||||
if beacon.IsPoSHeader(block.Header()) {
|
||||
log.Warn("unexpected post-merge header")
|
||||
return
|
||||
}
|
||||
}
|
||||
if propagate {
|
||||
if err := core.IsDataAvailable(h.chain, block); err != nil {
|
||||
log.Error("Propagating block with invalid sidecars", "number", block.Number(), "hash", block.Hash(), "err", err)
|
||||
return
|
||||
checkErrs := make(chan error, 2)
|
||||
|
||||
go func() {
|
||||
checkErrs <- core.ValidateListsInBody(block)
|
||||
}()
|
||||
go func() {
|
||||
checkErrs <- core.IsDataAvailable(h.chain, block)
|
||||
}()
|
||||
|
||||
for i := 0; i < cap(checkErrs); i++ {
|
||||
err := <-checkErrs
|
||||
if err != nil {
|
||||
log.Error("Propagating invalid block", "number", block.Number(), "hash", block.Hash(), "err", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
h.BroadcastBlock(block, propagate)
|
||||
|
@ -258,7 +258,7 @@ func (c *mockParlia) CalcDifficulty(chain consensus.ChainHeaderReader, time uint
|
||||
func newTestParliaHandlerAfterCancun(t *testing.T, config *params.ChainConfig, mode downloader.SyncMode, preCancunBlks, postCancunBlks uint64) *testHandler {
|
||||
// Have N headers in the freezer
|
||||
frdir := t.TempDir()
|
||||
db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false, false)
|
||||
db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false, false, false)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create database with ancient backend")
|
||||
}
|
||||
|
@ -183,6 +183,7 @@ type StateStoreReader interface {
|
||||
type BlockStore interface {
|
||||
BlockStore() Database
|
||||
SetBlockStore(block Database)
|
||||
HasSeparateBlockStore() bool
|
||||
}
|
||||
|
||||
type BlockStoreReader interface {
|
||||
|
@ -44,6 +44,10 @@ func (db *Database) BlockStore() ethdb.Database {
|
||||
return db
|
||||
}
|
||||
|
||||
func (db *Database) HasSeparateBlockStore() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (db *Database) SetBlockStore(block ethdb.Database) {
|
||||
panic("not supported")
|
||||
}
|
||||
|
@ -659,6 +659,30 @@ web3._extend({
|
||||
name: 'stop',
|
||||
call: 'miner_stop'
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'mevRunning',
|
||||
call: 'miner_mevRunning'
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'startMev',
|
||||
call: 'miner_startMev'
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'stopMev',
|
||||
call: 'miner_stopMev'
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'addBuilder',
|
||||
call: 'miner_addBuilder',
|
||||
params: 2,
|
||||
inputFormatter: [web3._extend.formatters.inputAddressFormatter, null]
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'removeBuilder',
|
||||
call: 'miner_removeBuilder',
|
||||
params: 1,
|
||||
inputFormatter: [web3._extend.formatters.inputAddressFormatter]
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'setEtherbase',
|
||||
call: 'miner_setEtherbase',
|
||||
|
@ -29,11 +29,6 @@ import (
|
||||
const (
|
||||
// maxBidPerBuilderPerBlock is the max bid number per builder
|
||||
maxBidPerBuilderPerBlock = 3
|
||||
|
||||
// leftOverTimeRate is the rate of left over time to simulate a bid
|
||||
leftOverTimeRate = 11
|
||||
// leftOverTimeScale is the scale of left over time to simulate a bid
|
||||
leftOverTimeScale = 10
|
||||
)
|
||||
|
||||
var (
|
||||
@ -78,6 +73,7 @@ type simBidReq struct {
|
||||
type bidSimulator struct {
|
||||
config *MevConfig
|
||||
delayLeftOver time.Duration
|
||||
minGasPrice *big.Int
|
||||
chain *core.BlockChain
|
||||
chainConfig *params.ChainConfig
|
||||
engine consensus.Engine
|
||||
@ -114,6 +110,7 @@ type bidSimulator struct {
|
||||
func newBidSimulator(
|
||||
config *MevConfig,
|
||||
delayLeftOver time.Duration,
|
||||
minGasPrice *big.Int,
|
||||
chain *core.BlockChain,
|
||||
chainConfig *params.ChainConfig,
|
||||
engine consensus.Engine,
|
||||
@ -122,6 +119,7 @@ func newBidSimulator(
|
||||
b := &bidSimulator{
|
||||
config: config,
|
||||
delayLeftOver: delayLeftOver,
|
||||
minGasPrice: minGasPrice,
|
||||
chain: chain,
|
||||
chainConfig: chainConfig,
|
||||
engine: engine,
|
||||
@ -315,18 +313,6 @@ func (b *bidSimulator) newBidLoop() {
|
||||
|
||||
// commit aborts in-flight bid execution with given signal and resubmits a new one.
|
||||
commit := func(reason int32, bidRuntime *BidRuntime) {
|
||||
// if the left time is not enough to do simulation, return
|
||||
var simDuration time.Duration
|
||||
if lastBid := b.GetBestBid(bidRuntime.bid.ParentHash); lastBid != nil && lastBid.duration != 0 {
|
||||
simDuration = lastBid.duration
|
||||
}
|
||||
|
||||
if time.Until(b.bidMustBefore(bidRuntime.bid.ParentHash)) <= simDuration*leftOverTimeRate/leftOverTimeScale {
|
||||
log.Debug("BidSimulator: abort commit, not enough time to simulate",
|
||||
"builder", bidRuntime.bid.Builder, "bidHash", bidRuntime.bid.Hash().Hex())
|
||||
return
|
||||
}
|
||||
|
||||
if interruptCh != nil {
|
||||
// each commit work will have its own interruptCh to stop work with a reason
|
||||
interruptCh <- reason
|
||||
@ -367,6 +353,7 @@ func (b *bidSimulator) newBidLoop() {
|
||||
expectedValidatorReward: expectedValidatorReward,
|
||||
packedBlockReward: big.NewInt(0),
|
||||
packedValidatorReward: big.NewInt(0),
|
||||
finished: make(chan struct{}),
|
||||
}
|
||||
|
||||
simulatingBid := b.GetSimulatingBid(newBid.ParentHash)
|
||||
@ -407,11 +394,6 @@ func (b *bidSimulator) newBidLoop() {
|
||||
}
|
||||
}
|
||||
|
||||
func (b *bidSimulator) bidMustBefore(parentHash common.Hash) time.Time {
|
||||
parentHeader := b.chain.GetHeaderByHash(parentHash)
|
||||
return bidutil.BidMustBefore(parentHeader, b.chainConfig.Parlia.Period, b.delayLeftOver)
|
||||
}
|
||||
|
||||
func (b *bidSimulator) bidBetterBefore(parentHash common.Hash) time.Time {
|
||||
parentHeader := b.chain.GetHeaderByHash(parentHash)
|
||||
return bidutil.BidBetterBefore(parentHeader, b.chainConfig.Parlia.Period, b.delayLeftOver, b.config.BidSimulationLeftOver)
|
||||
@ -527,7 +509,6 @@ func (b *bidSimulator) simBid(interruptCh chan int32, bidRuntime *BidRuntime) {
|
||||
|
||||
// ensure simulation exited then start next simulation
|
||||
b.SetSimulatingBid(parentHash, bidRuntime)
|
||||
start := time.Now()
|
||||
|
||||
defer func(simStart time.Time) {
|
||||
logCtx := []any{
|
||||
@ -553,10 +534,11 @@ func (b *bidSimulator) simBid(interruptCh chan int32, bidRuntime *BidRuntime) {
|
||||
}
|
||||
|
||||
b.RemoveSimulatingBid(parentHash)
|
||||
bidSimTimer.UpdateSince(start)
|
||||
close(bidRuntime.finished)
|
||||
|
||||
if success {
|
||||
bidRuntime.duration = time.Since(simStart)
|
||||
bidSimTimer.UpdateSince(simStart)
|
||||
|
||||
// only recommit self bid when newBidCh is empty
|
||||
if len(b.newBidCh) > 0 {
|
||||
@ -580,6 +562,14 @@ func (b *bidSimulator) simBid(interruptCh chan int32, bidRuntime *BidRuntime) {
|
||||
return
|
||||
}
|
||||
|
||||
// if the left time is not enough to do simulation, return
|
||||
delay := b.engine.Delay(b.chain, bidRuntime.env.header, &b.delayLeftOver)
|
||||
if delay == nil || *delay <= 0 {
|
||||
log.Info("BidSimulator: abort commit, not enough time to simulate",
|
||||
"builder", bidRuntime.bid.Builder, "bidHash", bidRuntime.bid.Hash().Hex())
|
||||
return
|
||||
}
|
||||
|
||||
gasLimit := bidRuntime.env.header.GasLimit
|
||||
if bidRuntime.env.gasPool == nil {
|
||||
bidRuntime.env.gasPool = new(core.GasPool).AddGas(gasLimit)
|
||||
@ -592,6 +582,7 @@ func (b *bidSimulator) simBid(interruptCh chan int32, bidRuntime *BidRuntime) {
|
||||
return
|
||||
}
|
||||
|
||||
// commit transactions in bid
|
||||
for _, tx := range bidRuntime.bid.Txs {
|
||||
select {
|
||||
case <-interruptCh:
|
||||
@ -609,7 +600,7 @@ func (b *bidSimulator) simBid(interruptCh chan int32, bidRuntime *BidRuntime) {
|
||||
break
|
||||
}
|
||||
|
||||
err = bidRuntime.commitTransaction(b.chain, b.chainConfig, tx)
|
||||
err = bidRuntime.commitTransaction(b.chain, b.chainConfig, tx, bidRuntime.bid.UnRevertible.Contains(tx.Hash()))
|
||||
if err != nil {
|
||||
log.Error("BidSimulator: failed to commit tx", "bidHash", bidRuntime.bid.Hash(), "tx", tx.Hash(), "err", err)
|
||||
err = fmt.Errorf("invalid tx in bid, %v", err)
|
||||
@ -617,26 +608,41 @@ func (b *bidSimulator) simBid(interruptCh chan int32, bidRuntime *BidRuntime) {
|
||||
}
|
||||
}
|
||||
|
||||
bidRuntime.packReward(b.config.ValidatorCommission)
|
||||
|
||||
// return if bid is invalid, reportIssue issue to mev-sentry/builder if simulation is fully done
|
||||
if !bidRuntime.validReward() {
|
||||
err = errors.New("reward does not achieve the expectation")
|
||||
return
|
||||
// check if bid reward is valid
|
||||
{
|
||||
bidRuntime.packReward(b.config.ValidatorCommission)
|
||||
if !bidRuntime.validReward() {
|
||||
err = errors.New("reward does not achieve the expectation")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// fill transactions from mempool
|
||||
// check if bid gas price is lower than min gas price
|
||||
{
|
||||
bidGasUsed := uint64(0)
|
||||
bidGasFee := bidRuntime.env.state.GetBalance(consensus.SystemAddress)
|
||||
|
||||
for _, receipt := range bidRuntime.env.receipts {
|
||||
bidGasUsed += receipt.GasUsed
|
||||
}
|
||||
|
||||
bidGasPrice := new(big.Int).Div(bidGasFee.ToBig(), new(big.Int).SetUint64(bidGasUsed))
|
||||
if bidGasPrice.Cmp(b.minGasPrice) < 0 {
|
||||
err = errors.New("bid gas price is lower than min gas price")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// if enable greedy merge, fill bid env with transactions from mempool
|
||||
if b.config.GreedyMergeTx {
|
||||
delay := b.engine.Delay(b.chain, bidRuntime.env.header, &b.delayLeftOver)
|
||||
if delay != nil && *delay > 0 {
|
||||
stopTimer := time.NewTimer(*delay)
|
||||
|
||||
bidTxsSet := mapset.NewSet[common.Hash]()
|
||||
for _, tx := range bidRuntime.bid.Txs {
|
||||
bidTxsSet.Add(tx.Hash())
|
||||
}
|
||||
|
||||
fillErr := b.bidWorker.fillTransactions(interruptCh, bidRuntime.env, stopTimer, bidTxsSet)
|
||||
fillErr := b.bidWorker.fillTransactions(interruptCh, bidRuntime.env, nil, bidTxsSet)
|
||||
log.Trace("BidSimulator: greedy merge stopped", "block", bidRuntime.env.header.Number,
|
||||
"builder", bidRuntime.bid.Builder, "tx count", bidRuntime.env.tcount-bidTxLen+1, "err", fillErr)
|
||||
|
||||
@ -645,8 +651,9 @@ func (b *bidSimulator) simBid(interruptCh chan int32, bidRuntime *BidRuntime) {
|
||||
}
|
||||
}
|
||||
|
||||
// commit payBidTx at the end of the block
|
||||
bidRuntime.env.gasPool.AddGas(params.PayBidTxGasLimit)
|
||||
err = bidRuntime.commitTransaction(b.chain, b.chainConfig, payBidTx)
|
||||
err = bidRuntime.commitTransaction(b.chain, b.chainConfig, payBidTx, true)
|
||||
if err != nil {
|
||||
log.Error("BidSimulator: failed to commit tx", "builder", bidRuntime.bid.Builder,
|
||||
"bidHash", bidRuntime.bid.Hash(), "tx", payBidTx.Hash(), "err", err)
|
||||
@ -711,6 +718,7 @@ type BidRuntime struct {
|
||||
packedBlockReward *big.Int
|
||||
packedValidatorReward *big.Int
|
||||
|
||||
finished chan struct{}
|
||||
duration time.Duration
|
||||
}
|
||||
|
||||
@ -727,12 +735,10 @@ func (r *BidRuntime) packReward(validatorCommission uint64) {
|
||||
r.packedValidatorReward.Sub(r.packedValidatorReward, r.bid.BuilderFee)
|
||||
}
|
||||
|
||||
func (r *BidRuntime) commitTransaction(chain *core.BlockChain, chainConfig *params.ChainConfig, tx *types.Transaction) error {
|
||||
func (r *BidRuntime) commitTransaction(chain *core.BlockChain, chainConfig *params.ChainConfig, tx *types.Transaction, unRevertible bool) error {
|
||||
var (
|
||||
env = r.env
|
||||
snap = env.state.Snapshot()
|
||||
gp = env.gasPool.Gas()
|
||||
sc *types.BlobSidecar
|
||||
env = r.env
|
||||
sc *types.BlobSidecar
|
||||
)
|
||||
|
||||
// Start executing the transaction
|
||||
@ -755,9 +761,9 @@ func (r *BidRuntime) commitTransaction(chain *core.BlockChain, chainConfig *para
|
||||
receipt, err := core.ApplyTransaction(chainConfig, chain, &env.coinbase, env.gasPool, env.state, env.header, tx,
|
||||
&env.header.GasUsed, *chain.GetVMConfig(), core.NewReceiptBloomGenerator())
|
||||
if err != nil {
|
||||
env.state.RevertToSnapshot(snap)
|
||||
env.gasPool.SetGas(gp)
|
||||
return err
|
||||
} else if unRevertible && receipt.Status == types.ReceiptStatusFailed {
|
||||
return errors.New("no revertible transaction failed")
|
||||
}
|
||||
|
||||
if tx.Type() == types.BlobTxType {
|
||||
|
@ -102,7 +102,7 @@ func New(eth Backend, config *Config, chainConfig *params.ChainConfig, mux *even
|
||||
worker: newWorker(config, chainConfig, engine, eth, mux, isLocalBlock, false),
|
||||
}
|
||||
|
||||
miner.bidSimulator = newBidSimulator(&config.Mev, config.DelayLeftOver, eth.BlockChain(), chainConfig, engine, miner.worker)
|
||||
miner.bidSimulator = newBidSimulator(&config.Mev, config.DelayLeftOver, config.GasPrice, eth.BlockChain(), chainConfig, engine, miner.worker)
|
||||
miner.worker.setBestBidFetcher(miner.bidSimulator)
|
||||
|
||||
miner.wg.Add(1)
|
||||
|
@ -67,6 +67,9 @@ const (
|
||||
// the current 4 mining loops could have asynchronous risk of mining block with
|
||||
// save height, keep recently mined blocks to avoid double sign for safety,
|
||||
recentMinedCacheLimit = 20
|
||||
|
||||
// the default to wait for the mev miner to finish
|
||||
waitMEVMinerEndTimeLimit = 50 * time.Millisecond
|
||||
)
|
||||
|
||||
var (
|
||||
@ -171,6 +174,7 @@ type getWorkReq struct {
|
||||
|
||||
type bidFetcher interface {
|
||||
GetBestBid(parentHash common.Hash) *BidRuntime
|
||||
GetSimulatingBid(prevBlockHash common.Hash) *BidRuntime
|
||||
}
|
||||
|
||||
// worker is the main object which takes care of submitting new work to consensus engine
|
||||
@ -1336,6 +1340,15 @@ LOOP:
|
||||
// when in-turn, compare with remote work.
|
||||
from := bestWork.coinbase
|
||||
if w.bidFetcher != nil && bestWork.header.Difficulty.Cmp(diffInTurn) == 0 {
|
||||
if pendingBid := w.bidFetcher.GetSimulatingBid(bestWork.header.ParentHash); pendingBid != nil {
|
||||
waitBidTimer := time.NewTimer(waitMEVMinerEndTimeLimit)
|
||||
defer waitBidTimer.Stop()
|
||||
select {
|
||||
case <-waitBidTimer.C:
|
||||
case <-pendingBid.finished:
|
||||
}
|
||||
}
|
||||
|
||||
bestBid := w.bidFetcher.GetBestBid(bestWork.header.ParentHash)
|
||||
|
||||
if bestBid != nil {
|
||||
|
13
node/node.go
13
node/node.go
@ -773,12 +773,13 @@ func (n *Node) OpenDatabase(name string, cache, handles int, namespace string, r
|
||||
db = rawdb.NewMemoryDatabase()
|
||||
} else {
|
||||
db, err = rawdb.Open(rawdb.OpenOptions{
|
||||
Type: n.config.DBEngine,
|
||||
Directory: n.ResolvePath(name),
|
||||
Namespace: namespace,
|
||||
Cache: cache,
|
||||
Handles: handles,
|
||||
ReadOnly: readonly,
|
||||
Type: n.config.DBEngine,
|
||||
Directory: n.ResolvePath(name),
|
||||
Namespace: namespace,
|
||||
Cache: cache,
|
||||
Handles: handles,
|
||||
ReadOnly: readonly,
|
||||
MultiDataBase: n.CheckIfMultiDataBase(),
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -153,6 +153,7 @@ var (
|
||||
FeynmanFixTime: newUint64(1713419340), // 2024-04-18 05:49:00 AM UTC
|
||||
CancunTime: newUint64(1718863500), // 2024-06-20 06:05:00 AM UTC
|
||||
HaberTime: newUint64(1718863500), // 2024-06-20 06:05:00 AM UTC
|
||||
BohrTime: nil,
|
||||
|
||||
Parlia: &ParliaConfig{
|
||||
Period: 3,
|
||||
@ -192,6 +193,7 @@ var (
|
||||
FeynmanFixTime: newUint64(1711342800), // 2024-03-25 5:00:00 AM UTC
|
||||
CancunTime: newUint64(1713330442), // 2024-04-17 05:07:22 AM UTC
|
||||
HaberTime: newUint64(1716962820), // 2024-05-29 06:07:00 AM UTC
|
||||
BohrTime: nil,
|
||||
|
||||
Parlia: &ParliaConfig{
|
||||
Period: 3,
|
||||
@ -232,6 +234,7 @@ var (
|
||||
FeynmanFixTime: newUint64(0),
|
||||
CancunTime: newUint64(0),
|
||||
HaberTime: newUint64(0),
|
||||
BohrTime: newUint64(0),
|
||||
|
||||
Parlia: &ParliaConfig{
|
||||
Period: 3,
|
||||
@ -509,6 +512,7 @@ type ChainConfig struct {
|
||||
FeynmanFixTime *uint64 `json:"feynmanFixTime,omitempty"` // FeynmanFix switch time (nil = no fork, 0 = already activated)
|
||||
CancunTime *uint64 `json:"cancunTime,omitempty"` // Cancun switch time (nil = no fork, 0 = already on cancun)
|
||||
HaberTime *uint64 `json:"haberTime,omitempty"` // Haber switch time (nil = no fork, 0 = already on haber)
|
||||
BohrTime *uint64 `json:"bohrTime,omitempty"` // Bohr switch time (nil = no fork, 0 = already on bohr)
|
||||
PragueTime *uint64 `json:"pragueTime,omitempty"` // Prague switch time (nil = no fork, 0 = already on prague)
|
||||
VerkleTime *uint64 `json:"verkleTime,omitempty"` // Verkle switch time (nil = no fork, 0 = already on verkle)
|
||||
|
||||
@ -619,7 +623,12 @@ func (c *ChainConfig) String() string {
|
||||
HaberTime = big.NewInt(0).SetUint64(*c.HaberTime)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, Ramanujan: %v, Niels: %v, MirrorSync: %v, Bruno: %v, Berlin: %v, YOLO v3: %v, CatalystBlock: %v, London: %v, ArrowGlacier: %v, MergeFork:%v, Euler: %v, Gibbs: %v, Nano: %v, Moran: %v, Planck: %v,Luban: %v, Plato: %v, Hertz: %v, Hertzfix: %v, ShanghaiTime: %v, KeplerTime: %v, FeynmanTime: %v, FeynmanFixTime: %v, CancunTime: %v, HaberTime: %v, Engine: %v}",
|
||||
var BohrTime *big.Int
|
||||
if c.BohrTime != nil {
|
||||
BohrTime = big.NewInt(0).SetUint64(*c.BohrTime)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, Ramanujan: %v, Niels: %v, MirrorSync: %v, Bruno: %v, Berlin: %v, YOLO v3: %v, CatalystBlock: %v, London: %v, ArrowGlacier: %v, MergeFork:%v, Euler: %v, Gibbs: %v, Nano: %v, Moran: %v, Planck: %v,Luban: %v, Plato: %v, Hertz: %v, Hertzfix: %v, ShanghaiTime: %v, KeplerTime: %v, FeynmanTime: %v, FeynmanFixTime: %v, CancunTime: %v, HaberTime: %v, BohrTime: %v, Engine: %v}",
|
||||
c.ChainID,
|
||||
c.HomesteadBlock,
|
||||
c.DAOForkBlock,
|
||||
@ -657,6 +666,7 @@ func (c *ChainConfig) String() string {
|
||||
FeynmanFixTime,
|
||||
CancunTime,
|
||||
HaberTime,
|
||||
BohrTime,
|
||||
engine,
|
||||
)
|
||||
}
|
||||
@ -929,6 +939,20 @@ func (c *ChainConfig) IsHaber(num *big.Int, time uint64) bool {
|
||||
return c.IsLondon(num) && isTimestampForked(c.HaberTime, time)
|
||||
}
|
||||
|
||||
// IsBohr returns whether time is either equal to the Bohr fork time or greater.
|
||||
func (c *ChainConfig) IsBohr(num *big.Int, time uint64) bool {
|
||||
return c.IsLondon(num) && isTimestampForked(c.BohrTime, time)
|
||||
}
|
||||
|
||||
// IsOnBohr returns whether currentBlockTime is either equal to the Bohr fork time or greater firstly.
|
||||
func (c *ChainConfig) IsOnBohr(currentBlockNumber *big.Int, lastBlockTime uint64, currentBlockTime uint64) bool {
|
||||
lastBlockNumber := new(big.Int)
|
||||
if currentBlockNumber.Cmp(big.NewInt(1)) >= 0 {
|
||||
lastBlockNumber.Sub(currentBlockNumber, big.NewInt(1))
|
||||
}
|
||||
return !c.IsBohr(lastBlockNumber, lastBlockTime) && c.IsBohr(currentBlockNumber, currentBlockTime)
|
||||
}
|
||||
|
||||
// IsPrague returns whether num is either equal to the Prague fork time or greater.
|
||||
func (c *ChainConfig) IsPrague(num *big.Int, time uint64) bool {
|
||||
return c.IsLondon(num) && isTimestampForked(c.PragueTime, time)
|
||||
@ -993,6 +1017,7 @@ func (c *ChainConfig) CheckConfigForkOrder() error {
|
||||
{name: "feynmanFixTime", timestamp: c.FeynmanFixTime},
|
||||
{name: "cancunTime", timestamp: c.CancunTime},
|
||||
{name: "haberTime", timestamp: c.HaberTime},
|
||||
{name: "bohrTime", timestamp: c.BohrTime},
|
||||
{name: "pragueTime", timestamp: c.PragueTime, optional: true},
|
||||
{name: "verkleTime", timestamp: c.VerkleTime, optional: true},
|
||||
} {
|
||||
@ -1323,7 +1348,7 @@ type Rules struct {
|
||||
IsHertz bool
|
||||
IsHertzfix bool
|
||||
IsShanghai, IsKepler, IsFeynman, IsCancun, IsHaber bool
|
||||
IsPrague, IsVerkle bool
|
||||
IsBohr, IsPrague, IsVerkle bool
|
||||
}
|
||||
|
||||
// Rules ensures c's ChainID is not nil.
|
||||
@ -1359,6 +1384,7 @@ func (c *ChainConfig) Rules(num *big.Int, isMerge bool, timestamp uint64) Rules
|
||||
IsFeynman: c.IsFeynman(num, timestamp),
|
||||
IsCancun: c.IsCancun(num, timestamp),
|
||||
IsHaber: c.IsHaber(num, timestamp),
|
||||
IsBohr: c.IsBohr(num, timestamp),
|
||||
IsPrague: c.IsPrague(num, timestamp),
|
||||
IsVerkle: c.IsVerkle(num, timestamp),
|
||||
}
|
||||
|
@ -23,7 +23,7 @@ import (
|
||||
const (
|
||||
VersionMajor = 1 // Major version component of the current release
|
||||
VersionMinor = 4 // Minor version component of the current release
|
||||
VersionPatch = 8 // Patch version component of the current release
|
||||
VersionPatch = 9 // Patch version component of the current release
|
||||
VersionMeta = "" // Version metadata to append to the version string
|
||||
)
|
||||
|
||||
|
@ -4,17 +4,15 @@ import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"os"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/mclock"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
@ -26,63 +24,113 @@ import (
|
||||
"golang.org/x/sync/semaphore"
|
||||
)
|
||||
|
||||
type Account struct {
|
||||
Nonce uint64
|
||||
Balance *big.Int
|
||||
Root common.Hash // merkle root of the storage trie
|
||||
CodeHash []byte
|
||||
}
|
||||
|
||||
type Database interface {
|
||||
database.Database
|
||||
Scheme() string
|
||||
Cap(limit common.StorageSize) error
|
||||
DiskDB() ethdb.Database
|
||||
}
|
||||
|
||||
const TopN = 3
|
||||
|
||||
type Inspector struct {
|
||||
trie *Trie // traverse trie
|
||||
db Database
|
||||
stateRootHash common.Hash
|
||||
blocknum uint64
|
||||
blockNum uint64
|
||||
root node // root of triedb
|
||||
totalNum uint64
|
||||
wg sync.WaitGroup
|
||||
statLock sync.RWMutex
|
||||
result map[string]*TrieTreeStat
|
||||
sem *semaphore.Weighted
|
||||
eoaAccountNums uint64
|
||||
|
||||
wg sync.WaitGroup
|
||||
|
||||
results stat
|
||||
topN int
|
||||
|
||||
totalAccountNum atomic.Uint64
|
||||
totalStorageNum atomic.Uint64
|
||||
lastTime mclock.AbsTime
|
||||
}
|
||||
|
||||
type TrieTreeStat struct {
|
||||
is_account_trie bool
|
||||
theNodeStatByLevel [15]NodeStat
|
||||
totalNodeStat NodeStat
|
||||
type stat struct {
|
||||
lock sync.RWMutex
|
||||
account *trieStat
|
||||
storageTopN []*trieStat
|
||||
storageTopNTotal []uint64
|
||||
storageTotal nodeStat
|
||||
storageTrieNum uint64
|
||||
}
|
||||
|
||||
type NodeStat struct {
|
||||
ShortNodeCnt uint64
|
||||
FullNodeCnt uint64
|
||||
ValueNodeCnt uint64
|
||||
type trieStat struct {
|
||||
owner common.Hash
|
||||
totalNodeStat nodeStat
|
||||
nodeStatByLevel [16]nodeStat
|
||||
}
|
||||
|
||||
func (trieStat *TrieTreeStat) AtomicAdd(theNode node, height uint32) {
|
||||
type nodeStat struct {
|
||||
ShortNodeCnt atomic.Uint64
|
||||
FullNodeCnt atomic.Uint64
|
||||
ValueNodeCnt atomic.Uint64
|
||||
}
|
||||
|
||||
func (ns *nodeStat) IsEmpty() bool {
|
||||
if ns.FullNodeCnt.Load() == 0 && ns.ShortNodeCnt.Load() == 0 && ns.ValueNodeCnt.Load() == 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *stat) add(ts *trieStat, topN int) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
if ts.owner == (common.Hash{}) {
|
||||
s.account = ts
|
||||
return
|
||||
}
|
||||
|
||||
total := ts.totalNodeStat.ValueNodeCnt.Load() + ts.totalNodeStat.FullNodeCnt.Load() + ts.totalNodeStat.ShortNodeCnt.Load()
|
||||
if len(s.storageTopNTotal) == 0 || total > s.storageTopNTotal[len(s.storageTopNTotal)-1] {
|
||||
var (
|
||||
i int
|
||||
t uint64
|
||||
)
|
||||
for i, t = range s.storageTopNTotal {
|
||||
if total < t {
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
s.storageTopNTotal = append(s.storageTopNTotal[:i], append([]uint64{total}, s.storageTopNTotal[i:]...)...)
|
||||
s.storageTopN = append(s.storageTopN[:i], append([]*trieStat{ts}, s.storageTopN[i:]...)...)
|
||||
if len(s.storageTopN) > topN {
|
||||
s.storageTopNTotal = s.storageTopNTotal[:topN]
|
||||
s.storageTopN = s.storageTopN[:topN]
|
||||
}
|
||||
}
|
||||
|
||||
s.storageTotal.ShortNodeCnt.Add(ts.totalNodeStat.ShortNodeCnt.Load())
|
||||
s.storageTotal.ValueNodeCnt.Add(ts.totalNodeStat.ValueNodeCnt.Load())
|
||||
s.storageTotal.FullNodeCnt.Add(ts.totalNodeStat.FullNodeCnt.Load())
|
||||
s.storageTrieNum++
|
||||
}
|
||||
|
||||
func (trieStat *trieStat) add(theNode node, height int) {
|
||||
switch (theNode).(type) {
|
||||
case *shortNode:
|
||||
atomic.AddUint64(&trieStat.totalNodeStat.ShortNodeCnt, 1)
|
||||
atomic.AddUint64(&(trieStat.theNodeStatByLevel[height].ShortNodeCnt), 1)
|
||||
trieStat.totalNodeStat.ShortNodeCnt.Add(1)
|
||||
trieStat.nodeStatByLevel[height].ShortNodeCnt.Add(1)
|
||||
case *fullNode:
|
||||
atomic.AddUint64(&trieStat.totalNodeStat.FullNodeCnt, 1)
|
||||
atomic.AddUint64(&trieStat.theNodeStatByLevel[height].FullNodeCnt, 1)
|
||||
trieStat.totalNodeStat.FullNodeCnt.Add(1)
|
||||
trieStat.nodeStatByLevel[height].FullNodeCnt.Add(1)
|
||||
case valueNode:
|
||||
atomic.AddUint64(&trieStat.totalNodeStat.ValueNodeCnt, 1)
|
||||
atomic.AddUint64(&((trieStat.theNodeStatByLevel[height]).ValueNodeCnt), 1)
|
||||
default:
|
||||
panic(errors.New("Invalid node type to statistics"))
|
||||
trieStat.totalNodeStat.ValueNodeCnt.Add(1)
|
||||
trieStat.nodeStatByLevel[height].ValueNodeCnt.Add(1)
|
||||
}
|
||||
}
|
||||
|
||||
func (trieStat *TrieTreeStat) Display(ownerAddress string, treeType string) {
|
||||
table := tablewriter.NewWriter(os.Stdout)
|
||||
func (trieStat *trieStat) Display(ownerAddress string, treeType string) string {
|
||||
sw := new(strings.Builder)
|
||||
table := tablewriter.NewWriter(sw)
|
||||
table.SetHeader([]string{"-", "Level", "ShortNodeCnt", "FullNodeCnt", "ValueNodeCnt"})
|
||||
if ownerAddress == "" {
|
||||
table.SetCaption(true, fmt.Sprintf("%v", treeType))
|
||||
@ -90,38 +138,27 @@ func (trieStat *TrieTreeStat) Display(ownerAddress string, treeType string) {
|
||||
table.SetCaption(true, fmt.Sprintf("%v-%v", treeType, ownerAddress))
|
||||
}
|
||||
table.SetAlignment(1)
|
||||
for i := 0; i < len(trieStat.theNodeStatByLevel); i++ {
|
||||
nodeStat := trieStat.theNodeStatByLevel[i]
|
||||
if nodeStat.FullNodeCnt == 0 && nodeStat.ShortNodeCnt == 0 && nodeStat.ValueNodeCnt == 0 {
|
||||
break
|
||||
|
||||
for i := range trieStat.nodeStatByLevel {
|
||||
if trieStat.nodeStatByLevel[i].IsEmpty() {
|
||||
continue
|
||||
}
|
||||
table.AppendBulk([][]string{
|
||||
{"-", strconv.Itoa(i), nodeStat.ShortNodeCount(), nodeStat.FullNodeCount(), nodeStat.ValueNodeCount()},
|
||||
{"-", fmt.Sprintf("%d", i),
|
||||
fmt.Sprintf("%d", trieStat.nodeStatByLevel[i].ShortNodeCnt.Load()),
|
||||
fmt.Sprintf("%d", trieStat.nodeStatByLevel[i].FullNodeCnt.Load()),
|
||||
fmt.Sprintf("%d", trieStat.nodeStatByLevel[i].ValueNodeCnt.Load())},
|
||||
})
|
||||
}
|
||||
table.AppendBulk([][]string{
|
||||
{"Total", "-", trieStat.totalNodeStat.ShortNodeCount(), trieStat.totalNodeStat.FullNodeCount(), trieStat.totalNodeStat.ValueNodeCount()},
|
||||
{"Total", "-", fmt.Sprintf("%d", trieStat.totalNodeStat.ShortNodeCnt.Load()), fmt.Sprintf("%d", trieStat.totalNodeStat.FullNodeCnt.Load()), fmt.Sprintf("%d", trieStat.totalNodeStat.ValueNodeCnt.Load())},
|
||||
})
|
||||
table.Render()
|
||||
}
|
||||
|
||||
func Uint64ToString(cnt uint64) string {
|
||||
return fmt.Sprintf("%v", cnt)
|
||||
}
|
||||
|
||||
func (nodeStat *NodeStat) ShortNodeCount() string {
|
||||
return Uint64ToString(nodeStat.ShortNodeCnt)
|
||||
}
|
||||
|
||||
func (nodeStat *NodeStat) FullNodeCount() string {
|
||||
return Uint64ToString(nodeStat.FullNodeCnt)
|
||||
}
|
||||
func (nodeStat *NodeStat) ValueNodeCount() string {
|
||||
return Uint64ToString(nodeStat.ValueNodeCnt)
|
||||
return sw.String()
|
||||
}
|
||||
|
||||
// NewInspector return a inspector obj
|
||||
func NewInspector(tr *Trie, db Database, stateRootHash common.Hash, blocknum uint64, jobnum uint64) (*Inspector, error) {
|
||||
func NewInspector(tr *Trie, db Database, stateRootHash common.Hash, blockNum uint64, jobNum uint64, topN int) (*Inspector, error) {
|
||||
if tr == nil {
|
||||
return nil, errors.New("trie is nil")
|
||||
}
|
||||
@ -131,15 +168,20 @@ func NewInspector(tr *Trie, db Database, stateRootHash common.Hash, blocknum uin
|
||||
}
|
||||
|
||||
ins := &Inspector{
|
||||
trie: tr,
|
||||
db: db,
|
||||
stateRootHash: stateRootHash,
|
||||
blocknum: blocknum,
|
||||
root: tr.root,
|
||||
result: make(map[string]*TrieTreeStat),
|
||||
totalNum: (uint64)(0),
|
||||
wg: sync.WaitGroup{},
|
||||
sem: semaphore.NewWeighted(int64(jobnum)),
|
||||
trie: tr,
|
||||
db: db,
|
||||
stateRootHash: stateRootHash,
|
||||
blockNum: blockNum,
|
||||
root: tr.root,
|
||||
results: stat{},
|
||||
topN: topN,
|
||||
totalAccountNum: atomic.Uint64{},
|
||||
totalStorageNum: atomic.Uint64{},
|
||||
lastTime: mclock.Now(),
|
||||
sem: semaphore.NewWeighted(int64(jobNum)),
|
||||
|
||||
wg: sync.WaitGroup{},
|
||||
|
||||
eoaAccountNums: 0,
|
||||
}
|
||||
|
||||
@ -147,155 +189,123 @@ func NewInspector(tr *Trie, db Database, stateRootHash common.Hash, blocknum uin
|
||||
}
|
||||
|
||||
// Run statistics, external call
|
||||
func (inspect *Inspector) Run() {
|
||||
accountTrieStat := &TrieTreeStat{
|
||||
is_account_trie: true,
|
||||
}
|
||||
if inspect.db.Scheme() == rawdb.HashScheme {
|
||||
ticker := time.NewTicker(30 * time.Second)
|
||||
go func() {
|
||||
defer ticker.Stop()
|
||||
for range ticker.C {
|
||||
inspect.db.Cap(DEFAULT_TRIEDBCACHE_SIZE)
|
||||
func (s *Inspector) Run() {
|
||||
ticker := time.NewTicker(30 * time.Second)
|
||||
go func() {
|
||||
defer ticker.Stop()
|
||||
for range ticker.C {
|
||||
if s.db.Scheme() == rawdb.HashScheme {
|
||||
s.db.Cap(DEFAULT_TRIEDBCACHE_SIZE)
|
||||
}
|
||||
}()
|
||||
}
|
||||
runtime.GC()
|
||||
}
|
||||
}()
|
||||
|
||||
if _, ok := inspect.result[""]; !ok {
|
||||
inspect.result[""] = accountTrieStat
|
||||
}
|
||||
log.Info("Find Account Trie Tree", "rootHash: ", inspect.trie.Hash().String(), "BlockNum: ", inspect.blocknum)
|
||||
log.Info("Find Account Trie Tree", "rootHash: ", s.trie.Hash().String(), "BlockNum: ", s.blockNum)
|
||||
|
||||
inspect.ConcurrentTraversal(inspect.trie, accountTrieStat, inspect.root, 0, []byte{})
|
||||
inspect.wg.Wait()
|
||||
ts := &trieStat{
|
||||
owner: common.Hash{},
|
||||
}
|
||||
s.traversal(s.trie, ts, s.root, 0, []byte{})
|
||||
s.results.add(ts, s.topN)
|
||||
s.wg.Wait()
|
||||
}
|
||||
|
||||
func (inspect *Inspector) SubConcurrentTraversal(theTrie *Trie, theTrieTreeStat *TrieTreeStat, theNode node, height uint32, path []byte) {
|
||||
inspect.ConcurrentTraversal(theTrie, theTrieTreeStat, theNode, height, path)
|
||||
inspect.wg.Done()
|
||||
}
|
||||
|
||||
func (inspect *Inspector) ConcurrentTraversal(theTrie *Trie, theTrieTreeStat *TrieTreeStat, theNode node, height uint32, path []byte) {
|
||||
// print process progress
|
||||
total_num := atomic.AddUint64(&inspect.totalNum, 1)
|
||||
if total_num%100000 == 0 {
|
||||
fmt.Printf("Complete progress: %v, go routines Num: %v\n", total_num, runtime.NumGoroutine())
|
||||
}
|
||||
|
||||
func (s *Inspector) traversal(trie *Trie, ts *trieStat, n node, height int, path []byte) {
|
||||
// nil node
|
||||
if theNode == nil {
|
||||
if n == nil {
|
||||
return
|
||||
}
|
||||
|
||||
switch current := (theNode).(type) {
|
||||
ts.add(n, height)
|
||||
|
||||
switch current := (n).(type) {
|
||||
case *shortNode:
|
||||
inspect.ConcurrentTraversal(theTrie, theTrieTreeStat, current.Val, height, append(path, current.Key...))
|
||||
s.traversal(trie, ts, current.Val, height, append(path, current.Key...))
|
||||
case *fullNode:
|
||||
for idx, child := range current.Children {
|
||||
if child == nil {
|
||||
continue
|
||||
}
|
||||
childPath := append(path, byte(idx))
|
||||
if inspect.sem.TryAcquire(1) {
|
||||
inspect.wg.Add(1)
|
||||
dst := make([]byte, len(childPath))
|
||||
copy(dst, childPath)
|
||||
go inspect.SubConcurrentTraversal(theTrie, theTrieTreeStat, child, height+1, dst)
|
||||
} else {
|
||||
inspect.ConcurrentTraversal(theTrie, theTrieTreeStat, child, height+1, childPath)
|
||||
}
|
||||
p := common.CopyBytes(append(path, byte(idx)))
|
||||
s.traversal(trie, ts, child, height+1, p)
|
||||
}
|
||||
case hashNode:
|
||||
n, err := theTrie.resloveWithoutTrack(current, path)
|
||||
tn, err := trie.resloveWithoutTrack(current, path)
|
||||
if err != nil {
|
||||
fmt.Printf("Resolve HashNode error: %v, TrieRoot: %v, Height: %v, Path: %v\n", err, theTrie.Hash().String(), height+1, path)
|
||||
fmt.Printf("Resolve HashNode error: %v, TrieRoot: %v, Height: %v, Path: %v\n", err, trie.Hash().String(), height+1, path)
|
||||
return
|
||||
}
|
||||
inspect.ConcurrentTraversal(theTrie, theTrieTreeStat, n, height, path)
|
||||
return
|
||||
s.PrintProgress(trie)
|
||||
s.traversal(trie, ts, tn, height, path)
|
||||
case valueNode:
|
||||
if !hasTerm(path) {
|
||||
break
|
||||
}
|
||||
var account Account
|
||||
var account types.StateAccount
|
||||
if err := rlp.Decode(bytes.NewReader(current), &account); err != nil {
|
||||
break
|
||||
}
|
||||
if common.BytesToHash(account.CodeHash) == types.EmptyCodeHash {
|
||||
inspect.eoaAccountNums++
|
||||
s.eoaAccountNums++
|
||||
}
|
||||
if account.Root == (common.Hash{}) || account.Root == types.EmptyRootHash {
|
||||
break
|
||||
}
|
||||
ownerAddress := common.BytesToHash(hexToCompact(path))
|
||||
contractTrie, err := New(StorageTrieID(inspect.stateRootHash, ownerAddress, account.Root), inspect.db)
|
||||
contractTrie, err := New(StorageTrieID(s.stateRootHash, ownerAddress, account.Root), s.db)
|
||||
if err != nil {
|
||||
fmt.Printf("New contract trie node: %v, error: %v, Height: %v, Path: %v\n", theNode, err, height, path)
|
||||
break
|
||||
panic(err)
|
||||
}
|
||||
contractTrie.tracer.reset()
|
||||
trieStat := &TrieTreeStat{
|
||||
is_account_trie: false,
|
||||
}
|
||||
|
||||
inspect.statLock.Lock()
|
||||
if _, ok := inspect.result[ownerAddress.String()]; !ok {
|
||||
inspect.result[ownerAddress.String()] = trieStat
|
||||
if s.sem.TryAcquire(1) {
|
||||
s.wg.Add(1)
|
||||
go func() {
|
||||
t := &trieStat{
|
||||
owner: ownerAddress,
|
||||
}
|
||||
s.traversal(contractTrie, t, contractTrie.root, 0, []byte{})
|
||||
s.results.add(t, s.topN)
|
||||
s.sem.Release(1)
|
||||
s.wg.Done()
|
||||
}()
|
||||
} else {
|
||||
t := &trieStat{
|
||||
owner: ownerAddress,
|
||||
}
|
||||
s.traversal(contractTrie, t, contractTrie.root, 0, []byte{})
|
||||
s.results.add(t, s.topN)
|
||||
}
|
||||
inspect.statLock.Unlock()
|
||||
|
||||
// log.Info("Find Contract Trie Tree, rootHash: ", contractTrie.Hash().String(), "")
|
||||
inspect.wg.Add(1)
|
||||
go inspect.SubConcurrentTraversal(contractTrie, trieStat, contractTrie.root, 0, []byte{})
|
||||
default:
|
||||
panic(errors.New("Invalid node type to traverse."))
|
||||
panic(errors.New("invalid node type to traverse"))
|
||||
}
|
||||
theTrieTreeStat.AtomicAdd(theNode, height)
|
||||
}
|
||||
|
||||
func (inspect *Inspector) DisplayResult() {
|
||||
func (s *Inspector) PrintProgress(t *Trie) {
|
||||
var (
|
||||
elapsed = mclock.Now().Sub(s.lastTime)
|
||||
)
|
||||
if t.owner == (common.Hash{}) {
|
||||
s.totalAccountNum.Add(1)
|
||||
} else {
|
||||
s.totalStorageNum.Add(1)
|
||||
}
|
||||
if elapsed > 4*time.Second {
|
||||
log.Info("traversal progress", "TotalAccountNum", s.totalAccountNum.Load(), "TotalStorageNum", s.totalStorageNum.Load(), "Goroutine", runtime.NumGoroutine())
|
||||
s.lastTime = mclock.Now()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Inspector) DisplayResult() {
|
||||
// display root hash
|
||||
if _, ok := inspect.result[""]; !ok {
|
||||
log.Info("Display result error", "missing account trie")
|
||||
return
|
||||
}
|
||||
inspect.result[""].Display("", "AccountTrie")
|
||||
fmt.Println(s.results.account.Display("", "AccountTrie"))
|
||||
fmt.Println("EOA accounts num: ", s.eoaAccountNums)
|
||||
|
||||
type SortedTrie struct {
|
||||
totalNum uint64
|
||||
ownerAddress string
|
||||
}
|
||||
// display contract trie
|
||||
var sortedTriesByNums []SortedTrie
|
||||
var totalContactsNodeStat NodeStat
|
||||
var contractTrieCnt uint64 = 0
|
||||
|
||||
for ownerAddress, stat := range inspect.result {
|
||||
if ownerAddress == "" {
|
||||
continue
|
||||
}
|
||||
contractTrieCnt++
|
||||
totalContactsNodeStat.ShortNodeCnt += stat.totalNodeStat.ShortNodeCnt
|
||||
totalContactsNodeStat.FullNodeCnt += stat.totalNodeStat.FullNodeCnt
|
||||
totalContactsNodeStat.ValueNodeCnt += stat.totalNodeStat.ValueNodeCnt
|
||||
totalNodeCnt := stat.totalNodeStat.ShortNodeCnt + stat.totalNodeStat.ValueNodeCnt + stat.totalNodeStat.FullNodeCnt
|
||||
sortedTriesByNums = append(sortedTriesByNums, SortedTrie{totalNum: totalNodeCnt, ownerAddress: ownerAddress})
|
||||
}
|
||||
sort.Slice(sortedTriesByNums, func(i, j int) bool {
|
||||
return sortedTriesByNums[i].totalNum > sortedTriesByNums[j].totalNum
|
||||
})
|
||||
fmt.Println("EOA accounts num: ", inspect.eoaAccountNums)
|
||||
// only display top 5
|
||||
for i, t := range sortedTriesByNums {
|
||||
if i > 5 {
|
||||
break
|
||||
}
|
||||
if stat, ok := inspect.result[t.ownerAddress]; !ok {
|
||||
log.Error("Storage trie stat not found", "ownerAddress", t.ownerAddress)
|
||||
} else {
|
||||
stat.Display(t.ownerAddress, "ContractTrie")
|
||||
}
|
||||
for _, st := range s.results.storageTopN {
|
||||
fmt.Println(st.Display(st.owner.String(), "StorageTrie"))
|
||||
}
|
||||
fmt.Printf("Contract Trie, total trie num: %v, ShortNodeCnt: %v, FullNodeCnt: %v, ValueNodeCnt: %v\n",
|
||||
contractTrieCnt, totalContactsNodeStat.ShortNodeCnt, totalContactsNodeStat.FullNodeCnt, totalContactsNodeStat.ValueNodeCnt)
|
||||
s.results.storageTrieNum, s.results.storageTotal.ShortNodeCnt.Load(), s.results.storageTotal.FullNodeCnt.Load(), s.results.storageTotal.ValueNodeCnt.Load())
|
||||
}
|
||||
|
@ -98,7 +98,7 @@ type tester struct {
|
||||
|
||||
func newTester(t *testing.T, historyLimit uint64) *tester {
|
||||
var (
|
||||
disk, _ = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false)
|
||||
disk, _ = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false)
|
||||
db = New(disk, &Config{
|
||||
StateHistory: historyLimit,
|
||||
CleanCacheSize: 256 * 1024,
|
||||
|
@ -152,12 +152,13 @@ func (kr *JournalKVReader) Close() {
|
||||
}
|
||||
|
||||
func newJournalWriter(file string, db ethdb.Database, journalType JournalType) JournalWriter {
|
||||
log.Info("New journal writer", "path", file, "journalType", journalType)
|
||||
if journalType == JournalKVType {
|
||||
log.Info("New journal writer for journal kv")
|
||||
return &JournalKVWriter{
|
||||
diskdb: db,
|
||||
}
|
||||
} else {
|
||||
log.Info("New journal writer for journal file", "path", file)
|
||||
fd, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
|
||||
if err != nil {
|
||||
return nil
|
||||
@ -169,8 +170,8 @@ func newJournalWriter(file string, db ethdb.Database, journalType JournalType) J
|
||||
}
|
||||
|
||||
func newJournalReader(file string, db ethdb.Database, journalType JournalType) (JournalReader, error) {
|
||||
log.Info("New journal reader", "path", file, "journalType", journalType)
|
||||
if journalType == JournalKVType {
|
||||
log.Info("New journal reader for journal kv")
|
||||
journal := rawdb.ReadTrieJournal(db)
|
||||
if len(journal) == 0 {
|
||||
return nil, errMissJournal
|
||||
@ -179,6 +180,7 @@ func newJournalReader(file string, db ethdb.Database, journalType JournalType) (
|
||||
journalBuf: bytes.NewBuffer(journal),
|
||||
}, nil
|
||||
} else {
|
||||
log.Info("New journal reader for journal file", "path", file)
|
||||
fd, err := os.Open(file)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, errMissJournal
|
||||
|
Loading…
Reference in New Issue
Block a user