Compare commits

..

26 Commits

Author SHA1 Message Date
emailtovamos
be71d41aa5 eth: formatting 2024-05-23 14:04:22 +01:00
emailtovamos
5b46fe13e7 eth: make transaction propagation paths in the network deterministic (#29034)
* eth: make transaction propagation paths in the network deterministic

* eth: avoid potential division by 0

* eth: make tx propagation dependent on local node id too

* eth: fix review comments
2024-05-23 14:00:29 +01:00
Satyajit Das
b0146261c7 jsutils: faucet successful requests within blocks (#2470) 2024-05-23 15:08:36 +08:00
lx
d7b9866d3b Merge pull request #2487 from bnb-chain/master
merge PRs from master to develop branch
2024-05-22 13:56:19 +08:00
Mars
08769ead2b dev: ensure consistency in BPS bundle result (#2479)
* dev: ensure consistency in BPS bundle result

* fix: remove env operation once the sim is discarded & rename
2024-05-21 12:24:41 +08:00
irrun
c77bb1110d fix: limit the gas price of the mev bid (#2473) 2024-05-20 14:33:47 +08:00
Mars
c856d21719 fix: move mev op to MinerAPI & add command to console (#2475) 2024-05-20 14:00:28 +08:00
Eric
5edd032cdb internal/ethapi: add optional parameter for blobSidecars (#2467) 2024-05-16 19:06:49 +08:00
galaio
6b8cbbe172 sync: fix some sync issues caused by prune-block. (#2466) 2024-05-16 12:07:13 +08:00
setunapo
5ea2ada0ee utils: add check_blobtx.js (#2463) 2024-05-15 18:17:57 +08:00
Fynn
b230a02006 cmd: fix memory leak when big dataset (#2455) 2024-05-15 15:28:57 +08:00
Nathan
86e3a02490 cmd/utils: add a flag to change breathe block interval for testing (#2462) 2024-05-15 15:27:05 +08:00
Nathan
0c0958ff87 eth/handler: check lists in body before broadcast blocks (#2461) 2024-05-15 14:54:25 +08:00
Péter Szilágyi
0b1438c3df eth: make transaction propagation paths in the network deterministic (#29034)
* eth: make transaction propagation paths in the network deterministic

* eth: avoid potential division by 0

* eth: make tx propagation dependent on local node id too

* eth: fix review comments
2024-03-02 22:39:22 +02:00
Sina Mahmoodi
0a2f33946b eth/catalyst: update simulated beacon for cancun (#28829)
* eth/catalyst: update simulated beacon for cancun

* validate blob hashes

* compute hashes from commitment

* fix beacon root and payload version

* check commitment conversion

* fix random attr

* flip dev to cancun
2024-02-29 14:17:32 +02:00
Péter Szilágyi
865e1e9f57 cmd/utils, core/rawdb, triedb/pathdb: flip hash to path scheme (#29108)
* cmd/utils, core/rawdb, triedb/pathdb: flip hash to path scheme

* graphql: run tests in hash mode as the chain maker needs it
2024-02-29 12:40:59 +02:00
yzb
db4cf69166 all: replace fmt.Errorf() with errors.New() if no param required (#29126)
replace-fmt-errorf

Co-authored-by: yzb@example.cn <yzb@example.cn>
2024-02-29 11:56:46 +02:00
Ng Wei Han
28d55218f7 cmd/geth: parseDumpConfig should not return closed db (#29100)
* cmd: parseDumpConfig should not return closed db

* fix lint
2024-02-29 11:56:17 +02:00
cui fliter
dbc27a199f all: fix function names in docs (#29128)
Signed-off-by: cui fliter <imcusg@gmail.com>
2024-02-29 11:29:06 +02:00
lightclient
1883438964 eth/catalyst: return invalid payload attributes instead of invalid parms for bad fcu payload (#29115) 2024-02-28 19:59:16 +01:00
buddho
9986a69c25 internal/ethapi: pass in accesslist in test (#29089)
Co-authored-by: Sina Mahmoodi <itz.s1na@gmail.com>
2024-02-28 18:38:21 +01:00
rjl493456442
5bae14f9df triedb/pathdb: fix panic in recoverable (#29107)
* triedb/pathdb: fix panic in recoverable

* triedb/pathdb: add todo

* triedb/pathdb: rename

* triedb/pathdb: rename
2024-02-28 14:40:28 +02:00
rjl493456442
49623bd469 core, triedb/pathdb: calculate the size for batch pre-allocation (#29106)
* core, triedb/pathdb: calculate the size for batch pre-allocation

* triedb/pathdb: address comment
2024-02-28 14:23:52 +02:00
Péter Szilágyi
170fcd80c6 params: being major version bump cycle 2024-02-28 10:01:52 +02:00
cui
02d77c98f9 core: using math.MaxUint64 instead of 0xffffffffffffffff (#29094) 2024-02-28 15:25:12 +08:00
Péter Szilágyi
57d2b552c7 params: begin v1.13.15 cycle 2024-02-27 13:53:30 +02:00
16 changed files with 526 additions and 292 deletions

View File

@@ -106,12 +106,12 @@ Remove blockchain and state databases`,
dbInspectTrieCmd = &cli.Command{
Action: inspectTrie,
Name: "inspect-trie",
- ArgsUsage: "<blocknum> <jobnum>",
+ ArgsUsage: "<blocknum> <jobnum> <topn>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
},
- Usage: "Inspect the MPT tree of the account and contract.",
+ Usage: "Inspect the MPT tree of the account and contract. 'blocknum' can be latest/snapshot/number. 'topn' means output the top N storage tries info ranked by the total number of TrieNodes",
Description: `This commands iterates the entrie WorldState.`,
}
dbCheckStateContentCmd = &cli.Command{
@@ -386,6 +386,7 @@ func inspectTrie(ctx *cli.Context) error {
blockNumber uint64
trieRootHash common.Hash
jobnum uint64
+ topN uint64
)
stack, _ := makeConfigNode(ctx)
@@ -411,12 +412,25 @@ func inspectTrie(ctx *cli.Context) error {
if ctx.NArg() == 1 {
jobnum = 1000
+ topN = 10
+ } else if ctx.NArg() == 2 {
+ var err error
+ jobnum, err = strconv.ParseUint(ctx.Args().Get(1), 10, 64)
+ if err != nil {
+ return fmt.Errorf("failed to Parse jobnum, Args[1]: %v, err: %v", ctx.Args().Get(1), err)
+ }
+ topN = 10
} else {
var err error
jobnum, err = strconv.ParseUint(ctx.Args().Get(1), 10, 64)
if err != nil {
return fmt.Errorf("failed to Parse jobnum, Args[1]: %v, err: %v", ctx.Args().Get(1), err)
}
+ topN, err = strconv.ParseUint(ctx.Args().Get(2), 10, 64)
+ if err != nil {
+ return fmt.Errorf("failed to Parse topn, Args[1]: %v, err: %v", ctx.Args().Get(1), err)
+ }
}
if blockNumber != math.MaxUint64 {
@@ -437,6 +451,7 @@ func inspectTrie(ctx *cli.Context) error {
if dbScheme == rawdb.PathScheme {
config = &triedb.Config{
PathDB: utils.PathDBConfigAddJournalFilePath(stack, pathdb.ReadOnly),
+ Cache: 0,
}
} else if dbScheme == rawdb.HashScheme {
config = triedb.HashDefaults
@@ -448,7 +463,7 @@ func inspectTrie(ctx *cli.Context) error {
fmt.Printf("fail to new trie tree, err: %v, rootHash: %v\n", err, trieRootHash.String())
return err
}
- theInspect, err := trie.NewInspector(theTrie, triedb, trieRootHash, blockNumber, jobnum)
+ theInspect, err := trie.NewInspector(theTrie, triedb, trieRootHash, blockNumber, jobnum, int(topN))
if err != nil {
return err
}
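With the extra argument, a typical invocation would be something like: geth db inspect-trie latest 1000 10. Here blocknum may be latest, snapshot or a concrete block number, and jobnum/topn fall back to 1000 and 10 when omitted, per the parsing above. The command path and the values are illustrative only, inferred from the ArgsUsage string rather than taken from the patch.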

View File

@@ -0,0 +1,51 @@
import { ethers } from "ethers";
import program from "commander";
// depends on ethjs v6.11.0+ for 4844, https://github.com/ethers-io/ethers.js/releases/tag/v6.11.0
// BSC testnet enabled 4844 on block: 39539137
// Usage:
// nvm use 20
// node check_blobtx.js --rpc https://data-seed-prebsc-1-s1.binance.org:8545 --startNum 39539137
// node check_blobtx.js --rpc https://data-seed-prebsc-1-s1.binance.org:8545 --startNum 39539137 --endNum 40345994
program.option("--rpc <Rpc>", "Rpc Server URL");
program.option("--startNum <Num>", "start block", 0);
program.option("--endNum <Num>", "end block", 0);
program.parse(process.argv);
const provider = new ethers.JsonRpcProvider(program.rpc);
const main = async () => {
var startBlock = parseInt(program.startNum)
var endBlock = parseInt(program.endNum)
if (isNaN(endBlock) || isNaN(startBlock) || startBlock == 0) {
console.error("invalid input, --startNum", program.startNum, "--end", program.endNum)
return
}
// if --endNum is not specified, set it to the latest block number.
if (endBlock == 0) {
endBlock = await provider.getBlockNumber();
}
if (startBlock > endBlock) {
console.error("invalid input, startBlock:",startBlock, " endBlock:", endBlock);
return
}
for (let i = startBlock; i <= endBlock; i++) {
let blockData = await provider.getBlock(i);
console.log("startBlock:",startBlock, "endBlock:", endBlock, "curBlock", i, "blobGasUsed", blockData.blobGasUsed);
if (blockData.blobGasUsed == 0) {
continue
}
for (let txIndex = 0; txIndex<= blockData.transactions.length - 1; txIndex++) {
let txHash = blockData.transactions[txIndex]
let txData = await provider.getTransaction(txHash);
if (txData.type == 3) {
console.log("BlobTx in block:",i, " txIndex:", txIndex, " txHash:", txHash);
}
}
}
};
main().then(() => process.exit(0))
.catch((error) => {
console.error(error);
process.exit(1);
});

View File

@@ -0,0 +1,49 @@
import { ethers } from "ethers";
import program from "commander";
// Usage:
// node faucet_request.js --rpc localhost:8545 --startNum 39539137
// node faucet_request.js --rpc localhost:8545 --startNum 39539137 --endNum 40345994
// node faucet_request.js --rpc https://data-seed-prebsc-1-s1.bnbchain.org:8545 --startNum 39539137 --endNum 40345994
program.option("--rpc <Rpc>", "Rpc Server URL");
program.option("--startNum <Num>", "start block", 0);
program.option("--endNum <Num>", "end block", 0);
program.parse(process.argv);
const provider = new ethers.JsonRpcProvider(program.rpc);
const main = async () => {
var startBlock = parseInt(program.startNum)
var endBlock = parseInt(program.endNum)
if (isNaN(endBlock) || isNaN(startBlock) || startBlock == 0) {
console.error("invalid input, --startNum", program.startNum, "--end", program.endNum)
return
}
// if --endNum is not specified, set it to the latest block number.
if (endBlock == 0) {
endBlock = await provider.getBlockNumber();
}
if (startBlock > endBlock) {
console.error("invalid input, startBlock:",startBlock, " endBlock:", endBlock);
return
}
let startBalance = await provider.getBalance("0xaa25Aa7a19f9c426E07dee59b12f944f4d9f1DD3", startBlock)
let endBalance = await provider.getBalance("0xaa25Aa7a19f9c426E07dee59b12f944f4d9f1DD3", endBlock)
const faucetAmount = BigInt(0.3 * 10**18); // Convert 0.3 ether to wei as a BigInt
const numFaucetRequest = (startBalance - endBalance) / faucetAmount;
// Convert BigInt to ether
const startBalanceEth = Number(startBalance) / 10**18;
const endBalanceEth = Number(endBalance) / 10**18;
console.log(`Start Balance: ${startBalanceEth} ETH`);
console.log(`End Balance: ${endBalanceEth} ETH`);
console.log("successful faucet request: ",numFaucetRequest);
};
main().then(() => process.exit(0))
.catch((error) => {
console.error(error);
process.exit(1);
});

View File

@@ -66,6 +66,31 @@ func NewBlockValidator(config *params.ChainConfig, blockchain *BlockChain, engin
return validator
}
+ // ValidateListsInBody validates that UncleHash, WithdrawalsHash, and WithdrawalsHash correspond to the lists in the block body, respectively.
+ func ValidateListsInBody(block *types.Block) error {
+ header := block.Header()
+ if hash := types.CalcUncleHash(block.Uncles()); hash != header.UncleHash {
+ return fmt.Errorf("uncle root hash mismatch (header value %x, calculated %x)", header.UncleHash, hash)
+ }
+ if hash := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil)); hash != header.TxHash {
+ return fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, header.TxHash)
+ }
+ // Withdrawals are present after the Shanghai fork.
+ if header.WithdrawalsHash != nil {
+ // Withdrawals list must be present in body after Shanghai.
+ if block.Withdrawals() == nil {
+ return errors.New("missing withdrawals in block body")
+ }
+ if hash := types.DeriveSha(block.Withdrawals(), trie.NewStackTrie(nil)); hash != *header.WithdrawalsHash {
+ return fmt.Errorf("withdrawals root hash mismatch (header value %x, calculated %x)", *header.WithdrawalsHash, hash)
+ }
+ } else if block.Withdrawals() != nil { // Withdrawals turn into empty from nil when BlockBody has Sidecars
+ // Withdrawals are not allowed prior to shanghai fork
+ return errors.New("withdrawals present in block body")
+ }
+ return nil
+ }
// ValidateBody validates the given block's uncles and verifies the block
// header's transaction and uncle roots. The headers are assumed to be already
// validated at this point.
@@ -83,31 +108,12 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
if err := v.engine.VerifyUncles(v.bc, block); err != nil {
return err
}
- if hash := types.CalcUncleHash(block.Uncles()); hash != header.UncleHash {
- return fmt.Errorf("uncle root hash mismatch (header value %x, calculated %x)", header.UncleHash, hash)
- }
validateFuns := []func() error{
func() error {
- if hash := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil)); hash != header.TxHash {
- return fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, header.TxHash)
- }
- return nil
+ return ValidateListsInBody(block)
},
func() error {
- // Withdrawals are present after the Shanghai fork.
- if header.WithdrawalsHash != nil {
- // Withdrawals list must be present in body after Shanghai.
- if block.Withdrawals() == nil {
- return errors.New("missing withdrawals in block body")
- }
- if hash := types.DeriveSha(block.Withdrawals(), trie.NewStackTrie(nil)); hash != *header.WithdrawalsHash {
- return fmt.Errorf("withdrawals root hash mismatch (header value %x, calculated %x)", *header.WithdrawalsHash, hash)
- }
- } else if block.Withdrawals() != nil { // Withdrawals turn into empty from nil when BlockBody has Sidecars
- // Withdrawals are not allowed prior to shanghai fork
- return errors.New("withdrawals present in block body")
- }
// Blob transactions may be present after the Cancun fork.
var blobs int
for i, tx := range block.Transactions() {

View File

@@ -511,3 +511,12 @@ func (bc *BlockChain) SubscribeBlockProcessingEvent(ch chan<- bool) event.Subscr
func (bc *BlockChain) SubscribeFinalizedHeaderEvent(ch chan<- FinalizedHeaderEvent) event.Subscription {
return bc.scope.Track(bc.finalizedHeaderFeed.Subscribe(ch))
}
+ // AncientTail retrieves the tail the ancients blocks
+ func (bc *BlockChain) AncientTail() (uint64, error) {
+ tail, err := bc.db.Tail()
+ if err != nil {
+ return 0, err
+ }
+ return tail, nil
+ }

View File

@@ -239,7 +239,7 @@ func (f *Freezer) Ancient(kind string, number uint64) ([]byte, error) {
// - if maxBytes is not specified, 'count' items will be returned if they are present.
func (f *Freezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) {
if table := f.tables[kind]; table != nil {
- return table.RetrieveItems(start, count, maxBytes)
+ return table.RetrieveItems(start-f.offset, count, maxBytes)
}
return nil, errUnknownTable
}
@@ -252,7 +252,7 @@ func (f *Freezer) Ancients() (uint64, error) {
func (f *Freezer) TableAncients(kind string) (uint64, error) {
f.writeLock.RLock()
defer f.writeLock.RUnlock()
- return f.tables[kind].items.Load(), nil
+ return f.tables[kind].items.Load() + f.offset, nil
}
// ItemAmountInAncient returns the actual length of current ancientDB.
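Both one-line fixes above translate between chain-global item numbers and table-local indices once the freezer carries an offset, i.e. once older items have been pruned away. A minimal sketch of that bookkeeping in isolation, using a hypothetical offsetTable type rather than the real freezer internals:

package freezersketch

// offsetTable is a hypothetical stand-in for a freezer table whose first
// retained item corresponds to chain item number `offset` (older items were
// pruned away, e.g. by prune-block).
type offsetTable struct {
	offset uint64   // chain item number stored at local index 0
	items  [][]byte // locally retained items
}

// retrieve maps a chain-global item number to a table-local index, mirroring
// RetrieveItems(start-f.offset, ...) in the hunk above.
func (t *offsetTable) retrieve(globalNumber uint64) ([]byte, bool) {
	if globalNumber < t.offset || globalNumber-t.offset >= uint64(len(t.items)) {
		return nil, false
	}
	return t.items[globalNumber-t.offset], true
}

// ancients reports the chain-global item count, mirroring
// items.Load() + f.offset in the hunk above.
func (t *offsetTable) ancients() uint64 {
	return t.offset + uint64(len(t.items))
}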

View File

@@ -6,6 +6,8 @@ import (
"sync/atomic" "sync/atomic"
"time" "time"
mapset "github.com/deckarep/golang-set/v2"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
@@ -40,6 +42,12 @@ func (b *BidArgs) ToBid(builder common.Address, signer Signer) (*Bid, error) {
return nil, err return nil, err
} }
if len(b.RawBid.UnRevertible) > len(txs) {
return nil, fmt.Errorf("expect NonRevertible no more than %d", len(txs))
}
unRevertibleHashes := mapset.NewThreadUnsafeSetWithSize[common.Hash](len(b.RawBid.UnRevertible))
unRevertibleHashes.Append(b.RawBid.UnRevertible...)
if len(b.PayBidTx) != 0 { if len(b.PayBidTx) != 0 {
var payBidTx = new(Transaction) var payBidTx = new(Transaction)
err = payBidTx.UnmarshalBinary(b.PayBidTx) err = payBidTx.UnmarshalBinary(b.PayBidTx)
@@ -51,14 +59,15 @@ func (b *BidArgs) ToBid(builder common.Address, signer Signer) (*Bid, error) {
} }
bid := &Bid{ bid := &Bid{
Builder: builder, Builder: builder,
BlockNumber: b.RawBid.BlockNumber, BlockNumber: b.RawBid.BlockNumber,
ParentHash: b.RawBid.ParentHash, ParentHash: b.RawBid.ParentHash,
Txs: txs, Txs: txs,
GasUsed: b.RawBid.GasUsed + b.PayBidTxGasUsed, UnRevertible: unRevertibleHashes,
GasFee: b.RawBid.GasFee, GasUsed: b.RawBid.GasUsed + b.PayBidTxGasUsed,
BuilderFee: b.RawBid.BuilderFee, GasFee: b.RawBid.GasFee,
rawBid: *b.RawBid, BuilderFee: b.RawBid.BuilderFee,
rawBid: *b.RawBid,
} }
if bid.BuilderFee == nil { if bid.BuilderFee == nil {
@@ -70,12 +79,13 @@ func (b *BidArgs) ToBid(builder common.Address, signer Signer) (*Bid, error) {
// RawBid represents a raw bid from builder directly. // RawBid represents a raw bid from builder directly.
type RawBid struct { type RawBid struct {
BlockNumber uint64 `json:"blockNumber"` BlockNumber uint64 `json:"blockNumber"`
ParentHash common.Hash `json:"parentHash"` ParentHash common.Hash `json:"parentHash"`
Txs []hexutil.Bytes `json:"txs"` Txs []hexutil.Bytes `json:"txs"`
GasUsed uint64 `json:"gasUsed"` UnRevertible []common.Hash `json:"unRevertible"`
GasFee *big.Int `json:"gasFee"` GasUsed uint64 `json:"gasUsed"`
BuilderFee *big.Int `json:"builderFee"` GasFee *big.Int `json:"gasFee"`
BuilderFee *big.Int `json:"builderFee"`
hash atomic.Value hash atomic.Value
} }
@@ -154,13 +164,14 @@ func (b *RawBid) Hash() common.Hash {
// Bid represents a bid. // Bid represents a bid.
type Bid struct { type Bid struct {
Builder common.Address Builder common.Address
BlockNumber uint64 BlockNumber uint64
ParentHash common.Hash ParentHash common.Hash
Txs Transactions Txs Transactions
GasUsed uint64 UnRevertible mapset.Set[common.Hash]
GasFee *big.Int GasUsed uint64
BuilderFee *big.Int GasFee *big.Int
BuilderFee *big.Int
rawBid RawBid rawBid RawBid
} }

View File

@@ -24,7 +24,6 @@ import (
"os" "os"
"strings" "strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
@@ -142,31 +141,3 @@ func (api *AdminAPI) ImportChain(file string) (bool, error) {
} }
return true, nil return true, nil
} }
// MevRunning returns true if the validator accept bids from builder
func (api *AdminAPI) MevRunning() bool {
return api.eth.APIBackend.MevRunning()
}
// StartMev starts mev. It notifies the miner to start to receive bids.
func (api *AdminAPI) StartMev() {
api.eth.APIBackend.StartMev()
}
// StopMev stops mev. It notifies the miner to stop receiving bids from this moment,
// but the bids before this moment would still been taken into consideration by mev.
func (api *AdminAPI) StopMev() {
api.eth.APIBackend.StopMev()
}
// AddBuilder adds a builder to the bid simulator.
// url is the endpoint of the builder, for example, "https://mev-builder.amazonaws.com",
// if validator is equipped with sentry, ignore the url.
func (api *AdminAPI) AddBuilder(builder common.Address, url string) error {
return api.eth.APIBackend.AddBuilder(builder, url)
}
// RemoveBuilder removes a builder from the bid simulator.
func (api *AdminAPI) RemoveBuilder(builder common.Address) error {
return api.eth.APIBackend.RemoveBuilder(builder)
}

View File

@@ -89,3 +89,31 @@ func (api *MinerAPI) SetEtherbase(etherbase common.Address) bool {
func (api *MinerAPI) SetRecommitInterval(interval int) {
api.e.Miner().SetRecommitInterval(time.Duration(interval) * time.Millisecond)
}
+ // MevRunning returns true if the validator accept bids from builder
+ func (api *MinerAPI) MevRunning() bool {
+ return api.e.APIBackend.MevRunning()
+ }
+ // StartMev starts mev. It notifies the miner to start to receive bids.
+ func (api *MinerAPI) StartMev() {
+ api.e.APIBackend.StartMev()
+ }
+ // StopMev stops mev. It notifies the miner to stop receiving bids from this moment,
+ // but the bids before this moment would still been taken into consideration by mev.
+ func (api *MinerAPI) StopMev() {
+ api.e.APIBackend.StopMev()
+ }
+ // AddBuilder adds a builder to the bid simulator.
+ // url is the endpoint of the builder, for example, "https://mev-builder.amazonaws.com",
+ // if validator is equipped with sentry, ignore the url.
+ func (api *MinerAPI) AddBuilder(builder common.Address, url string) error {
+ return api.e.APIBackend.AddBuilder(builder, url)
+ }
+ // RemoveBuilder removes a builder from the bid simulator.
+ func (api *MinerAPI) RemoveBuilder(builder common.Address) error {
+ return api.e.APIBackend.RemoveBuilder(builder)
+ }

View File

@@ -314,6 +314,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
// Permit the downloader to use the trie cache allowance during fast sync
cacheLimit := cacheConfig.TrieCleanLimit + cacheConfig.TrieDirtyLimit + cacheConfig.SnapshotLimit
if eth.handler, err = newHandler(&handlerConfig{
+ NodeID: eth.p2pServer.Self().ID(),
Database: chainDb,
Chain: eth.blockchain,
TxPool: eth.txPool,

View File

@@ -209,6 +209,9 @@ type BlockChain interface {
// UpdateChasingHead update remote best chain head, used by DA check now.
UpdateChasingHead(head *types.Header)
+ // AncientTail retrieves the tail the ancients blocks
+ AncientTail() (uint64, error)
}
type DownloadOption func(downloader *Downloader) *Downloader
@@ -797,6 +800,11 @@ func (d *Downloader) findAncestor(p *peerConnection, localHeight uint64, remoteH
// We're above the max reorg threshold, find the earliest fork point
floor = int64(localHeight - maxForkAncestry)
}
+ // if we have pruned too much history, reset the floor
+ if tail, err := d.blockchain.AncientTail(); err == nil && tail > uint64(floor) {
+ floor = int64(tail)
+ }
// If we're doing a light sync, ensure the floor doesn't go below the CHT, as
// all headers before that point will be missing.
if mode == LightSync {

View File

@@ -34,6 +34,7 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/fetcher" "github.com/ethereum/go-ethereum/eth/fetcher"
"github.com/ethereum/go-ethereum/eth/protocols/bsc" "github.com/ethereum/go-ethereum/eth/protocols/bsc"
@@ -45,6 +46,8 @@ import (
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
"golang.org/x/crypto/sha3"
) )
const ( const (
@@ -111,6 +114,7 @@ type votePool interface {
// handlerConfig is the collection of initialization parameters to create a full // handlerConfig is the collection of initialization parameters to create a full
// node network handler. // node network handler.
type handlerConfig struct { type handlerConfig struct {
NodeID enode.ID // P2P node ID used for tx propagation topology
Database ethdb.Database // Database for direct sync insertions Database ethdb.Database // Database for direct sync insertions
Chain *core.BlockChain // Blockchain to serve data from Chain *core.BlockChain // Blockchain to serve data from
TxPool txPool // Transaction pool to propagate from TxPool txPool // Transaction pool to propagate from
@@ -127,6 +131,7 @@ type handlerConfig struct {
} }
type handler struct { type handler struct {
nodeID enode.ID
networkID uint64 networkID uint64
forkFilter forkid.Filter // Fork ID filter, constant across the lifetime of the node forkFilter forkid.Filter // Fork ID filter, constant across the lifetime of the node
disablePeerTxBroadcast bool disablePeerTxBroadcast bool
@@ -184,6 +189,7 @@ func newHandler(config *handlerConfig) (*handler, error) {
config.PeerSet = newPeerSet() // Nicety initialization for tests config.PeerSet = newPeerSet() // Nicety initialization for tests
} }
h := &handler{ h := &handler{
nodeID: config.NodeID,
networkID: config.Network, networkID: config.Network,
forkFilter: forkid.NewFilter(config.Chain), forkFilter: forkid.NewFilter(config.Chain),
disablePeerTxBroadcast: config.DisablePeerTxBroadcast, disablePeerTxBroadcast: config.DisablePeerTxBroadcast,
@@ -320,26 +326,22 @@ func newHandler(config *handlerConfig) (*handler, error) {
} }
broadcastBlockWithCheck := func(block *types.Block, propagate bool) { broadcastBlockWithCheck := func(block *types.Block, propagate bool) {
// All the block fetcher activities should be disabled
// after the transition. Print the warning log.
if h.merger.PoSFinalized() {
log.Warn("Unexpected validation activity", "hash", block.Hash(), "number", block.Number())
return
}
// Reject all the PoS style headers in the first place. No matter
// the chain has finished the transition or not, the PoS headers
// should only come from the trusted consensus layer instead of
// p2p network.
if beacon, ok := h.chain.Engine().(*beacon.Beacon); ok {
if beacon.IsPoSHeader(block.Header()) {
log.Warn("unexpected post-merge header")
return
}
}
if propagate { if propagate {
if err := core.IsDataAvailable(h.chain, block); err != nil { checkErrs := make(chan error, 2)
log.Error("Propagating block with invalid sidecars", "number", block.Number(), "hash", block.Hash(), "err", err)
return go func() {
checkErrs <- core.ValidateListsInBody(block)
}()
go func() {
checkErrs <- core.IsDataAvailable(h.chain, block)
}()
for i := 0; i < cap(checkErrs); i++ {
err := <-checkErrs
if err != nil {
log.Error("Propagating invalid block", "number", block.Number(), "hash", block.Hash(), "err", err)
return
}
} }
} }
h.BroadcastBlock(block, propagate) h.BroadcastBlock(block, propagate)
@@ -856,25 +858,54 @@ func (h *handler) BroadcastTransactions(txs types.Transactions) {
annos = make(map[*ethPeer][]common.Hash) // Set peer->hash to announce annos = make(map[*ethPeer][]common.Hash) // Set peer->hash to announce
) )
// Broadcast transactions to a batch of peers not knowing about it // Broadcast transactions to a batch of peers not knowing about it
for _, tx := range txs { direct := big.NewInt(int64(math.Sqrt(float64(h.peers.len())))) // Approximate number of peers to broadcast to
peers := h.peers.peersWithoutTransaction(tx.Hash()) if direct.BitLen() == 0 {
direct = big.NewInt(1)
}
total := new(big.Int).Exp(direct, big.NewInt(2), nil) // Stabilise total peer count a bit based on sqrt peers
var numDirect int var (
signer = types.LatestSignerForChainID(h.chain.Config().ChainID) // Don't care about chain status, we just need *a* sender
hasher = sha3.NewLegacyKeccak256().(crypto.KeccakState)
hash = make([]byte, 32)
)
for _, tx := range txs {
var maybeDirect bool
switch { switch {
case tx.Type() == types.BlobTxType: case tx.Type() == types.BlobTxType:
blobTxs++ blobTxs++
case tx.Size() > txMaxBroadcastSize: case tx.Size() > txMaxBroadcastSize:
largeTxs++ largeTxs++
default: default:
numDirect = int(math.Sqrt(float64(len(peers)))) maybeDirect = true
} }
// Send the tx unconditionally to a subset of our peers // Send the transaction (if it's small enough) directly to a subset of
for _, peer := range peers[:numDirect] { // the peers that have not received it yet, ensuring that the flow of
txset[peer] = append(txset[peer], tx.Hash()) // transactions is groupped by account to (try and) avoid nonce gaps.
} //
// For the remaining peers, send announcement only // To do this, we hash the local enode IW with together with a peer's
for _, peer := range peers[numDirect:] { // enode ID together with the transaction sender and broadcast if
annos[peer] = append(annos[peer], tx.Hash()) // `sha(self, peer, sender) mod peers < sqrt(peers)`.
for _, peer := range h.peers.peersWithoutTransaction(tx.Hash()) {
var broadcast bool
if maybeDirect {
hasher.Reset()
hasher.Write(h.nodeID.Bytes())
hasher.Write(peer.Node().ID().Bytes())
from, _ := types.Sender(signer, tx) // Ignore error, we only use the addr as a propagation target splitter
hasher.Write(from.Bytes())
hasher.Read(hash)
if new(big.Int).Mod(new(big.Int).SetBytes(hash), total).Cmp(direct) < 0 {
broadcast = true
}
}
if broadcast {
txset[peer] = append(txset[peer], tx.Hash())
} else {
annos[peer] = append(annos[peer], tx.Hash())
}
} }
} }
for peer, hashes := range txset { for peer, hashes := range txset {
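For reference, the selection rule quoted in the new comment, sha(self, peer, sender) mod peers < sqrt(peers), can be isolated into a tiny helper. The sketch below assumes raw byte slices for the node/peer IDs and the sender address instead of the concrete geth types, so it illustrates the rule rather than reproducing the handler's code:

package txpropsketch

import (
	"math"
	"math/big"

	"golang.org/x/crypto/sha3"
)

// shouldBroadcast reports whether a transaction from sender would be sent in
// full to the given peer or only announced, using the deterministic rule from
// the diff above: sha(self, peer, sender) mod peers < sqrt(peers).
// selfID, peerID and sender are assumed to be raw byte identifiers.
func shouldBroadcast(selfID, peerID, sender []byte, peerCount int) bool {
	direct := int64(math.Sqrt(float64(peerCount)))
	if direct == 0 {
		direct = 1
	}
	total := direct * direct // stabilised peer count, as in the handler change

	h := sha3.NewLegacyKeccak256()
	h.Write(selfID)
	h.Write(peerID)
	h.Write(sender)
	sum := new(big.Int).SetBytes(h.Sum(nil))

	return new(big.Int).Mod(sum, big.NewInt(total)).Cmp(big.NewInt(direct)) < 0
}

Because the hash depends only on the local node, the remote peer and the transaction sender, every node makes the same full-broadcast versus announce-only choice for a given peer and sender across all of that sender's transactions, which is what makes the propagation paths deterministic and keeps one account's transactions flowing along the same links.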

View File

@@ -659,6 +659,30 @@ web3._extend({
name: 'stop',
call: 'miner_stop'
}),
+ new web3._extend.Method({
+ name: 'mevRunning',
+ call: 'miner_mevRunning'
+ }),
+ new web3._extend.Method({
+ name: 'startMev',
+ call: 'miner_startMev'
+ }),
+ new web3._extend.Method({
+ name: 'stopMev',
+ call: 'miner_stopMev'
+ }),
+ new web3._extend.Method({
+ name: 'addBuilder',
+ call: 'miner_addBuilder',
+ params: 2,
+ inputFormatter: [web3._extend.formatters.inputAddressFormatter, null]
+ }),
+ new web3._extend.Method({
+ name: 'removeBuilder',
+ call: 'miner_removeBuilder',
+ params: 1,
+ inputFormatter: [web3._extend.formatters.inputAddressFormatter]
+ }),
new web3._extend.Method({
name: 'setEtherbase',
call: 'miner_setEtherbase',
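On a node running with these console extensions, the new entries would presumably be called as miner.mevRunning(), miner.startMev(), miner.stopMev(), miner.addBuilder(builderAddress, url) and miner.removeBuilder(builderAddress), thin wrappers over the miner_* RPC methods registered above; the address arguments go through the inputAddressFormatter declared for each method.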

View File

@@ -78,6 +78,7 @@ type simBidReq struct {
type bidSimulator struct {
config *MevConfig
delayLeftOver time.Duration
+ minGasPrice *big.Int
chain *core.BlockChain
chainConfig *params.ChainConfig
engine consensus.Engine
@@ -114,6 +115,7 @@ type bidSimulator struct {
func newBidSimulator(
config *MevConfig,
delayLeftOver time.Duration,
+ minGasPrice *big.Int,
chain *core.BlockChain,
chainConfig *params.ChainConfig,
engine consensus.Engine,
@@ -122,6 +124,7 @@ func newBidSimulator(
b := &bidSimulator{
config: config,
delayLeftOver: delayLeftOver,
+ minGasPrice: minGasPrice,
chain: chain,
chainConfig: chainConfig,
engine: engine,
@@ -592,6 +595,7 @@ func (b *bidSimulator) simBid(interruptCh chan int32, bidRuntime *BidRuntime) {
return
}
+ // commit transactions in bid
for _, tx := range bidRuntime.bid.Txs {
select {
case <-interruptCh:
@@ -609,7 +613,7 @@ func (b *bidSimulator) simBid(interruptCh chan int32, bidRuntime *BidRuntime) {
break
}
- err = bidRuntime.commitTransaction(b.chain, b.chainConfig, tx)
+ err = bidRuntime.commitTransaction(b.chain, b.chainConfig, tx, bidRuntime.bid.UnRevertible.Contains(tx.Hash()))
if err != nil {
log.Error("BidSimulator: failed to commit tx", "bidHash", bidRuntime.bid.Hash(), "tx", tx.Hash(), "err", err)
err = fmt.Errorf("invalid tx in bid, %v", err)
@@ -617,15 +621,32 @@ func (b *bidSimulator) simBid(interruptCh chan int32, bidRuntime *BidRuntime) {
}
}
- bidRuntime.packReward(b.config.ValidatorCommission)
- // return if bid is invalid, reportIssue issue to mev-sentry/builder if simulation is fully done
+ // check if bid reward is valid
+ {
+ bidRuntime.packReward(b.config.ValidatorCommission)
if !bidRuntime.validReward() {
err = errors.New("reward does not achieve the expectation")
return
+ }
}
- // fill transactions from mempool
+ // check if bid gas price is lower than min gas price
+ {
+ bidGasUsed := uint64(0)
+ bidGasFee := bidRuntime.env.state.GetBalance(consensus.SystemAddress)
+ for _, receipt := range bidRuntime.env.receipts {
+ bidGasUsed += receipt.GasUsed
+ }
+ bidGasPrice := new(big.Int).Div(bidGasFee.ToBig(), new(big.Int).SetUint64(bidGasUsed))
+ if bidGasPrice.Cmp(b.minGasPrice) < 0 {
+ err = errors.New("bid gas price is lower than min gas price")
+ return
+ }
+ }
+ // if enable greedy merge, fill bid env with transactions from mempool
if b.config.GreedyMergeTx {
delay := b.engine.Delay(b.chain, bidRuntime.env.header, &b.delayLeftOver)
if delay != nil && *delay > 0 {
@@ -645,8 +666,9 @@ func (b *bidSimulator) simBid(interruptCh chan int32, bidRuntime *BidRuntime) {
}
}
+ // commit payBidTx at the end of the block
bidRuntime.env.gasPool.AddGas(params.PayBidTxGasLimit)
- err = bidRuntime.commitTransaction(b.chain, b.chainConfig, payBidTx)
+ err = bidRuntime.commitTransaction(b.chain, b.chainConfig, payBidTx, true)
if err != nil {
log.Error("BidSimulator: failed to commit tx", "builder", bidRuntime.bid.Builder,
"bidHash", bidRuntime.bid.Hash(), "tx", payBidTx.Hash(), "err", err)
@@ -727,12 +749,10 @@ func (r *BidRuntime) packReward(validatorCommission uint64) {
r.packedValidatorReward.Sub(r.packedValidatorReward, r.bid.BuilderFee)
}
- func (r *BidRuntime) commitTransaction(chain *core.BlockChain, chainConfig *params.ChainConfig, tx *types.Transaction) error {
+ func (r *BidRuntime) commitTransaction(chain *core.BlockChain, chainConfig *params.ChainConfig, tx *types.Transaction, unRevertible bool) error {
var (
env = r.env
- snap = env.state.Snapshot()
- gp = env.gasPool.Gas()
sc *types.BlobSidecar
)
// Start executing the transaction
@@ -755,9 +775,9 @@ func (r *BidRuntime) commitTransaction(chain *core.BlockChain, chainConfig *para
receipt, err := core.ApplyTransaction(chainConfig, chain, &env.coinbase, env.gasPool, env.state, env.header, tx,
&env.header.GasUsed, *chain.GetVMConfig(), core.NewReceiptBloomGenerator())
if err != nil {
- env.state.RevertToSnapshot(snap)
- env.gasPool.SetGas(gp)
return err
+ } else if unRevertible && receipt.Status == types.ReceiptStatusFailed {
+ return errors.New("no revertible transaction failed")
}
if tx.Type() == types.BlobTxType {
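The added gas-price guard amounts to an average over the whole bid: the total gas fee accrued to the system address divided by the total gas used by the bid's receipts, compared against the configured minimum. A standalone sketch of just that arithmetic (the function name and the zero-gas guard are ours, not part of the patch):

package mevsketch

import (
	"errors"
	"math/big"
)

// checkBidGasPrice rejects a bid whose average gas price, i.e. the total gas
// fee divided by the total gas used, falls below the validator's configured
// minimum gas price. This mirrors the new block in simBid above.
func checkBidGasPrice(bidGasFee *big.Int, bidGasUsed uint64, minGasPrice *big.Int) error {
	if bidGasUsed == 0 {
		return errors.New("bid used no gas") // guard added for this sketch only
	}
	bidGasPrice := new(big.Int).Div(bidGasFee, new(big.Int).SetUint64(bidGasUsed))
	if bidGasPrice.Cmp(minGasPrice) < 0 {
		return errors.New("bid gas price is lower than min gas price")
	}
	return nil
}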

View File

@@ -102,7 +102,7 @@ func New(eth Backend, config *Config, chainConfig *params.ChainConfig, mux *even
worker: newWorker(config, chainConfig, engine, eth, mux, isLocalBlock, false),
}
- miner.bidSimulator = newBidSimulator(&config.Mev, config.DelayLeftOver, eth.BlockChain(), chainConfig, engine, miner.worker)
+ miner.bidSimulator = newBidSimulator(&config.Mev, config.DelayLeftOver, config.GasPrice, eth.BlockChain(), chainConfig, engine, miner.worker)
miner.worker.setBestBidFetcher(miner.bidSimulator)
miner.wg.Add(1)

View File

@@ -4,17 +4,15 @@ import (
"bytes" "bytes"
"errors" "errors"
"fmt" "fmt"
"math/big"
"os"
"runtime" "runtime"
"sort" "strings"
"strconv"
"sync" "sync"
"sync/atomic" "sync/atomic"
"time" "time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
@@ -26,63 +24,113 @@ import (
"golang.org/x/sync/semaphore" "golang.org/x/sync/semaphore"
) )
type Account struct {
Nonce uint64
Balance *big.Int
Root common.Hash // merkle root of the storage trie
CodeHash []byte
}
type Database interface { type Database interface {
database.Database database.Database
Scheme() string Scheme() string
Cap(limit common.StorageSize) error Cap(limit common.StorageSize) error
DiskDB() ethdb.Database DiskDB() ethdb.Database
} }
const TopN = 3
type Inspector struct { type Inspector struct {
trie *Trie // traverse trie trie *Trie // traverse trie
db Database db Database
stateRootHash common.Hash stateRootHash common.Hash
blocknum uint64 blockNum uint64
root node // root of triedb root node // root of triedb
totalNum uint64
wg sync.WaitGroup
statLock sync.RWMutex
result map[string]*TrieTreeStat
sem *semaphore.Weighted sem *semaphore.Weighted
eoaAccountNums uint64 eoaAccountNums uint64
wg sync.WaitGroup
results stat
topN int
totalAccountNum atomic.Uint64
totalStorageNum atomic.Uint64
lastTime mclock.AbsTime
} }
type TrieTreeStat struct { type stat struct {
is_account_trie bool lock sync.RWMutex
theNodeStatByLevel [15]NodeStat account *trieStat
totalNodeStat NodeStat storageTopN []*trieStat
storageTopNTotal []uint64
storageTotal nodeStat
storageTrieNum uint64
} }
type NodeStat struct { type trieStat struct {
ShortNodeCnt uint64 owner common.Hash
FullNodeCnt uint64 totalNodeStat nodeStat
ValueNodeCnt uint64 nodeStatByLevel [16]nodeStat
} }
func (trieStat *TrieTreeStat) AtomicAdd(theNode node, height uint32) { type nodeStat struct {
ShortNodeCnt atomic.Uint64
FullNodeCnt atomic.Uint64
ValueNodeCnt atomic.Uint64
}
func (ns *nodeStat) IsEmpty() bool {
if ns.FullNodeCnt.Load() == 0 && ns.ShortNodeCnt.Load() == 0 && ns.ValueNodeCnt.Load() == 0 {
return true
}
return false
}
func (s *stat) add(ts *trieStat, topN int) {
s.lock.Lock()
defer s.lock.Unlock()
if ts.owner == (common.Hash{}) {
s.account = ts
return
}
total := ts.totalNodeStat.ValueNodeCnt.Load() + ts.totalNodeStat.FullNodeCnt.Load() + ts.totalNodeStat.ShortNodeCnt.Load()
if len(s.storageTopNTotal) == 0 || total > s.storageTopNTotal[len(s.storageTopNTotal)-1] {
var (
i int
t uint64
)
for i, t = range s.storageTopNTotal {
if total < t {
continue
}
break
}
s.storageTopNTotal = append(s.storageTopNTotal[:i], append([]uint64{total}, s.storageTopNTotal[i:]...)...)
s.storageTopN = append(s.storageTopN[:i], append([]*trieStat{ts}, s.storageTopN[i:]...)...)
if len(s.storageTopN) > topN {
s.storageTopNTotal = s.storageTopNTotal[:topN]
s.storageTopN = s.storageTopN[:topN]
}
}
s.storageTotal.ShortNodeCnt.Add(ts.totalNodeStat.ShortNodeCnt.Load())
s.storageTotal.ValueNodeCnt.Add(ts.totalNodeStat.ValueNodeCnt.Load())
s.storageTotal.FullNodeCnt.Add(ts.totalNodeStat.FullNodeCnt.Load())
s.storageTrieNum++
}
func (trieStat *trieStat) add(theNode node, height int) {
switch (theNode).(type) { switch (theNode).(type) {
case *shortNode: case *shortNode:
atomic.AddUint64(&trieStat.totalNodeStat.ShortNodeCnt, 1) trieStat.totalNodeStat.ShortNodeCnt.Add(1)
atomic.AddUint64(&(trieStat.theNodeStatByLevel[height].ShortNodeCnt), 1) trieStat.nodeStatByLevel[height].ShortNodeCnt.Add(1)
case *fullNode: case *fullNode:
atomic.AddUint64(&trieStat.totalNodeStat.FullNodeCnt, 1) trieStat.totalNodeStat.FullNodeCnt.Add(1)
atomic.AddUint64(&trieStat.theNodeStatByLevel[height].FullNodeCnt, 1) trieStat.nodeStatByLevel[height].FullNodeCnt.Add(1)
case valueNode: case valueNode:
atomic.AddUint64(&trieStat.totalNodeStat.ValueNodeCnt, 1) trieStat.totalNodeStat.ValueNodeCnt.Add(1)
atomic.AddUint64(&((trieStat.theNodeStatByLevel[height]).ValueNodeCnt), 1) trieStat.nodeStatByLevel[height].ValueNodeCnt.Add(1)
default:
panic(errors.New("Invalid node type to statistics"))
} }
} }
func (trieStat *TrieTreeStat) Display(ownerAddress string, treeType string) { func (trieStat *trieStat) Display(ownerAddress string, treeType string) string {
table := tablewriter.NewWriter(os.Stdout) sw := new(strings.Builder)
table := tablewriter.NewWriter(sw)
table.SetHeader([]string{"-", "Level", "ShortNodeCnt", "FullNodeCnt", "ValueNodeCnt"}) table.SetHeader([]string{"-", "Level", "ShortNodeCnt", "FullNodeCnt", "ValueNodeCnt"})
if ownerAddress == "" { if ownerAddress == "" {
table.SetCaption(true, fmt.Sprintf("%v", treeType)) table.SetCaption(true, fmt.Sprintf("%v", treeType))
@@ -90,38 +138,27 @@ func (trieStat *TrieTreeStat) Display(ownerAddress string, treeType string) {
table.SetCaption(true, fmt.Sprintf("%v-%v", treeType, ownerAddress)) table.SetCaption(true, fmt.Sprintf("%v-%v", treeType, ownerAddress))
} }
table.SetAlignment(1) table.SetAlignment(1)
for i := 0; i < len(trieStat.theNodeStatByLevel); i++ {
nodeStat := trieStat.theNodeStatByLevel[i] for i := range trieStat.nodeStatByLevel {
if nodeStat.FullNodeCnt == 0 && nodeStat.ShortNodeCnt == 0 && nodeStat.ValueNodeCnt == 0 { if trieStat.nodeStatByLevel[i].IsEmpty() {
break continue
} }
table.AppendBulk([][]string{ table.AppendBulk([][]string{
{"-", strconv.Itoa(i), nodeStat.ShortNodeCount(), nodeStat.FullNodeCount(), nodeStat.ValueNodeCount()}, {"-", fmt.Sprintf("%d", i),
fmt.Sprintf("%d", trieStat.nodeStatByLevel[i].ShortNodeCnt.Load()),
fmt.Sprintf("%d", trieStat.nodeStatByLevel[i].FullNodeCnt.Load()),
fmt.Sprintf("%d", trieStat.nodeStatByLevel[i].ValueNodeCnt.Load())},
}) })
} }
table.AppendBulk([][]string{ table.AppendBulk([][]string{
{"Total", "-", trieStat.totalNodeStat.ShortNodeCount(), trieStat.totalNodeStat.FullNodeCount(), trieStat.totalNodeStat.ValueNodeCount()}, {"Total", "-", fmt.Sprintf("%d", trieStat.totalNodeStat.ShortNodeCnt.Load()), fmt.Sprintf("%d", trieStat.totalNodeStat.FullNodeCnt.Load()), fmt.Sprintf("%d", trieStat.totalNodeStat.ValueNodeCnt.Load())},
}) })
table.Render() table.Render()
} return sw.String()
func Uint64ToString(cnt uint64) string {
return fmt.Sprintf("%v", cnt)
}
func (nodeStat *NodeStat) ShortNodeCount() string {
return Uint64ToString(nodeStat.ShortNodeCnt)
}
func (nodeStat *NodeStat) FullNodeCount() string {
return Uint64ToString(nodeStat.FullNodeCnt)
}
func (nodeStat *NodeStat) ValueNodeCount() string {
return Uint64ToString(nodeStat.ValueNodeCnt)
} }
// NewInspector return a inspector obj // NewInspector return a inspector obj
func NewInspector(tr *Trie, db Database, stateRootHash common.Hash, blocknum uint64, jobnum uint64) (*Inspector, error) { func NewInspector(tr *Trie, db Database, stateRootHash common.Hash, blockNum uint64, jobNum uint64, topN int) (*Inspector, error) {
if tr == nil { if tr == nil {
return nil, errors.New("trie is nil") return nil, errors.New("trie is nil")
} }
@@ -131,15 +168,20 @@ func NewInspector(tr *Trie, db Database, stateRootHash common.Hash, blocknum uin
} }
ins := &Inspector{ ins := &Inspector{
trie: tr, trie: tr,
db: db, db: db,
stateRootHash: stateRootHash, stateRootHash: stateRootHash,
blocknum: blocknum, blockNum: blockNum,
root: tr.root, root: tr.root,
result: make(map[string]*TrieTreeStat), results: stat{},
totalNum: (uint64)(0), topN: topN,
wg: sync.WaitGroup{}, totalAccountNum: atomic.Uint64{},
sem: semaphore.NewWeighted(int64(jobnum)), totalStorageNum: atomic.Uint64{},
lastTime: mclock.Now(),
sem: semaphore.NewWeighted(int64(jobNum)),
wg: sync.WaitGroup{},
eoaAccountNums: 0, eoaAccountNums: 0,
} }
@@ -147,155 +189,123 @@ func NewInspector(tr *Trie, db Database, stateRootHash common.Hash, blocknum uin
}
// Run statistics, external call
- func (inspect *Inspector) Run() {
- accountTrieStat := &TrieTreeStat{
- is_account_trie: true,
- }
- if inspect.db.Scheme() == rawdb.HashScheme {
- ticker := time.NewTicker(30 * time.Second)
- go func() {
- defer ticker.Stop()
- for range ticker.C {
- inspect.db.Cap(DEFAULT_TRIEDBCACHE_SIZE)
- }
- }()
- }
- if _, ok := inspect.result[""]; !ok {
- inspect.result[""] = accountTrieStat
- }
- log.Info("Find Account Trie Tree", "rootHash: ", inspect.trie.Hash().String(), "BlockNum: ", inspect.blocknum)
- inspect.ConcurrentTraversal(inspect.trie, accountTrieStat, inspect.root, 0, []byte{})
- inspect.wg.Wait()
- }
+ func (s *Inspector) Run() {
+ ticker := time.NewTicker(30 * time.Second)
+ go func() {
+ defer ticker.Stop()
+ for range ticker.C {
+ if s.db.Scheme() == rawdb.HashScheme {
+ s.db.Cap(DEFAULT_TRIEDBCACHE_SIZE)
+ }
+ runtime.GC()
+ }
+ }()
+ log.Info("Find Account Trie Tree", "rootHash: ", s.trie.Hash().String(), "BlockNum: ", s.blockNum)
+ ts := &trieStat{
+ owner: common.Hash{},
+ }
+ s.traversal(s.trie, ts, s.root, 0, []byte{})
+ s.results.add(ts, s.topN)
+ s.wg.Wait()
+ }
- func (inspect *Inspector) SubConcurrentTraversal(theTrie *Trie, theTrieTreeStat *TrieTreeStat, theNode node, height uint32, path []byte) {
- inspect.ConcurrentTraversal(theTrie, theTrieTreeStat, theNode, height, path)
- inspect.wg.Done()
- }
- func (inspect *Inspector) ConcurrentTraversal(theTrie *Trie, theTrieTreeStat *TrieTreeStat, theNode node, height uint32, path []byte) {
- // print process progress
- total_num := atomic.AddUint64(&inspect.totalNum, 1)
- if total_num%100000 == 0 {
- fmt.Printf("Complete progress: %v, go routines Num: %v\n", total_num, runtime.NumGoroutine())
- }
+ func (s *Inspector) traversal(trie *Trie, ts *trieStat, n node, height int, path []byte) {
// nil node
- if theNode == nil {
+ if n == nil {
return
}
+ ts.add(n, height)
- switch current := (theNode).(type) {
+ switch current := (n).(type) {
case *shortNode:
- inspect.ConcurrentTraversal(theTrie, theTrieTreeStat, current.Val, height, append(path, current.Key...))
+ s.traversal(trie, ts, current.Val, height, append(path, current.Key...))
case *fullNode:
for idx, child := range current.Children {
if child == nil {
continue
}
- childPath := append(path, byte(idx))
- if inspect.sem.TryAcquire(1) {
- inspect.wg.Add(1)
- dst := make([]byte, len(childPath))
- copy(dst, childPath)
- go inspect.SubConcurrentTraversal(theTrie, theTrieTreeStat, child, height+1, dst)
- } else {
- inspect.ConcurrentTraversal(theTrie, theTrieTreeStat, child, height+1, childPath)
- }
+ p := common.CopyBytes(append(path, byte(idx)))
+ s.traversal(trie, ts, child, height+1, p)
}
case hashNode:
- n, err := theTrie.resloveWithoutTrack(current, path)
+ tn, err := trie.resloveWithoutTrack(current, path)
if err != nil {
- fmt.Printf("Resolve HashNode error: %v, TrieRoot: %v, Height: %v, Path: %v\n", err, theTrie.Hash().String(), height+1, path)
+ fmt.Printf("Resolve HashNode error: %v, TrieRoot: %v, Height: %v, Path: %v\n", err, trie.Hash().String(), height+1, path)
return
}
- inspect.ConcurrentTraversal(theTrie, theTrieTreeStat, n, height, path)
- return
+ s.PrintProgress(trie)
+ s.traversal(trie, ts, tn, height, path)
case valueNode:
if !hasTerm(path) {
break
}
- var account Account
+ var account types.StateAccount
if err := rlp.Decode(bytes.NewReader(current), &account); err != nil {
break
}
if common.BytesToHash(account.CodeHash) == types.EmptyCodeHash {
- inspect.eoaAccountNums++
+ s.eoaAccountNums++
}
if account.Root == (common.Hash{}) || account.Root == types.EmptyRootHash {
break
}
ownerAddress := common.BytesToHash(hexToCompact(path))
- contractTrie, err := New(StorageTrieID(inspect.stateRootHash, ownerAddress, account.Root), inspect.db)
+ contractTrie, err := New(StorageTrieID(s.stateRootHash, ownerAddress, account.Root), s.db)
if err != nil {
- fmt.Printf("New contract trie node: %v, error: %v, Height: %v, Path: %v\n", theNode, err, height, path)
- break
+ panic(err)
}
contractTrie.tracer.reset()
- trieStat := &TrieTreeStat{
- is_account_trie: false,
- }
- inspect.statLock.Lock()
- if _, ok := inspect.result[ownerAddress.String()]; !ok {
- inspect.result[ownerAddress.String()] = trieStat
- }
- inspect.statLock.Unlock()
- // log.Info("Find Contract Trie Tree, rootHash: ", contractTrie.Hash().String(), "")
- inspect.wg.Add(1)
- go inspect.SubConcurrentTraversal(contractTrie, trieStat, contractTrie.root, 0, []byte{})
+ if s.sem.TryAcquire(1) {
+ s.wg.Add(1)
+ go func() {
+ t := &trieStat{
+ owner: ownerAddress,
+ }
+ s.traversal(contractTrie, t, contractTrie.root, 0, []byte{})
+ s.results.add(t, s.topN)
+ s.sem.Release(1)
+ s.wg.Done()
+ }()
+ } else {
+ t := &trieStat{
+ owner: ownerAddress,
+ }
+ s.traversal(contractTrie, t, contractTrie.root, 0, []byte{})
+ s.results.add(t, s.topN)
+ }
default:
- panic(errors.New("Invalid node type to traverse."))
+ panic(errors.New("invalid node type to traverse"))
}
- theTrieTreeStat.AtomicAdd(theNode, height)
}
+ func (s *Inspector) PrintProgress(t *Trie) {
+ var (
+ elapsed = mclock.Now().Sub(s.lastTime)
+ )
+ if t.owner == (common.Hash{}) {
+ s.totalAccountNum.Add(1)
+ } else {
+ s.totalStorageNum.Add(1)
+ }
+ if elapsed > 4*time.Second {
+ log.Info("traversal progress", "TotalAccountNum", s.totalAccountNum.Load(), "TotalStorageNum", s.totalStorageNum.Load(), "Goroutine", runtime.NumGoroutine())
+ s.lastTime = mclock.Now()
+ }
+ }
- func (inspect *Inspector) DisplayResult() {
+ func (s *Inspector) DisplayResult() {
// display root hash
- if _, ok := inspect.result[""]; !ok {
- log.Info("Display result error", "missing account trie")
- return
- }
- inspect.result[""].Display("", "AccountTrie")
- type SortedTrie struct {
- totalNum uint64
- ownerAddress string
- }
+ fmt.Println(s.results.account.Display("", "AccountTrie"))
+ fmt.Println("EOA accounts num: ", s.eoaAccountNums)
// display contract trie
- var sortedTriesByNums []SortedTrie
- var totalContactsNodeStat NodeStat
- var contractTrieCnt uint64 = 0
- for ownerAddress, stat := range inspect.result {
- if ownerAddress == "" {
- continue
- }
- contractTrieCnt++
- totalContactsNodeStat.ShortNodeCnt += stat.totalNodeStat.ShortNodeCnt
- totalContactsNodeStat.FullNodeCnt += stat.totalNodeStat.FullNodeCnt
- totalContactsNodeStat.ValueNodeCnt += stat.totalNodeStat.ValueNodeCnt
- totalNodeCnt := stat.totalNodeStat.ShortNodeCnt + stat.totalNodeStat.ValueNodeCnt + stat.totalNodeStat.FullNodeCnt
- sortedTriesByNums = append(sortedTriesByNums, SortedTrie{totalNum: totalNodeCnt, ownerAddress: ownerAddress})
- }
- sort.Slice(sortedTriesByNums, func(i, j int) bool {
- return sortedTriesByNums[i].totalNum > sortedTriesByNums[j].totalNum
- })
- fmt.Println("EOA accounts num: ", inspect.eoaAccountNums)
- // only display top 5
- for i, t := range sortedTriesByNums {
- if i > 5 {
- break
- }
- if stat, ok := inspect.result[t.ownerAddress]; !ok {
- log.Error("Storage trie stat not found", "ownerAddress", t.ownerAddress)
- } else {
- stat.Display(t.ownerAddress, "ContractTrie")
- }
- }
+ for _, st := range s.results.storageTopN {
+ fmt.Println(st.Display(st.owner.String(), "StorageTrie"))
+ }
fmt.Printf("Contract Trie, total trie num: %v, ShortNodeCnt: %v, FullNodeCnt: %v, ValueNodeCnt: %v\n",
- contractTrieCnt, totalContactsNodeStat.ShortNodeCnt, totalContactsNodeStat.FullNodeCnt, totalContactsNodeStat.ValueNodeCnt)
+ s.results.storageTrieNum, s.results.storageTotal.ShortNodeCnt.Load(), s.results.storageTotal.FullNodeCnt.Load(), s.results.storageTotal.ValueNodeCnt.Load())
}