Compare commits

...

8 Commits

Author  SHA1  Message  Date
emailtovamos  e149161d54  jsutil: correct the division  2024-05-21 15:42:58 +01:00
emailtovamos  cb0b34b7c2  jsutils: update logs  2024-05-16 14:34:11 +01:00
emailtovamos  5b7a71e38e  jsutils: faucet balance  2024-05-16 12:25:25 +01:00
galaio  6b8cbbe172  sync: fix some sync issues caused by prune-block. (#2466)  2024-05-16 12:07:13 +08:00
setunapo  5ea2ada0ee  utils: add check_blobtx.js (#2463)  2024-05-15 18:17:57 +08:00
Fynn  b230a02006  cmd: fix memory leak when big dataset (#2455)  2024-05-15 15:28:57 +08:00
Nathan  86e3a02490  cmd/utils: add a flag to change breathe block interval for testing (#2462)  2024-05-15 15:27:05 +08:00
Nathan  0c0958ff87  eth/handler: check lists in body before broadcast blocks (#2461)  2024-05-15 14:54:25 +08:00
14 changed files with 371 additions and 217 deletions

View File

@@ -203,6 +203,9 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
if ctx.IsSet(utils.OverrideDefaultExtraReserveForBlobRequests.Name) {
params.DefaultExtraReserveForBlobRequests = ctx.Uint64(utils.OverrideDefaultExtraReserveForBlobRequests.Name)
}
if ctx.IsSet(utils.OverrideBreatheBlockInterval.Name) {
params.BreatheBlockInterval = ctx.Uint64(utils.OverrideBreatheBlockInterval.Name)
}
backend, eth := utils.RegisterEthService(stack, &cfg.Eth)

View File

@@ -106,12 +106,12 @@ Remove blockchain and state databases`,
dbInspectTrieCmd = &cli.Command{
Action: inspectTrie,
Name: "inspect-trie",
ArgsUsage: "<blocknum> <jobnum>",
ArgsUsage: "<blocknum> <jobnum> <topn>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
},
Usage: "Inspect the MPT tree of the account and contract.",
Usage: "Inspect the MPT tree of the account and contract. 'blocknum' can be latest/snapshot/number. 'topn' means output the top N storage tries info ranked by the total number of TrieNodes",
Description: `This command iterates the entire WorldState.`
}
dbCheckStateContentCmd = &cli.Command{
@@ -386,6 +386,7 @@ func inspectTrie(ctx *cli.Context) error {
blockNumber uint64
trieRootHash common.Hash
jobnum uint64
topN uint64
)
stack, _ := makeConfigNode(ctx)
@@ -411,12 +412,25 @@ func inspectTrie(ctx *cli.Context) error {
if ctx.NArg() == 1 {
jobnum = 1000
topN = 10
} else if ctx.NArg() == 2 {
var err error
jobnum, err = strconv.ParseUint(ctx.Args().Get(1), 10, 64)
if err != nil {
return fmt.Errorf("failed to Parse jobnum, Args[1]: %v, err: %v", ctx.Args().Get(1), err)
}
topN = 10
} else {
var err error
jobnum, err = strconv.ParseUint(ctx.Args().Get(1), 10, 64)
if err != nil {
return fmt.Errorf("failed to Parse jobnum, Args[1]: %v, err: %v", ctx.Args().Get(1), err)
}
topN, err = strconv.ParseUint(ctx.Args().Get(2), 10, 64)
if err != nil {
return fmt.Errorf("failed to Parse topn, Args[2]: %v, err: %v", ctx.Args().Get(2), err)
}
}
if blockNumber != math.MaxUint64 {
@@ -437,6 +451,7 @@ func inspectTrie(ctx *cli.Context) error {
if dbScheme == rawdb.PathScheme {
config = &triedb.Config{
PathDB: utils.PathDBConfigAddJournalFilePath(stack, pathdb.ReadOnly),
Cache: 0,
}
} else if dbScheme == rawdb.HashScheme {
config = triedb.HashDefaults
@@ -448,7 +463,7 @@ func inspectTrie(ctx *cli.Context) error {
fmt.Printf("fail to new trie tree, err: %v, rootHash: %v\n", err, trieRootHash.String())
return err
}
theInspect, err := trie.NewInspector(theTrie, triedb, trieRootHash, blockNumber, jobnum)
theInspect, err := trie.NewInspector(theTrie, triedb, trieRootHash, blockNumber, jobnum, int(topN))
if err != nil {
return err
}

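Note: the two- and three-argument branches above duplicate the jobnum parsing. For reference, a hypothetical consolidation (not part of this change) that defaults jobnum to 1000 and topN to 10 and overrides them only when arguments are supplied:

package main

import (
	"fmt"
	"strconv"
)

// parseInspectArgs is a hypothetical helper mirroring the defaulting behaviour
// of inspectTrie above: args[0] is blocknum, args[1] jobnum, args[2] topn.
func parseInspectArgs(args []string) (jobnum, topN uint64, err error) {
	jobnum, topN = 1000, 10
	if len(args) >= 2 {
		if jobnum, err = strconv.ParseUint(args[1], 10, 64); err != nil {
			return 0, 0, fmt.Errorf("failed to parse jobnum %q: %v", args[1], err)
		}
	}
	if len(args) >= 3 {
		if topN, err = strconv.ParseUint(args[2], 10, 64); err != nil {
			return 0, 0, fmt.Errorf("failed to parse topn %q: %v", args[2], err)
		}
	}
	return jobnum, topN, nil
}

func main() {
	fmt.Println(parseInspectArgs([]string{"latest", "500", "20"})) // 500 20 <nil>
}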
View File

@@ -77,6 +77,7 @@ var (
utils.OverrideFullImmutabilityThreshold,
utils.OverrideMinBlocksForBlobRequests,
utils.OverrideDefaultExtraReserveForBlobRequests,
utils.OverrideBreatheBlockInterval,
utils.EnablePersonal,
utils.TxPoolLocalsFlag,
utils.TxPoolNoLocalsFlag,

View File

@@ -0,0 +1,51 @@
import { ethers } from "ethers";
import program from "commander";
// depends on ethjs v6.11.0+ for 4844, https://github.com/ethers-io/ethers.js/releases/tag/v6.11.0
// BSC testnet enabled 4844 on block: 39539137
// Usage:
// nvm use 20
// node check_blobtx.js --rpc https://data-seed-prebsc-1-s1.binance.org:8545 --startNum 39539137
// node check_blobtx.js --rpc https://data-seed-prebsc-1-s1.binance.org:8545 --startNum 39539137 --endNum 40345994
program.option("--rpc <Rpc>", "Rpc Server URL");
program.option("--startNum <Num>", "start block", 0);
program.option("--endNum <Num>", "end block", 0);
program.parse(process.argv);
const provider = new ethers.JsonRpcProvider(program.rpc);
const main = async () => {
var startBlock = parseInt(program.startNum)
var endBlock = parseInt(program.endNum)
if (isNaN(endBlock) || isNaN(startBlock) || startBlock == 0) {
console.error("invalid input, --startNum", program.startNum, "--end", program.endNum)
return
}
// if --endNum is not specified, set it to the latest block number.
if (endBlock == 0) {
endBlock = await provider.getBlockNumber();
}
if (startBlock > endBlock) {
console.error("invalid input, startBlock:",startBlock, " endBlock:", endBlock);
return
}
for (let i = startBlock; i <= endBlock; i++) {
let blockData = await provider.getBlock(i);
console.log("startBlock:",startBlock, "endBlock:", endBlock, "curBlock", i, "blobGasUsed", blockData.blobGasUsed);
if (blockData.blobGasUsed == 0) {
continue
}
for (let txIndex = 0; txIndex<= blockData.transactions.length - 1; txIndex++) {
let txHash = blockData.transactions[txIndex]
let txData = await provider.getTransaction(txHash);
if (txData.type == 3) {
console.log("BlobTx in block:",i, " txIndex:", txIndex, " txHash:", txHash);
}
}
}
};
main().then(() => process.exit(0))
.catch((error) => {
console.error(error);
process.exit(1);
});

View File

@@ -0,0 +1,49 @@
import { ethers } from "ethers";
import program from "commander";
// Usage:
// node faucet_request.js --rpc localhost:8545 --startNum 39539137
// node faucet_request.js --rpc localhost:8545 --startNum 39539137 --endNum 40345994
// node faucet_request.js --rpc https://data-seed-prebsc-1-s1.bnbchain.org:8545 --startNum 39539137 --endNum 40345994
program.option("--rpc <Rpc>", "Rpc Server URL");
program.option("--startNum <Num>", "start block", 0);
program.option("--endNum <Num>", "end block", 0);
program.parse(process.argv);
const provider = new ethers.JsonRpcProvider(program.rpc);
const main = async () => {
var startBlock = parseInt(program.startNum)
var endBlock = parseInt(program.endNum)
if (isNaN(endBlock) || isNaN(startBlock) || startBlock == 0) {
console.error("invalid input, --startNum", program.startNum, "--end", program.endNum)
return
}
// if --endNum is not specified, set it to the latest block number.
if (endBlock == 0) {
endBlock = await provider.getBlockNumber();
}
if (startBlock > endBlock) {
console.error("invalid input, startBlock:",startBlock, " endBlock:", endBlock);
return
}
let startBalance = await provider.getBalance("0xaa25Aa7a19f9c426E07dee59b12f944f4d9f1DD3", startBlock)
let endBalance = await provider.getBalance("0xaa25Aa7a19f9c426E07dee59b12f944f4d9f1DD3", endBlock)
const faucetAmount = BigInt(0.3 * 10**18); // Convert 0.3 ether to wei as a BigInt
const numFaucetRequest = (startBalance - endBalance) / faucetAmount;
// Convert BigInt to ether
const startBalanceEth = Number(startBalance) / 10**18;
const endBalanceEth = Number(endBalance) / 10**18;
console.log(`Start Balance: ${startBalanceEth} ETH`);
console.log(`End Balance: ${endBalanceEth} ETH`);
console.log("successful faucet request: ",numFaucetRequest);
};
main().then(() => process.exit(0))
.catch((error) => {
console.error(error);
process.exit(1);
});

View File

@@ -333,6 +333,12 @@ var (
Value: params.DefaultExtraReserveForBlobRequests,
Category: flags.EthCategory,
}
OverrideBreatheBlockInterval = &cli.Uint64Flag{
Name: "override.breatheblockinterval",
Usage: "It changes the interval between breathe blocks, only for testing purposes",
Value: params.BreatheBlockInterval,
Category: flags.EthCategory,
}
SyncModeFlag = &flags.TextMarshalerFlag{
Name: "syncmode",
Usage: `Blockchain sync mode ("snap" or "full")`,

View File

@@ -15,14 +15,13 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
)
const SecondsPerDay uint64 = 86400
// the params should be two blocks' times (timestamps)
func sameDayInUTC(first, second uint64) bool {
return first/SecondsPerDay == second/SecondsPerDay
return first/params.BreatheBlockInterval == second/params.BreatheBlockInterval
}
func isBreatheBlock(lastBlockTime, blockTime uint64) bool {

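With the hard-coded SecondsPerDay replaced by params.BreatheBlockInterval, the new --override.breatheblockinterval flag effectively shrinks the "day" used for breathe-block detection. Below is a minimal standalone sketch of the bucketing, assuming isBreatheBlock fires when two consecutive block timestamps fall into different buckets; names here are illustrative, not the repo's API.

package main

import "fmt"

// breatheBlockInterval stands in for params.BreatheBlockInterval (86400s by
// default); --override.breatheblockinterval lowers it for tests.
var breatheBlockInterval uint64 = 86400

// sameInterval mirrors sameDayInUTC after this change: two timestamps are in
// the same "day" when they land in the same interval bucket.
func sameInterval(first, second uint64) bool {
	return first/breatheBlockInterval == second/breatheBlockInterval
}

func main() {
	fmt.Println(sameInterval(600, 86_340)) // true: both in bucket 0 of an 86400s day
	breatheBlockInterval = 60              // a test-style override
	fmt.Println(sameInterval(600, 86_340)) // false: buckets 10 and 1439, so a breathe block is due
}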
View File

@@ -66,6 +66,31 @@ func NewBlockValidator(config *params.ChainConfig, blockchain *BlockChain, engin
return validator
}
// ValidateListsInBody validates that UncleHash, TxHash, and WithdrawalsHash correspond to the lists in the block body, respectively.
func ValidateListsInBody(block *types.Block) error {
header := block.Header()
if hash := types.CalcUncleHash(block.Uncles()); hash != header.UncleHash {
return fmt.Errorf("uncle root hash mismatch (header value %x, calculated %x)", header.UncleHash, hash)
}
if hash := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil)); hash != header.TxHash {
return fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, header.TxHash)
}
// Withdrawals are present after the Shanghai fork.
if header.WithdrawalsHash != nil {
// Withdrawals list must be present in body after Shanghai.
if block.Withdrawals() == nil {
return errors.New("missing withdrawals in block body")
}
if hash := types.DeriveSha(block.Withdrawals(), trie.NewStackTrie(nil)); hash != *header.WithdrawalsHash {
return fmt.Errorf("withdrawals root hash mismatch (header value %x, calculated %x)", *header.WithdrawalsHash, hash)
}
} else if block.Withdrawals() != nil { // Withdrawals turn into empty from nil when BlockBody has Sidecars
// Withdrawals are not allowed prior to shanghai fork
return errors.New("withdrawals present in block body")
}
return nil
}
// ValidateBody validates the given block's uncles and verifies the block
// header's transaction and uncle roots. The headers are assumed to be already
// validated at this point.
@@ -83,31 +108,12 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
if err := v.engine.VerifyUncles(v.bc, block); err != nil {
return err
}
if hash := types.CalcUncleHash(block.Uncles()); hash != header.UncleHash {
return fmt.Errorf("uncle root hash mismatch (header value %x, calculated %x)", header.UncleHash, hash)
}
validateFuns := []func() error{
func() error {
if hash := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil)); hash != header.TxHash {
return fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, header.TxHash)
}
return nil
return ValidateListsInBody(block)
},
func() error {
// Withdrawals are present after the Shanghai fork.
if header.WithdrawalsHash != nil {
// Withdrawals list must be present in body after Shanghai.
if block.Withdrawals() == nil {
return errors.New("missing withdrawals in block body")
}
if hash := types.DeriveSha(block.Withdrawals(), trie.NewStackTrie(nil)); hash != *header.WithdrawalsHash {
return fmt.Errorf("withdrawals root hash mismatch (header value %x, calculated %x)", *header.WithdrawalsHash, hash)
}
} else if block.Withdrawals() != nil { // Withdrawals turn into empty from nil when BlockBody has Sidecars
// Withdrawals are not allowed prior to shanghai fork
return errors.New("withdrawals present in block body")
}
// Blob transactions may be present after the Cancun fork.
var blobs int
for i, tx := range block.Transactions() {

View File

@@ -511,3 +511,12 @@ func (bc *BlockChain) SubscribeBlockProcessingEvent(ch chan<- bool) event.Subscr
func (bc *BlockChain) SubscribeFinalizedHeaderEvent(ch chan<- FinalizedHeaderEvent) event.Subscription {
return bc.scope.Track(bc.finalizedHeaderFeed.Subscribe(ch))
}
// AncientTail retrieves the tail of the ancient blocks
func (bc *BlockChain) AncientTail() (uint64, error) {
tail, err := bc.db.Tail()
if err != nil {
return 0, err
}
return tail, nil
}

View File

@@ -239,7 +239,7 @@ func (f *Freezer) Ancient(kind string, number uint64) ([]byte, error) {
// - if maxBytes is not specified, 'count' items will be returned if they are present.
func (f *Freezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) {
if table := f.tables[kind]; table != nil {
return table.RetrieveItems(start, count, maxBytes)
return table.RetrieveItems(start-f.offset, count, maxBytes)
}
return nil, errUnknownTable
}
@@ -252,7 +252,7 @@ func (f *Freezer) Ancients() (uint64, error) {
func (f *Freezer) TableAncients(kind string) (uint64, error) {
f.writeLock.RLock()
defer f.writeLock.RUnlock()
return f.tables[kind].items.Load(), nil
return f.tables[kind].items.Load() + f.offset, nil
}
// ItemAmountInAncient returns the actual length of current ancientDB.

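The two fixes above make the freezer honour its offset once prune-block has removed the oldest items: a chain-global item number must be shifted before indexing a table, and reported totals must add the offset back. A minimal sketch of that translation (prunedTable is illustrative, not the Freezer type):

package main

import "fmt"

// prunedTable is an illustrative stand-in for a freezer table whose first
// `offset` items were pruned: it physically stores items [offset, offset+items),
// so lookups shift by -offset and reported totals add the offset back.
type prunedTable struct {
	offset uint64
	items  uint64
}

func (t prunedTable) localIndex(global uint64) uint64 { return global - t.offset }

func (t prunedTable) ancients() uint64 { return t.items + t.offset }

func main() {
	t := prunedTable{offset: 1_000_000, items: 500}
	fmt.Println(t.localIndex(1_000_123)) // 123: position inside the table files
	fmt.Println(t.ancients())            // 1000500: chain-global item count
}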
View File

@@ -209,6 +209,9 @@ type BlockChain interface {
// UpdateChasingHead updates the remote best chain head, used by DA check now.
UpdateChasingHead(head *types.Header)
// AncientTail retrieves the tail of the ancient blocks
AncientTail() (uint64, error)
}
type DownloadOption func(downloader *Downloader) *Downloader
@@ -797,6 +800,11 @@ func (d *Downloader) findAncestor(p *peerConnection, localHeight uint64, remoteH
// We're above the max reorg threshold, find the earliest fork point
floor = int64(localHeight - maxForkAncestry)
}
// if we have pruned too much history, reset the floor
if tail, err := d.blockchain.AncientTail(); err == nil && tail > uint64(floor) {
floor = int64(tail)
}
// If we're doing a light sync, ensure the floor doesn't go below the CHT, as
// all headers before that point will be missing.
if mode == LightSync {

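The new floor reset keeps findAncestor from probing below the pruned ancient tail. A small sketch of the arithmetic (ancestorFloor is a hypothetical condensation, not the repo's function): with a local head of 1,000,000, a fork-ancestry window of 90,000, and history pruned below 950,000, the floor rises from 910,000 to 950,000.

package main

import "fmt"

// ancestorFloor condenses the floor logic in findAncestor: the reorg window
// caps the search, and a pruned ancient tail raises the floor so the ancestor
// lookup never asks for headers that no longer exist locally.
func ancestorFloor(localHeight, maxForkAncestry, ancientTail uint64) int64 {
	floor := int64(-1)
	if localHeight >= maxForkAncestry {
		floor = int64(localHeight - maxForkAncestry)
	}
	if floor >= 0 && ancientTail > uint64(floor) {
		floor = int64(ancientTail)
	}
	return floor
}

func main() {
	fmt.Println(ancestorFloor(1_000_000, 90_000, 950_000)) // 950000 instead of 910000
}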
View File

@@ -320,26 +320,22 @@ func newHandler(config *handlerConfig) (*handler, error) {
}
broadcastBlockWithCheck := func(block *types.Block, propagate bool) {
// All the block fetcher activities should be disabled
// after the transition. Print the warning log.
if h.merger.PoSFinalized() {
log.Warn("Unexpected validation activity", "hash", block.Hash(), "number", block.Number())
return
}
// Reject all the PoS style headers in the first place. No matter
// the chain has finished the transition or not, the PoS headers
// should only come from the trusted consensus layer instead of
// p2p network.
if beacon, ok := h.chain.Engine().(*beacon.Beacon); ok {
if beacon.IsPoSHeader(block.Header()) {
log.Warn("unexpected post-merge header")
return
}
}
if propagate {
if err := core.IsDataAvailable(h.chain, block); err != nil {
log.Error("Propagating block with invalid sidecars", "number", block.Number(), "hash", block.Hash(), "err", err)
return
checkErrs := make(chan error, 2)
go func() {
checkErrs <- core.ValidateListsInBody(block)
}()
go func() {
checkErrs <- core.IsDataAvailable(h.chain, block)
}()
for i := 0; i < cap(checkErrs); i++ {
err := <-checkErrs
if err != nil {
log.Error("Propagating invalid block", "number", block.Number(), "hash", block.Hash(), "err", err)
return
}
}
}
h.BroadcastBlock(block, propagate)

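broadcastBlockWithCheck now fans the body-list check and the DA check out to two goroutines and aborts on the first error. A standalone sketch of that collect-first-error pattern (runChecks is a hypothetical helper, not part of this change):

package main

import (
	"errors"
	"fmt"
)

// runChecks mirrors the pattern above: every check runs in its own goroutine,
// results arrive on a buffered channel, and the first failure wins.
func runChecks(checks ...func() error) error {
	errc := make(chan error, len(checks))
	for _, check := range checks {
		go func(check func() error) { errc <- check() }(check)
	}
	for range checks {
		if err := <-errc; err != nil {
			return err
		}
	}
	return nil
}

func main() {
	err := runChecks(
		func() error { return nil },                          // stands in for core.ValidateListsInBody
		func() error { return errors.New("missing blobs") },  // stands in for core.IsDataAvailable
	)
	fmt.Println(err) // missing blobs
}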
View File

@@ -189,6 +189,7 @@ const (
var (
MinBlocksForBlobRequests uint64 = 524288 // it keeps blob data available for ~18.2 days in local, ref: https://github.com/bnb-chain/BEPs/blob/master/BEPs/BEP-336.md#51-parameters.
DefaultExtraReserveForBlobRequests uint64 = 1 * (24 * 3600) / 3 // it adds more time for expired blobs for some request cases, like expiry blob when remote peer is syncing, default 1 day.
BreatheBlockInterval uint64 = 86400 // Controls the interval for updateValidatorSetV2
)
// Gas discount table for BLS12-381 G1 and G2 multi exponentiation operations

View File

@@ -4,17 +4,15 @@ import (
"bytes"
"errors"
"fmt"
"math/big"
"os"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
@@ -26,63 +24,113 @@ import (
"golang.org/x/sync/semaphore"
)
type Account struct {
Nonce uint64
Balance *big.Int
Root common.Hash // merkle root of the storage trie
CodeHash []byte
}
type Database interface {
database.Database
Scheme() string
Cap(limit common.StorageSize) error
DiskDB() ethdb.Database
}
const TopN = 3
type Inspector struct {
trie *Trie // traverse trie
db Database
stateRootHash common.Hash
blocknum uint64
blockNum uint64
root node // root of triedb
totalNum uint64
wg sync.WaitGroup
statLock sync.RWMutex
result map[string]*TrieTreeStat
sem *semaphore.Weighted
eoaAccountNums uint64
wg sync.WaitGroup
results stat
topN int
totalAccountNum atomic.Uint64
totalStorageNum atomic.Uint64
lastTime mclock.AbsTime
}
type TrieTreeStat struct {
is_account_trie bool
theNodeStatByLevel [15]NodeStat
totalNodeStat NodeStat
type stat struct {
lock sync.RWMutex
account *trieStat
storageTopN []*trieStat
storageTopNTotal []uint64
storageTotal nodeStat
storageTrieNum uint64
}
type NodeStat struct {
ShortNodeCnt uint64
FullNodeCnt uint64
ValueNodeCnt uint64
type trieStat struct {
owner common.Hash
totalNodeStat nodeStat
nodeStatByLevel [16]nodeStat
}
func (trieStat *TrieTreeStat) AtomicAdd(theNode node, height uint32) {
type nodeStat struct {
ShortNodeCnt atomic.Uint64
FullNodeCnt atomic.Uint64
ValueNodeCnt atomic.Uint64
}
func (ns *nodeStat) IsEmpty() bool {
if ns.FullNodeCnt.Load() == 0 && ns.ShortNodeCnt.Load() == 0 && ns.ValueNodeCnt.Load() == 0 {
return true
}
return false
}
func (s *stat) add(ts *trieStat, topN int) {
s.lock.Lock()
defer s.lock.Unlock()
if ts.owner == (common.Hash{}) {
s.account = ts
return
}
total := ts.totalNodeStat.ValueNodeCnt.Load() + ts.totalNodeStat.FullNodeCnt.Load() + ts.totalNodeStat.ShortNodeCnt.Load()
if len(s.storageTopNTotal) == 0 || total > s.storageTopNTotal[len(s.storageTopNTotal)-1] {
var (
i int
t uint64
)
for i, t = range s.storageTopNTotal {
if total < t {
continue
}
break
}
s.storageTopNTotal = append(s.storageTopNTotal[:i], append([]uint64{total}, s.storageTopNTotal[i:]...)...)
s.storageTopN = append(s.storageTopN[:i], append([]*trieStat{ts}, s.storageTopN[i:]...)...)
if len(s.storageTopN) > topN {
s.storageTopNTotal = s.storageTopNTotal[:topN]
s.storageTopN = s.storageTopN[:topN]
}
}
s.storageTotal.ShortNodeCnt.Add(ts.totalNodeStat.ShortNodeCnt.Load())
s.storageTotal.ValueNodeCnt.Add(ts.totalNodeStat.ValueNodeCnt.Load())
s.storageTotal.FullNodeCnt.Add(ts.totalNodeStat.FullNodeCnt.Load())
s.storageTrieNum++
}
func (trieStat *trieStat) add(theNode node, height int) {
switch (theNode).(type) {
case *shortNode:
atomic.AddUint64(&trieStat.totalNodeStat.ShortNodeCnt, 1)
atomic.AddUint64(&(trieStat.theNodeStatByLevel[height].ShortNodeCnt), 1)
trieStat.totalNodeStat.ShortNodeCnt.Add(1)
trieStat.nodeStatByLevel[height].ShortNodeCnt.Add(1)
case *fullNode:
atomic.AddUint64(&trieStat.totalNodeStat.FullNodeCnt, 1)
atomic.AddUint64(&trieStat.theNodeStatByLevel[height].FullNodeCnt, 1)
trieStat.totalNodeStat.FullNodeCnt.Add(1)
trieStat.nodeStatByLevel[height].FullNodeCnt.Add(1)
case valueNode:
atomic.AddUint64(&trieStat.totalNodeStat.ValueNodeCnt, 1)
atomic.AddUint64(&((trieStat.theNodeStatByLevel[height]).ValueNodeCnt), 1)
default:
panic(errors.New("Invalid node type to statistics"))
trieStat.totalNodeStat.ValueNodeCnt.Add(1)
trieStat.nodeStatByLevel[height].ValueNodeCnt.Add(1)
}
}
func (trieStat *TrieTreeStat) Display(ownerAddress string, treeType string) {
table := tablewriter.NewWriter(os.Stdout)
func (trieStat *trieStat) Display(ownerAddress string, treeType string) string {
sw := new(strings.Builder)
table := tablewriter.NewWriter(sw)
table.SetHeader([]string{"-", "Level", "ShortNodeCnt", "FullNodeCnt", "ValueNodeCnt"})
if ownerAddress == "" {
table.SetCaption(true, fmt.Sprintf("%v", treeType))
@@ -90,38 +138,27 @@ func (trieStat *TrieTreeStat) Display(ownerAddress string, treeType string) {
table.SetCaption(true, fmt.Sprintf("%v-%v", treeType, ownerAddress))
}
table.SetAlignment(1)
for i := 0; i < len(trieStat.theNodeStatByLevel); i++ {
nodeStat := trieStat.theNodeStatByLevel[i]
if nodeStat.FullNodeCnt == 0 && nodeStat.ShortNodeCnt == 0 && nodeStat.ValueNodeCnt == 0 {
break
for i := range trieStat.nodeStatByLevel {
if trieStat.nodeStatByLevel[i].IsEmpty() {
continue
}
table.AppendBulk([][]string{
{"-", strconv.Itoa(i), nodeStat.ShortNodeCount(), nodeStat.FullNodeCount(), nodeStat.ValueNodeCount()},
{"-", fmt.Sprintf("%d", i),
fmt.Sprintf("%d", trieStat.nodeStatByLevel[i].ShortNodeCnt.Load()),
fmt.Sprintf("%d", trieStat.nodeStatByLevel[i].FullNodeCnt.Load()),
fmt.Sprintf("%d", trieStat.nodeStatByLevel[i].ValueNodeCnt.Load())},
})
}
table.AppendBulk([][]string{
{"Total", "-", trieStat.totalNodeStat.ShortNodeCount(), trieStat.totalNodeStat.FullNodeCount(), trieStat.totalNodeStat.ValueNodeCount()},
{"Total", "-", fmt.Sprintf("%d", trieStat.totalNodeStat.ShortNodeCnt.Load()), fmt.Sprintf("%d", trieStat.totalNodeStat.FullNodeCnt.Load()), fmt.Sprintf("%d", trieStat.totalNodeStat.ValueNodeCnt.Load())},
})
table.Render()
}
func Uint64ToString(cnt uint64) string {
return fmt.Sprintf("%v", cnt)
}
func (nodeStat *NodeStat) ShortNodeCount() string {
return Uint64ToString(nodeStat.ShortNodeCnt)
}
func (nodeStat *NodeStat) FullNodeCount() string {
return Uint64ToString(nodeStat.FullNodeCnt)
}
func (nodeStat *NodeStat) ValueNodeCount() string {
return Uint64ToString(nodeStat.ValueNodeCnt)
return sw.String()
}
// NewInspector returns a new Inspector object
func NewInspector(tr *Trie, db Database, stateRootHash common.Hash, blocknum uint64, jobnum uint64) (*Inspector, error) {
func NewInspector(tr *Trie, db Database, stateRootHash common.Hash, blockNum uint64, jobNum uint64, topN int) (*Inspector, error) {
if tr == nil {
return nil, errors.New("trie is nil")
}
@@ -131,15 +168,20 @@ func NewInspector(tr *Trie, db Database, stateRootHash common.Hash, blocknum uin
}
ins := &Inspector{
trie: tr,
db: db,
stateRootHash: stateRootHash,
blocknum: blocknum,
root: tr.root,
result: make(map[string]*TrieTreeStat),
totalNum: (uint64)(0),
wg: sync.WaitGroup{},
sem: semaphore.NewWeighted(int64(jobnum)),
trie: tr,
db: db,
stateRootHash: stateRootHash,
blockNum: blockNum,
root: tr.root,
results: stat{},
topN: topN,
totalAccountNum: atomic.Uint64{},
totalStorageNum: atomic.Uint64{},
lastTime: mclock.Now(),
sem: semaphore.NewWeighted(int64(jobNum)),
wg: sync.WaitGroup{},
eoaAccountNums: 0,
}
@@ -147,155 +189,123 @@ func NewInspector(tr *Trie, db Database, stateRootHash common.Hash, blocknum uin
}
// Run statistics, external call
func (inspect *Inspector) Run() {
accountTrieStat := &TrieTreeStat{
is_account_trie: true,
}
if inspect.db.Scheme() == rawdb.HashScheme {
ticker := time.NewTicker(30 * time.Second)
go func() {
defer ticker.Stop()
for range ticker.C {
inspect.db.Cap(DEFAULT_TRIEDBCACHE_SIZE)
func (s *Inspector) Run() {
ticker := time.NewTicker(30 * time.Second)
go func() {
defer ticker.Stop()
for range ticker.C {
if s.db.Scheme() == rawdb.HashScheme {
s.db.Cap(DEFAULT_TRIEDBCACHE_SIZE)
}
}()
}
runtime.GC()
}
}()
if _, ok := inspect.result[""]; !ok {
inspect.result[""] = accountTrieStat
}
log.Info("Find Account Trie Tree", "rootHash: ", inspect.trie.Hash().String(), "BlockNum: ", inspect.blocknum)
log.Info("Find Account Trie Tree", "rootHash: ", s.trie.Hash().String(), "BlockNum: ", s.blockNum)
inspect.ConcurrentTraversal(inspect.trie, accountTrieStat, inspect.root, 0, []byte{})
inspect.wg.Wait()
ts := &trieStat{
owner: common.Hash{},
}
s.traversal(s.trie, ts, s.root, 0, []byte{})
s.results.add(ts, s.topN)
s.wg.Wait()
}
func (inspect *Inspector) SubConcurrentTraversal(theTrie *Trie, theTrieTreeStat *TrieTreeStat, theNode node, height uint32, path []byte) {
inspect.ConcurrentTraversal(theTrie, theTrieTreeStat, theNode, height, path)
inspect.wg.Done()
}
func (inspect *Inspector) ConcurrentTraversal(theTrie *Trie, theTrieTreeStat *TrieTreeStat, theNode node, height uint32, path []byte) {
// print process progress
total_num := atomic.AddUint64(&inspect.totalNum, 1)
if total_num%100000 == 0 {
fmt.Printf("Complete progress: %v, go routines Num: %v\n", total_num, runtime.NumGoroutine())
}
func (s *Inspector) traversal(trie *Trie, ts *trieStat, n node, height int, path []byte) {
// nil node
if theNode == nil {
if n == nil {
return
}
switch current := (theNode).(type) {
ts.add(n, height)
switch current := (n).(type) {
case *shortNode:
inspect.ConcurrentTraversal(theTrie, theTrieTreeStat, current.Val, height, append(path, current.Key...))
s.traversal(trie, ts, current.Val, height, append(path, current.Key...))
case *fullNode:
for idx, child := range current.Children {
if child == nil {
continue
}
childPath := append(path, byte(idx))
if inspect.sem.TryAcquire(1) {
inspect.wg.Add(1)
dst := make([]byte, len(childPath))
copy(dst, childPath)
go inspect.SubConcurrentTraversal(theTrie, theTrieTreeStat, child, height+1, dst)
} else {
inspect.ConcurrentTraversal(theTrie, theTrieTreeStat, child, height+1, childPath)
}
p := common.CopyBytes(append(path, byte(idx)))
s.traversal(trie, ts, child, height+1, p)
}
case hashNode:
n, err := theTrie.resloveWithoutTrack(current, path)
tn, err := trie.resloveWithoutTrack(current, path)
if err != nil {
fmt.Printf("Resolve HashNode error: %v, TrieRoot: %v, Height: %v, Path: %v\n", err, theTrie.Hash().String(), height+1, path)
fmt.Printf("Resolve HashNode error: %v, TrieRoot: %v, Height: %v, Path: %v\n", err, trie.Hash().String(), height+1, path)
return
}
inspect.ConcurrentTraversal(theTrie, theTrieTreeStat, n, height, path)
return
s.PrintProgress(trie)
s.traversal(trie, ts, tn, height, path)
case valueNode:
if !hasTerm(path) {
break
}
var account Account
var account types.StateAccount
if err := rlp.Decode(bytes.NewReader(current), &account); err != nil {
break
}
if common.BytesToHash(account.CodeHash) == types.EmptyCodeHash {
inspect.eoaAccountNums++
s.eoaAccountNums++
}
if account.Root == (common.Hash{}) || account.Root == types.EmptyRootHash {
break
}
ownerAddress := common.BytesToHash(hexToCompact(path))
contractTrie, err := New(StorageTrieID(inspect.stateRootHash, ownerAddress, account.Root), inspect.db)
contractTrie, err := New(StorageTrieID(s.stateRootHash, ownerAddress, account.Root), s.db)
if err != nil {
fmt.Printf("New contract trie node: %v, error: %v, Height: %v, Path: %v\n", theNode, err, height, path)
break
panic(err)
}
contractTrie.tracer.reset()
trieStat := &TrieTreeStat{
is_account_trie: false,
}
inspect.statLock.Lock()
if _, ok := inspect.result[ownerAddress.String()]; !ok {
inspect.result[ownerAddress.String()] = trieStat
if s.sem.TryAcquire(1) {
s.wg.Add(1)
go func() {
t := &trieStat{
owner: ownerAddress,
}
s.traversal(contractTrie, t, contractTrie.root, 0, []byte{})
s.results.add(t, s.topN)
s.sem.Release(1)
s.wg.Done()
}()
} else {
t := &trieStat{
owner: ownerAddress,
}
s.traversal(contractTrie, t, contractTrie.root, 0, []byte{})
s.results.add(t, s.topN)
}
inspect.statLock.Unlock()
// log.Info("Find Contract Trie Tree, rootHash: ", contractTrie.Hash().String(), "")
inspect.wg.Add(1)
go inspect.SubConcurrentTraversal(contractTrie, trieStat, contractTrie.root, 0, []byte{})
default:
panic(errors.New("Invalid node type to traverse."))
panic(errors.New("invalid node type to traverse"))
}
theTrieTreeStat.AtomicAdd(theNode, height)
}
func (inspect *Inspector) DisplayResult() {
func (s *Inspector) PrintProgress(t *Trie) {
var (
elapsed = mclock.Now().Sub(s.lastTime)
)
if t.owner == (common.Hash{}) {
s.totalAccountNum.Add(1)
} else {
s.totalStorageNum.Add(1)
}
if elapsed > 4*time.Second {
log.Info("traversal progress", "TotalAccountNum", s.totalAccountNum.Load(), "TotalStorageNum", s.totalStorageNum.Load(), "Goroutine", runtime.NumGoroutine())
s.lastTime = mclock.Now()
}
}
func (s *Inspector) DisplayResult() {
// display root hash
if _, ok := inspect.result[""]; !ok {
log.Info("Display result error", "missing account trie")
return
}
inspect.result[""].Display("", "AccountTrie")
fmt.Println(s.results.account.Display("", "AccountTrie"))
fmt.Println("EOA accounts num: ", s.eoaAccountNums)
type SortedTrie struct {
totalNum uint64
ownerAddress string
}
// display contract trie
var sortedTriesByNums []SortedTrie
var totalContactsNodeStat NodeStat
var contractTrieCnt uint64 = 0
for ownerAddress, stat := range inspect.result {
if ownerAddress == "" {
continue
}
contractTrieCnt++
totalContactsNodeStat.ShortNodeCnt += stat.totalNodeStat.ShortNodeCnt
totalContactsNodeStat.FullNodeCnt += stat.totalNodeStat.FullNodeCnt
totalContactsNodeStat.ValueNodeCnt += stat.totalNodeStat.ValueNodeCnt
totalNodeCnt := stat.totalNodeStat.ShortNodeCnt + stat.totalNodeStat.ValueNodeCnt + stat.totalNodeStat.FullNodeCnt
sortedTriesByNums = append(sortedTriesByNums, SortedTrie{totalNum: totalNodeCnt, ownerAddress: ownerAddress})
}
sort.Slice(sortedTriesByNums, func(i, j int) bool {
return sortedTriesByNums[i].totalNum > sortedTriesByNums[j].totalNum
})
fmt.Println("EOA accounts num: ", inspect.eoaAccountNums)
// only display top 5
for i, t := range sortedTriesByNums {
if i > 5 {
break
}
if stat, ok := inspect.result[t.ownerAddress]; !ok {
log.Error("Storage trie stat not found", "ownerAddress", t.ownerAddress)
} else {
stat.Display(t.ownerAddress, "ContractTrie")
}
for _, st := range s.results.storageTopN {
fmt.Println(st.Display(st.owner.String(), "StorageTrie"))
}
fmt.Printf("Contract Trie, total trie num: %v, ShortNodeCnt: %v, FullNodeCnt: %v, ValueNodeCnt: %v\n",
contractTrieCnt, totalContactsNodeStat.ShortNodeCnt, totalContactsNodeStat.FullNodeCnt, totalContactsNodeStat.ValueNodeCnt)
s.results.storageTrieNum, s.results.storageTotal.ShortNodeCnt.Load(), s.results.storageTotal.FullNodeCnt.Load(), s.results.storageTotal.ValueNodeCnt.Load())
}
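
For reference, the ranking maintained by stat.add boils down to a sorted insert into a slice that is trimmed to topN entries. A standalone sketch of that bookkeeping over the node-count totals alone (insertTopN is a hypothetical helper; unlike stat.add it also admits values while the list is not yet full):

package main

import "fmt"

// insertTopN keeps `totals` sorted in descending order and trimmed to topN.
func insertTopN(totals []uint64, total uint64, topN int) []uint64 {
	if len(totals) >= topN && total <= totals[len(totals)-1] {
		return totals // too small to enter the ranking
	}
	i := 0
	for ; i < len(totals); i++ {
		if total >= totals[i] {
			break
		}
	}
	totals = append(totals[:i], append([]uint64{total}, totals[i:]...)...)
	if len(totals) > topN {
		totals = totals[:topN]
	}
	return totals
}

func main() {
	var top []uint64
	for _, t := range []uint64{5, 42, 17, 99, 3, 60} {
		top = insertTopN(top, t, 3)
	}
	fmt.Println(top) // [99 60 42]
}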