Compare commits

...

8 Commits

Author SHA1 Message Date
emailtovamos
e149161d54 jsutil: correct the division 2024-05-21 15:42:58 +01:00
emailtovamos
cb0b34b7c2 jsutils: update logs 2024-05-16 14:34:11 +01:00
emailtovamos
5b7a71e38e jsutils: faucet balance 2024-05-16 12:25:25 +01:00
galaio
6b8cbbe172 sync: fix some sync issues caused by prune-block. (#2466) 2024-05-16 12:07:13 +08:00
setunapo
5ea2ada0ee utils: add check_blobtx.js (#2463) 2024-05-15 18:17:57 +08:00
Fynn
b230a02006 cmd: fix memory leak when big dataset (#2455) 2024-05-15 15:28:57 +08:00
Nathan
86e3a02490 cmd/utils: add a flag to change breathe block interval for testing (#2462) 2024-05-15 15:27:05 +08:00
Nathan
0c0958ff87 eth/handler: check lists in body before broadcast blocks (#2461) 2024-05-15 14:54:25 +08:00
14 changed files with 371 additions and 217 deletions

View File

@@ -203,6 +203,9 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
if ctx.IsSet(utils.OverrideDefaultExtraReserveForBlobRequests.Name) { if ctx.IsSet(utils.OverrideDefaultExtraReserveForBlobRequests.Name) {
params.DefaultExtraReserveForBlobRequests = ctx.Uint64(utils.OverrideDefaultExtraReserveForBlobRequests.Name) params.DefaultExtraReserveForBlobRequests = ctx.Uint64(utils.OverrideDefaultExtraReserveForBlobRequests.Name)
} }
if ctx.IsSet(utils.OverrideBreatheBlockInterval.Name) {
params.BreatheBlockInterval = ctx.Uint64(utils.OverrideBreatheBlockInterval.Name)
}
backend, eth := utils.RegisterEthService(stack, &cfg.Eth) backend, eth := utils.RegisterEthService(stack, &cfg.Eth)

View File

@@ -106,12 +106,12 @@ Remove blockchain and state databases`,
dbInspectTrieCmd = &cli.Command{ dbInspectTrieCmd = &cli.Command{
Action: inspectTrie, Action: inspectTrie,
Name: "inspect-trie", Name: "inspect-trie",
ArgsUsage: "<blocknum> <jobnum>", ArgsUsage: "<blocknum> <jobnum> <topn>",
Flags: []cli.Flag{ Flags: []cli.Flag{
utils.DataDirFlag, utils.DataDirFlag,
utils.SyncModeFlag, utils.SyncModeFlag,
}, },
Usage: "Inspect the MPT tree of the account and contract.", Usage: "Inspect the MPT tree of the account and contract. 'blocknum' can be latest/snapshot/number. 'topn' means output the top N storage tries info ranked by the total number of TrieNodes",
Description: `This commands iterates the entrie WorldState.`, Description: `This commands iterates the entrie WorldState.`,
} }
dbCheckStateContentCmd = &cli.Command{ dbCheckStateContentCmd = &cli.Command{
@@ -386,6 +386,7 @@ func inspectTrie(ctx *cli.Context) error {
blockNumber uint64 blockNumber uint64
trieRootHash common.Hash trieRootHash common.Hash
jobnum uint64 jobnum uint64
topN uint64
) )
stack, _ := makeConfigNode(ctx) stack, _ := makeConfigNode(ctx)
@@ -411,12 +412,25 @@ func inspectTrie(ctx *cli.Context) error {
if ctx.NArg() == 1 { if ctx.NArg() == 1 {
jobnum = 1000 jobnum = 1000
topN = 10
} else if ctx.NArg() == 2 {
var err error
jobnum, err = strconv.ParseUint(ctx.Args().Get(1), 10, 64)
if err != nil {
return fmt.Errorf("failed to Parse jobnum, Args[1]: %v, err: %v", ctx.Args().Get(1), err)
}
topN = 10
} else { } else {
var err error var err error
jobnum, err = strconv.ParseUint(ctx.Args().Get(1), 10, 64) jobnum, err = strconv.ParseUint(ctx.Args().Get(1), 10, 64)
if err != nil { if err != nil {
return fmt.Errorf("failed to Parse jobnum, Args[1]: %v, err: %v", ctx.Args().Get(1), err) return fmt.Errorf("failed to Parse jobnum, Args[1]: %v, err: %v", ctx.Args().Get(1), err)
} }
topN, err = strconv.ParseUint(ctx.Args().Get(2), 10, 64)
if err != nil {
return fmt.Errorf("failed to Parse topn, Args[1]: %v, err: %v", ctx.Args().Get(1), err)
}
} }
if blockNumber != math.MaxUint64 { if blockNumber != math.MaxUint64 {
@@ -437,6 +451,7 @@ func inspectTrie(ctx *cli.Context) error {
if dbScheme == rawdb.PathScheme { if dbScheme == rawdb.PathScheme {
config = &triedb.Config{ config = &triedb.Config{
PathDB: utils.PathDBConfigAddJournalFilePath(stack, pathdb.ReadOnly), PathDB: utils.PathDBConfigAddJournalFilePath(stack, pathdb.ReadOnly),
Cache: 0,
} }
} else if dbScheme == rawdb.HashScheme { } else if dbScheme == rawdb.HashScheme {
config = triedb.HashDefaults config = triedb.HashDefaults
@@ -448,7 +463,7 @@ func inspectTrie(ctx *cli.Context) error {
fmt.Printf("fail to new trie tree, err: %v, rootHash: %v\n", err, trieRootHash.String()) fmt.Printf("fail to new trie tree, err: %v, rootHash: %v\n", err, trieRootHash.String())
return err return err
} }
theInspect, err := trie.NewInspector(theTrie, triedb, trieRootHash, blockNumber, jobnum) theInspect, err := trie.NewInspector(theTrie, triedb, trieRootHash, blockNumber, jobnum, int(topN))
if err != nil { if err != nil {
return err return err
} }

View File

@@ -77,6 +77,7 @@ var (
utils.OverrideFullImmutabilityThreshold, utils.OverrideFullImmutabilityThreshold,
utils.OverrideMinBlocksForBlobRequests, utils.OverrideMinBlocksForBlobRequests,
utils.OverrideDefaultExtraReserveForBlobRequests, utils.OverrideDefaultExtraReserveForBlobRequests,
utils.OverrideBreatheBlockInterval,
utils.EnablePersonal, utils.EnablePersonal,
utils.TxPoolLocalsFlag, utils.TxPoolLocalsFlag,
utils.TxPoolNoLocalsFlag, utils.TxPoolNoLocalsFlag,

View File

@@ -0,0 +1,51 @@
import { ethers } from "ethers";
import program from "commander";

// Scan a block range and report every EIP-4844 blob transaction found.
// depends on ethjs v6.11.0+ for 4844, https://github.com/ethers-io/ethers.js/releases/tag/v6.11.0
// BSC testnet enabled 4844 on block: 39539137
// Usage:
//   nvm use 20
//   node check_blobtx.js --rpc https://data-seed-prebsc-1-s1.binance.org:8545 --startNum 39539137
//   node check_blobtx.js --rpc https://data-seed-prebsc-1-s1.binance.org:8545 --startNum 39539137 --endNum 40345994
program.option("--rpc <Rpc>", "Rpc Server URL");
program.option("--startNum <Num>", "start block", 0);
program.option("--endNum <Num>", "end block", 0);
program.parse(process.argv);

const provider = new ethers.JsonRpcProvider(program.rpc);

const main = async () => {
    const startBlock = parseInt(program.startNum);
    let endBlock = parseInt(program.endNum);
    if (isNaN(endBlock) || isNaN(startBlock) || startBlock == 0) {
        // report both flags with their real names (--endNum, not --end)
        console.error("invalid input, --startNum", program.startNum, "--endNum", program.endNum);
        return;
    }
    // if --endNum is not specified, set it to the latest block number.
    if (endBlock == 0) {
        endBlock = await provider.getBlockNumber();
    }
    if (startBlock > endBlock) {
        console.error("invalid input, startBlock:", startBlock, " endBlock:", endBlock);
        return;
    }
    for (let i = startBlock; i <= endBlock; i++) {
        const blockData = await provider.getBlock(i);
        console.log("startBlock:", startBlock, "endBlock:", endBlock, "curBlock", i, "blobGasUsed", blockData.blobGasUsed);
        // Skip blocks that burned no blob gas; pre-Cancun blocks report
        // blobGasUsed as null, which a `== 0` check would NOT catch and
        // would trigger a needless per-transaction RPC scan.
        if (!blockData.blobGasUsed) {
            continue;
        }
        for (let txIndex = 0; txIndex < blockData.transactions.length; txIndex++) {
            const txHash = blockData.transactions[txIndex];
            const txData = await provider.getTransaction(txHash);
            // type 3 == EIP-4844 blob-carrying transaction
            if (txData.type == 3) {
                console.log("BlobTx in block:", i, " txIndex:", txIndex, " txHash:", txHash);
            }
        }
    }
};

main().then(() => process.exit(0))
    .catch((error) => {
        console.error(error);
        process.exit(1);
    });

View File

@@ -0,0 +1,49 @@
import { ethers } from "ethers";
import program from "commander";

// Estimate how many faucet requests were served between two blocks by
// dividing the faucet account's balance drop by the per-request amount.
// Usage:
//   node faucet_request.js --rpc localhost:8545 --startNum 39539137
//   node faucet_request.js --rpc localhost:8545 --startNum 39539137 --endNum 40345994
//   node faucet_request.js --rpc https://data-seed-prebsc-1-s1.bnbchain.org:8545 --startNum 39539137 --endNum 40345994
program.option("--rpc <Rpc>", "Rpc Server URL");
program.option("--startNum <Num>", "start block", 0);
program.option("--endNum <Num>", "end block", 0);
program.parse(process.argv);

const provider = new ethers.JsonRpcProvider(program.rpc);

const main = async () => {
    const startBlock = parseInt(program.startNum);
    let endBlock = parseInt(program.endNum);
    if (isNaN(endBlock) || isNaN(startBlock) || startBlock == 0) {
        // report both flags with their real names (--endNum, not --end)
        console.error("invalid input, --startNum", program.startNum, "--endNum", program.endNum);
        return;
    }
    // if --endNum is not specified, set it to the latest block number.
    if (endBlock == 0) {
        endBlock = await provider.getBlockNumber();
    }
    if (startBlock > endBlock) {
        console.error("invalid input, startBlock:", startBlock, " endBlock:", endBlock);
        return;
    }
    const startBalance = await provider.getBalance("0xaa25Aa7a19f9c426E07dee59b12f944f4d9f1DD3", startBlock);
    const endBalance = await provider.getBalance("0xaa25Aa7a19f9c426E07dee59b12f944f4d9f1DD3", endBlock);
    // 0.3 ether in wei, built with pure BigInt arithmetic. Note that
    // BigInt(0.3 * 10**18) would round through a float64 and produce
    // 299999999999999999872n rather than exactly 3e17.
    const faucetAmount = 3n * 10n ** 17n;
    const numFaucetRequest = (startBalance - endBalance) / faucetAmount;
    // Convert BigInt to ether (approximate; only for display)
    const startBalanceEth = Number(startBalance) / 10**18;
    const endBalanceEth = Number(endBalance) / 10**18;
    console.log(`Start Balance: ${startBalanceEth} ETH`);
    console.log(`End Balance: ${endBalanceEth} ETH`);
    console.log("successful faucet request: ", numFaucetRequest);
};

main().then(() => process.exit(0))
    .catch((error) => {
        console.error(error);
        process.exit(1);
    });

View File

@@ -333,6 +333,12 @@ var (
Value: params.DefaultExtraReserveForBlobRequests, Value: params.DefaultExtraReserveForBlobRequests,
Category: flags.EthCategory, Category: flags.EthCategory,
} }
OverrideBreatheBlockInterval = &cli.Uint64Flag{
Name: "override.breatheblockinterval",
Usage: "It changes the interval between breathe blocks, only for testing purpose",
Value: params.BreatheBlockInterval,
Category: flags.EthCategory,
}
SyncModeFlag = &flags.TextMarshalerFlag{ SyncModeFlag = &flags.TextMarshalerFlag{
Name: "syncmode", Name: "syncmode",
Usage: `Blockchain sync mode ("snap" or "full")`, Usage: `Blockchain sync mode ("snap" or "full")`,

View File

@@ -15,14 +15,13 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
) )
const SecondsPerDay uint64 = 86400
// the params should be two blocks' time(timestamp) // the params should be two blocks' time(timestamp)
func sameDayInUTC(first, second uint64) bool { func sameDayInUTC(first, second uint64) bool {
return first/SecondsPerDay == second/SecondsPerDay return first/params.BreatheBlockInterval == second/params.BreatheBlockInterval
} }
func isBreatheBlock(lastBlockTime, blockTime uint64) bool { func isBreatheBlock(lastBlockTime, blockTime uint64) bool {

View File

@@ -66,6 +66,31 @@ func NewBlockValidator(config *params.ChainConfig, blockchain *BlockChain, engin
return validator return validator
} }
// ValidateListsInBody verifies that the header's UncleHash, TxHash and
// WithdrawalsHash each match the hash derived from the corresponding list
// carried in the block body.
func ValidateListsInBody(block *types.Block) error {
	hdr := block.Header()
	// The uncle list must hash to the header's uncle root.
	if have := types.CalcUncleHash(block.Uncles()); have != hdr.UncleHash {
		return fmt.Errorf("uncle root hash mismatch (header value %x, calculated %x)", hdr.UncleHash, have)
	}
	// The transaction list must hash to the header's transaction root.
	if have := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil)); have != hdr.TxHash {
		return fmt.Errorf("transaction root hash mismatch: have %x, want %x", have, hdr.TxHash)
	}
	// Withdrawals only exist after the Shanghai fork. Before it, the body
	// must not carry any (withdrawals turn into empty from nil when the
	// block body has sidecars).
	if hdr.WithdrawalsHash == nil {
		if block.Withdrawals() != nil {
			return errors.New("withdrawals present in block body")
		}
		return nil
	}
	// Post-Shanghai: the list must be present and hash to the header value.
	if block.Withdrawals() == nil {
		return errors.New("missing withdrawals in block body")
	}
	if have := types.DeriveSha(block.Withdrawals(), trie.NewStackTrie(nil)); have != *hdr.WithdrawalsHash {
		return fmt.Errorf("withdrawals root hash mismatch (header value %x, calculated %x)", *hdr.WithdrawalsHash, have)
	}
	return nil
}
// ValidateBody validates the given block's uncles and verifies the block // ValidateBody validates the given block's uncles and verifies the block
// header's transaction and uncle roots. The headers are assumed to be already // header's transaction and uncle roots. The headers are assumed to be already
// validated at this point. // validated at this point.
@@ -83,31 +108,12 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
if err := v.engine.VerifyUncles(v.bc, block); err != nil { if err := v.engine.VerifyUncles(v.bc, block); err != nil {
return err return err
} }
if hash := types.CalcUncleHash(block.Uncles()); hash != header.UncleHash {
return fmt.Errorf("uncle root hash mismatch (header value %x, calculated %x)", header.UncleHash, hash)
}
validateFuns := []func() error{ validateFuns := []func() error{
func() error { func() error {
if hash := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil)); hash != header.TxHash { return ValidateListsInBody(block)
return fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, header.TxHash)
}
return nil
}, },
func() error { func() error {
// Withdrawals are present after the Shanghai fork.
if header.WithdrawalsHash != nil {
// Withdrawals list must be present in body after Shanghai.
if block.Withdrawals() == nil {
return errors.New("missing withdrawals in block body")
}
if hash := types.DeriveSha(block.Withdrawals(), trie.NewStackTrie(nil)); hash != *header.WithdrawalsHash {
return fmt.Errorf("withdrawals root hash mismatch (header value %x, calculated %x)", *header.WithdrawalsHash, hash)
}
} else if block.Withdrawals() != nil { // Withdrawals turn into empty from nil when BlockBody has Sidecars
// Withdrawals are not allowed prior to shanghai fork
return errors.New("withdrawals present in block body")
}
// Blob transactions may be present after the Cancun fork. // Blob transactions may be present after the Cancun fork.
var blobs int var blobs int
for i, tx := range block.Transactions() { for i, tx := range block.Transactions() {

View File

@@ -511,3 +511,12 @@ func (bc *BlockChain) SubscribeBlockProcessingEvent(ch chan<- bool) event.Subscr
func (bc *BlockChain) SubscribeFinalizedHeaderEvent(ch chan<- FinalizedHeaderEvent) event.Subscription { func (bc *BlockChain) SubscribeFinalizedHeaderEvent(ch chan<- FinalizedHeaderEvent) event.Subscription {
return bc.scope.Track(bc.finalizedHeaderFeed.Subscribe(ch)) return bc.scope.Track(bc.finalizedHeaderFeed.Subscribe(ch))
} }
// AncientTail retrieves the tail (the number of the oldest retained item)
// of the ancient block store.
func (bc *BlockChain) AncientTail() (uint64, error) {
	// The signatures match exactly, so forward the call directly instead of
	// unpacking and repacking (tail, err).
	return bc.db.Tail()
}

View File

@@ -239,7 +239,7 @@ func (f *Freezer) Ancient(kind string, number uint64) ([]byte, error) {
// - if maxBytes is not specified, 'count' items will be returned if they are present. // - if maxBytes is not specified, 'count' items will be returned if they are present.
func (f *Freezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) { func (f *Freezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) {
if table := f.tables[kind]; table != nil { if table := f.tables[kind]; table != nil {
return table.RetrieveItems(start, count, maxBytes) return table.RetrieveItems(start-f.offset, count, maxBytes)
} }
return nil, errUnknownTable return nil, errUnknownTable
} }
@@ -252,7 +252,7 @@ func (f *Freezer) Ancients() (uint64, error) {
func (f *Freezer) TableAncients(kind string) (uint64, error) { func (f *Freezer) TableAncients(kind string) (uint64, error) {
f.writeLock.RLock() f.writeLock.RLock()
defer f.writeLock.RUnlock() defer f.writeLock.RUnlock()
return f.tables[kind].items.Load(), nil return f.tables[kind].items.Load() + f.offset, nil
} }
// ItemAmountInAncient returns the actual length of current ancientDB. // ItemAmountInAncient returns the actual length of current ancientDB.

View File

@@ -209,6 +209,9 @@ type BlockChain interface {
// UpdateChasingHead update remote best chain head, used by DA check now. // UpdateChasingHead update remote best chain head, used by DA check now.
UpdateChasingHead(head *types.Header) UpdateChasingHead(head *types.Header)
// AncientTail retrieves the tail the ancients blocks
AncientTail() (uint64, error)
} }
type DownloadOption func(downloader *Downloader) *Downloader type DownloadOption func(downloader *Downloader) *Downloader
@@ -797,6 +800,11 @@ func (d *Downloader) findAncestor(p *peerConnection, localHeight uint64, remoteH
// We're above the max reorg threshold, find the earliest fork point // We're above the max reorg threshold, find the earliest fork point
floor = int64(localHeight - maxForkAncestry) floor = int64(localHeight - maxForkAncestry)
} }
// if we have pruned too much history, reset the floor
if tail, err := d.blockchain.AncientTail(); err == nil && tail > uint64(floor) {
floor = int64(tail)
}
// If we're doing a light sync, ensure the floor doesn't go below the CHT, as // If we're doing a light sync, ensure the floor doesn't go below the CHT, as
// all headers before that point will be missing. // all headers before that point will be missing.
if mode == LightSync { if mode == LightSync {

View File

@@ -320,26 +320,22 @@ func newHandler(config *handlerConfig) (*handler, error) {
} }
broadcastBlockWithCheck := func(block *types.Block, propagate bool) { broadcastBlockWithCheck := func(block *types.Block, propagate bool) {
// All the block fetcher activities should be disabled
// after the transition. Print the warning log.
if h.merger.PoSFinalized() {
log.Warn("Unexpected validation activity", "hash", block.Hash(), "number", block.Number())
return
}
// Reject all the PoS style headers in the first place. No matter
// the chain has finished the transition or not, the PoS headers
// should only come from the trusted consensus layer instead of
// p2p network.
if beacon, ok := h.chain.Engine().(*beacon.Beacon); ok {
if beacon.IsPoSHeader(block.Header()) {
log.Warn("unexpected post-merge header")
return
}
}
if propagate { if propagate {
if err := core.IsDataAvailable(h.chain, block); err != nil { checkErrs := make(chan error, 2)
log.Error("Propagating block with invalid sidecars", "number", block.Number(), "hash", block.Hash(), "err", err)
return go func() {
checkErrs <- core.ValidateListsInBody(block)
}()
go func() {
checkErrs <- core.IsDataAvailable(h.chain, block)
}()
for i := 0; i < cap(checkErrs); i++ {
err := <-checkErrs
if err != nil {
log.Error("Propagating invalid block", "number", block.Number(), "hash", block.Hash(), "err", err)
return
}
} }
} }
h.BroadcastBlock(block, propagate) h.BroadcastBlock(block, propagate)

View File

@@ -189,6 +189,7 @@ const (
var ( var (
MinBlocksForBlobRequests uint64 = 524288 // it keeps blob data available for ~18.2 days in local, ref: https://github.com/bnb-chain/BEPs/blob/master/BEPs/BEP-336.md#51-parameters. MinBlocksForBlobRequests uint64 = 524288 // it keeps blob data available for ~18.2 days in local, ref: https://github.com/bnb-chain/BEPs/blob/master/BEPs/BEP-336.md#51-parameters.
DefaultExtraReserveForBlobRequests uint64 = 1 * (24 * 3600) / 3 // it adds more time for expired blobs for some request cases, like expiry blob when remote peer is syncing, default 1 day. DefaultExtraReserveForBlobRequests uint64 = 1 * (24 * 3600) / 3 // it adds more time for expired blobs for some request cases, like expiry blob when remote peer is syncing, default 1 day.
BreatheBlockInterval uint64 = 86400 // Controls the interval for updateValidatorSetV2
) )
// Gas discount table for BLS12-381 G1 and G2 multi exponentiation operations // Gas discount table for BLS12-381 G1 and G2 multi exponentiation operations

View File

@@ -4,17 +4,15 @@ import (
"bytes" "bytes"
"errors" "errors"
"fmt" "fmt"
"math/big"
"os"
"runtime" "runtime"
"sort" "strings"
"strconv"
"sync" "sync"
"sync/atomic" "sync/atomic"
"time" "time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
@@ -26,63 +24,113 @@ import (
"golang.org/x/sync/semaphore" "golang.org/x/sync/semaphore"
) )
type Account struct {
Nonce uint64
Balance *big.Int
Root common.Hash // merkle root of the storage trie
CodeHash []byte
}
type Database interface { type Database interface {
database.Database database.Database
Scheme() string Scheme() string
Cap(limit common.StorageSize) error Cap(limit common.StorageSize) error
DiskDB() ethdb.Database DiskDB() ethdb.Database
} }
const TopN = 3
type Inspector struct { type Inspector struct {
trie *Trie // traverse trie trie *Trie // traverse trie
db Database db Database
stateRootHash common.Hash stateRootHash common.Hash
blocknum uint64 blockNum uint64
root node // root of triedb root node // root of triedb
totalNum uint64
wg sync.WaitGroup
statLock sync.RWMutex
result map[string]*TrieTreeStat
sem *semaphore.Weighted sem *semaphore.Weighted
eoaAccountNums uint64 eoaAccountNums uint64
wg sync.WaitGroup
results stat
topN int
totalAccountNum atomic.Uint64
totalStorageNum atomic.Uint64
lastTime mclock.AbsTime
} }
type TrieTreeStat struct { type stat struct {
is_account_trie bool lock sync.RWMutex
theNodeStatByLevel [15]NodeStat account *trieStat
totalNodeStat NodeStat storageTopN []*trieStat
storageTopNTotal []uint64
storageTotal nodeStat
storageTrieNum uint64
} }
type NodeStat struct { type trieStat struct {
ShortNodeCnt uint64 owner common.Hash
FullNodeCnt uint64 totalNodeStat nodeStat
ValueNodeCnt uint64 nodeStatByLevel [16]nodeStat
} }
func (trieStat *TrieTreeStat) AtomicAdd(theNode node, height uint32) { type nodeStat struct {
ShortNodeCnt atomic.Uint64
FullNodeCnt atomic.Uint64
ValueNodeCnt atomic.Uint64
}
func (ns *nodeStat) IsEmpty() bool {
if ns.FullNodeCnt.Load() == 0 && ns.ShortNodeCnt.Load() == 0 && ns.ValueNodeCnt.Load() == 0 {
return true
}
return false
}
func (s *stat) add(ts *trieStat, topN int) {
s.lock.Lock()
defer s.lock.Unlock()
if ts.owner == (common.Hash{}) {
s.account = ts
return
}
total := ts.totalNodeStat.ValueNodeCnt.Load() + ts.totalNodeStat.FullNodeCnt.Load() + ts.totalNodeStat.ShortNodeCnt.Load()
if len(s.storageTopNTotal) == 0 || total > s.storageTopNTotal[len(s.storageTopNTotal)-1] {
var (
i int
t uint64
)
for i, t = range s.storageTopNTotal {
if total < t {
continue
}
break
}
s.storageTopNTotal = append(s.storageTopNTotal[:i], append([]uint64{total}, s.storageTopNTotal[i:]...)...)
s.storageTopN = append(s.storageTopN[:i], append([]*trieStat{ts}, s.storageTopN[i:]...)...)
if len(s.storageTopN) > topN {
s.storageTopNTotal = s.storageTopNTotal[:topN]
s.storageTopN = s.storageTopN[:topN]
}
}
s.storageTotal.ShortNodeCnt.Add(ts.totalNodeStat.ShortNodeCnt.Load())
s.storageTotal.ValueNodeCnt.Add(ts.totalNodeStat.ValueNodeCnt.Load())
s.storageTotal.FullNodeCnt.Add(ts.totalNodeStat.FullNodeCnt.Load())
s.storageTrieNum++
}
func (trieStat *trieStat) add(theNode node, height int) {
switch (theNode).(type) { switch (theNode).(type) {
case *shortNode: case *shortNode:
atomic.AddUint64(&trieStat.totalNodeStat.ShortNodeCnt, 1) trieStat.totalNodeStat.ShortNodeCnt.Add(1)
atomic.AddUint64(&(trieStat.theNodeStatByLevel[height].ShortNodeCnt), 1) trieStat.nodeStatByLevel[height].ShortNodeCnt.Add(1)
case *fullNode: case *fullNode:
atomic.AddUint64(&trieStat.totalNodeStat.FullNodeCnt, 1) trieStat.totalNodeStat.FullNodeCnt.Add(1)
atomic.AddUint64(&trieStat.theNodeStatByLevel[height].FullNodeCnt, 1) trieStat.nodeStatByLevel[height].FullNodeCnt.Add(1)
case valueNode: case valueNode:
atomic.AddUint64(&trieStat.totalNodeStat.ValueNodeCnt, 1) trieStat.totalNodeStat.ValueNodeCnt.Add(1)
atomic.AddUint64(&((trieStat.theNodeStatByLevel[height]).ValueNodeCnt), 1) trieStat.nodeStatByLevel[height].ValueNodeCnt.Add(1)
default:
panic(errors.New("Invalid node type to statistics"))
} }
} }
func (trieStat *TrieTreeStat) Display(ownerAddress string, treeType string) { func (trieStat *trieStat) Display(ownerAddress string, treeType string) string {
table := tablewriter.NewWriter(os.Stdout) sw := new(strings.Builder)
table := tablewriter.NewWriter(sw)
table.SetHeader([]string{"-", "Level", "ShortNodeCnt", "FullNodeCnt", "ValueNodeCnt"}) table.SetHeader([]string{"-", "Level", "ShortNodeCnt", "FullNodeCnt", "ValueNodeCnt"})
if ownerAddress == "" { if ownerAddress == "" {
table.SetCaption(true, fmt.Sprintf("%v", treeType)) table.SetCaption(true, fmt.Sprintf("%v", treeType))
@@ -90,38 +138,27 @@ func (trieStat *TrieTreeStat) Display(ownerAddress string, treeType string) {
table.SetCaption(true, fmt.Sprintf("%v-%v", treeType, ownerAddress)) table.SetCaption(true, fmt.Sprintf("%v-%v", treeType, ownerAddress))
} }
table.SetAlignment(1) table.SetAlignment(1)
for i := 0; i < len(trieStat.theNodeStatByLevel); i++ {
nodeStat := trieStat.theNodeStatByLevel[i] for i := range trieStat.nodeStatByLevel {
if nodeStat.FullNodeCnt == 0 && nodeStat.ShortNodeCnt == 0 && nodeStat.ValueNodeCnt == 0 { if trieStat.nodeStatByLevel[i].IsEmpty() {
break continue
} }
table.AppendBulk([][]string{ table.AppendBulk([][]string{
{"-", strconv.Itoa(i), nodeStat.ShortNodeCount(), nodeStat.FullNodeCount(), nodeStat.ValueNodeCount()}, {"-", fmt.Sprintf("%d", i),
fmt.Sprintf("%d", trieStat.nodeStatByLevel[i].ShortNodeCnt.Load()),
fmt.Sprintf("%d", trieStat.nodeStatByLevel[i].FullNodeCnt.Load()),
fmt.Sprintf("%d", trieStat.nodeStatByLevel[i].ValueNodeCnt.Load())},
}) })
} }
table.AppendBulk([][]string{ table.AppendBulk([][]string{
{"Total", "-", trieStat.totalNodeStat.ShortNodeCount(), trieStat.totalNodeStat.FullNodeCount(), trieStat.totalNodeStat.ValueNodeCount()}, {"Total", "-", fmt.Sprintf("%d", trieStat.totalNodeStat.ShortNodeCnt.Load()), fmt.Sprintf("%d", trieStat.totalNodeStat.FullNodeCnt.Load()), fmt.Sprintf("%d", trieStat.totalNodeStat.ValueNodeCnt.Load())},
}) })
table.Render() table.Render()
} return sw.String()
func Uint64ToString(cnt uint64) string {
return fmt.Sprintf("%v", cnt)
}
func (nodeStat *NodeStat) ShortNodeCount() string {
return Uint64ToString(nodeStat.ShortNodeCnt)
}
func (nodeStat *NodeStat) FullNodeCount() string {
return Uint64ToString(nodeStat.FullNodeCnt)
}
func (nodeStat *NodeStat) ValueNodeCount() string {
return Uint64ToString(nodeStat.ValueNodeCnt)
} }
// NewInspector return a inspector obj // NewInspector return a inspector obj
func NewInspector(tr *Trie, db Database, stateRootHash common.Hash, blocknum uint64, jobnum uint64) (*Inspector, error) { func NewInspector(tr *Trie, db Database, stateRootHash common.Hash, blockNum uint64, jobNum uint64, topN int) (*Inspector, error) {
if tr == nil { if tr == nil {
return nil, errors.New("trie is nil") return nil, errors.New("trie is nil")
} }
@@ -131,15 +168,20 @@ func NewInspector(tr *Trie, db Database, stateRootHash common.Hash, blocknum uin
} }
ins := &Inspector{ ins := &Inspector{
trie: tr, trie: tr,
db: db, db: db,
stateRootHash: stateRootHash, stateRootHash: stateRootHash,
blocknum: blocknum, blockNum: blockNum,
root: tr.root, root: tr.root,
result: make(map[string]*TrieTreeStat), results: stat{},
totalNum: (uint64)(0), topN: topN,
wg: sync.WaitGroup{}, totalAccountNum: atomic.Uint64{},
sem: semaphore.NewWeighted(int64(jobnum)), totalStorageNum: atomic.Uint64{},
lastTime: mclock.Now(),
sem: semaphore.NewWeighted(int64(jobNum)),
wg: sync.WaitGroup{},
eoaAccountNums: 0, eoaAccountNums: 0,
} }
@@ -147,155 +189,123 @@ func NewInspector(tr *Trie, db Database, stateRootHash common.Hash, blocknum uin
} }
// Run statistics, external call // Run statistics, external call
func (inspect *Inspector) Run() { func (s *Inspector) Run() {
accountTrieStat := &TrieTreeStat{ ticker := time.NewTicker(30 * time.Second)
is_account_trie: true, go func() {
} defer ticker.Stop()
if inspect.db.Scheme() == rawdb.HashScheme { for range ticker.C {
ticker := time.NewTicker(30 * time.Second) if s.db.Scheme() == rawdb.HashScheme {
go func() { s.db.Cap(DEFAULT_TRIEDBCACHE_SIZE)
defer ticker.Stop()
for range ticker.C {
inspect.db.Cap(DEFAULT_TRIEDBCACHE_SIZE)
} }
}() runtime.GC()
} }
}()
if _, ok := inspect.result[""]; !ok { log.Info("Find Account Trie Tree", "rootHash: ", s.trie.Hash().String(), "BlockNum: ", s.blockNum)
inspect.result[""] = accountTrieStat
}
log.Info("Find Account Trie Tree", "rootHash: ", inspect.trie.Hash().String(), "BlockNum: ", inspect.blocknum)
inspect.ConcurrentTraversal(inspect.trie, accountTrieStat, inspect.root, 0, []byte{}) ts := &trieStat{
inspect.wg.Wait() owner: common.Hash{},
}
s.traversal(s.trie, ts, s.root, 0, []byte{})
s.results.add(ts, s.topN)
s.wg.Wait()
} }
func (inspect *Inspector) SubConcurrentTraversal(theTrie *Trie, theTrieTreeStat *TrieTreeStat, theNode node, height uint32, path []byte) { func (s *Inspector) traversal(trie *Trie, ts *trieStat, n node, height int, path []byte) {
inspect.ConcurrentTraversal(theTrie, theTrieTreeStat, theNode, height, path)
inspect.wg.Done()
}
func (inspect *Inspector) ConcurrentTraversal(theTrie *Trie, theTrieTreeStat *TrieTreeStat, theNode node, height uint32, path []byte) {
// print process progress
total_num := atomic.AddUint64(&inspect.totalNum, 1)
if total_num%100000 == 0 {
fmt.Printf("Complete progress: %v, go routines Num: %v\n", total_num, runtime.NumGoroutine())
}
// nil node // nil node
if theNode == nil { if n == nil {
return return
} }
switch current := (theNode).(type) { ts.add(n, height)
switch current := (n).(type) {
case *shortNode: case *shortNode:
inspect.ConcurrentTraversal(theTrie, theTrieTreeStat, current.Val, height, append(path, current.Key...)) s.traversal(trie, ts, current.Val, height, append(path, current.Key...))
case *fullNode: case *fullNode:
for idx, child := range current.Children { for idx, child := range current.Children {
if child == nil { if child == nil {
continue continue
} }
childPath := append(path, byte(idx)) p := common.CopyBytes(append(path, byte(idx)))
if inspect.sem.TryAcquire(1) { s.traversal(trie, ts, child, height+1, p)
inspect.wg.Add(1)
dst := make([]byte, len(childPath))
copy(dst, childPath)
go inspect.SubConcurrentTraversal(theTrie, theTrieTreeStat, child, height+1, dst)
} else {
inspect.ConcurrentTraversal(theTrie, theTrieTreeStat, child, height+1, childPath)
}
} }
case hashNode: case hashNode:
n, err := theTrie.resloveWithoutTrack(current, path) tn, err := trie.resloveWithoutTrack(current, path)
if err != nil { if err != nil {
fmt.Printf("Resolve HashNode error: %v, TrieRoot: %v, Height: %v, Path: %v\n", err, theTrie.Hash().String(), height+1, path) fmt.Printf("Resolve HashNode error: %v, TrieRoot: %v, Height: %v, Path: %v\n", err, trie.Hash().String(), height+1, path)
return return
} }
inspect.ConcurrentTraversal(theTrie, theTrieTreeStat, n, height, path) s.PrintProgress(trie)
return s.traversal(trie, ts, tn, height, path)
case valueNode: case valueNode:
if !hasTerm(path) { if !hasTerm(path) {
break break
} }
var account Account var account types.StateAccount
if err := rlp.Decode(bytes.NewReader(current), &account); err != nil { if err := rlp.Decode(bytes.NewReader(current), &account); err != nil {
break break
} }
if common.BytesToHash(account.CodeHash) == types.EmptyCodeHash { if common.BytesToHash(account.CodeHash) == types.EmptyCodeHash {
inspect.eoaAccountNums++ s.eoaAccountNums++
} }
if account.Root == (common.Hash{}) || account.Root == types.EmptyRootHash { if account.Root == (common.Hash{}) || account.Root == types.EmptyRootHash {
break break
} }
ownerAddress := common.BytesToHash(hexToCompact(path)) ownerAddress := common.BytesToHash(hexToCompact(path))
contractTrie, err := New(StorageTrieID(inspect.stateRootHash, ownerAddress, account.Root), inspect.db) contractTrie, err := New(StorageTrieID(s.stateRootHash, ownerAddress, account.Root), s.db)
if err != nil { if err != nil {
fmt.Printf("New contract trie node: %v, error: %v, Height: %v, Path: %v\n", theNode, err, height, path) panic(err)
break
} }
contractTrie.tracer.reset() contractTrie.tracer.reset()
trieStat := &TrieTreeStat{
is_account_trie: false,
}
inspect.statLock.Lock() if s.sem.TryAcquire(1) {
if _, ok := inspect.result[ownerAddress.String()]; !ok { s.wg.Add(1)
inspect.result[ownerAddress.String()] = trieStat go func() {
t := &trieStat{
owner: ownerAddress,
}
s.traversal(contractTrie, t, contractTrie.root, 0, []byte{})
s.results.add(t, s.topN)
s.sem.Release(1)
s.wg.Done()
}()
} else {
t := &trieStat{
owner: ownerAddress,
}
s.traversal(contractTrie, t, contractTrie.root, 0, []byte{})
s.results.add(t, s.topN)
} }
inspect.statLock.Unlock()
// log.Info("Find Contract Trie Tree, rootHash: ", contractTrie.Hash().String(), "")
inspect.wg.Add(1)
go inspect.SubConcurrentTraversal(contractTrie, trieStat, contractTrie.root, 0, []byte{})
default: default:
panic(errors.New("Invalid node type to traverse.")) panic(errors.New("invalid node type to traverse"))
} }
theTrieTreeStat.AtomicAdd(theNode, height)
} }
func (inspect *Inspector) DisplayResult() { func (s *Inspector) PrintProgress(t *Trie) {
var (
elapsed = mclock.Now().Sub(s.lastTime)
)
if t.owner == (common.Hash{}) {
s.totalAccountNum.Add(1)
} else {
s.totalStorageNum.Add(1)
}
if elapsed > 4*time.Second {
log.Info("traversal progress", "TotalAccountNum", s.totalAccountNum.Load(), "TotalStorageNum", s.totalStorageNum.Load(), "Goroutine", runtime.NumGoroutine())
s.lastTime = mclock.Now()
}
}
func (s *Inspector) DisplayResult() {
// display root hash // display root hash
if _, ok := inspect.result[""]; !ok { fmt.Println(s.results.account.Display("", "AccountTrie"))
log.Info("Display result error", "missing account trie") fmt.Println("EOA accounts num: ", s.eoaAccountNums)
return
}
inspect.result[""].Display("", "AccountTrie")
type SortedTrie struct {
totalNum uint64
ownerAddress string
}
// display contract trie // display contract trie
var sortedTriesByNums []SortedTrie for _, st := range s.results.storageTopN {
var totalContactsNodeStat NodeStat fmt.Println(st.Display(st.owner.String(), "StorageTrie"))
var contractTrieCnt uint64 = 0
for ownerAddress, stat := range inspect.result {
if ownerAddress == "" {
continue
}
contractTrieCnt++
totalContactsNodeStat.ShortNodeCnt += stat.totalNodeStat.ShortNodeCnt
totalContactsNodeStat.FullNodeCnt += stat.totalNodeStat.FullNodeCnt
totalContactsNodeStat.ValueNodeCnt += stat.totalNodeStat.ValueNodeCnt
totalNodeCnt := stat.totalNodeStat.ShortNodeCnt + stat.totalNodeStat.ValueNodeCnt + stat.totalNodeStat.FullNodeCnt
sortedTriesByNums = append(sortedTriesByNums, SortedTrie{totalNum: totalNodeCnt, ownerAddress: ownerAddress})
}
sort.Slice(sortedTriesByNums, func(i, j int) bool {
return sortedTriesByNums[i].totalNum > sortedTriesByNums[j].totalNum
})
fmt.Println("EOA accounts num: ", inspect.eoaAccountNums)
// only display top 5
for i, t := range sortedTriesByNums {
if i > 5 {
break
}
if stat, ok := inspect.result[t.ownerAddress]; !ok {
log.Error("Storage trie stat not found", "ownerAddress", t.ownerAddress)
} else {
stat.Display(t.ownerAddress, "ContractTrie")
}
} }
fmt.Printf("Contract Trie, total trie num: %v, ShortNodeCnt: %v, FullNodeCnt: %v, ValueNodeCnt: %v\n", fmt.Printf("Contract Trie, total trie num: %v, ShortNodeCnt: %v, FullNodeCnt: %v, ValueNodeCnt: %v\n",
contractTrieCnt, totalContactsNodeStat.ShortNodeCnt, totalContactsNodeStat.FullNodeCnt, totalContactsNodeStat.ValueNodeCnt) s.results.storageTrieNum, s.results.storageTotal.ShortNodeCnt.Load(), s.results.storageTotal.FullNodeCnt.Load(), s.results.storageTotal.ValueNodeCnt.Load())
} }