Merge pull request #1917 from bnb-chain/master
branch: merge v1.3.0 fix-ups from master to develop
Commit: 423d4137e5

.github/commitlint.config.js (2 changes)
@@ -36,7 +36,7 @@ module.exports = {
     'header-max-length': [
       2,
       'always',
-      72,
+      80,
     ],
   },
   helpUrl:

@@ -51,6 +51,9 @@ issues:
     - path: core/state/metrics.go
       linters:
         - unused
+    - path: core/state/statedb_fuzz_test.go
+      linters:
+        - unused
     - path: core/txpool/legacypool/list.go
       linters:
         - staticcheck

@@ -277,7 +277,7 @@ func createPorts(ipStr string, port int, size int) []int {
 // Create config for node i in the cluster
 func createNodeConfig(baseConfig gethConfig, enodes []*enode.Node, ip string, port int, size int, i int) gethConfig {
     baseConfig.Node.HTTPHost = ip
-    baseConfig.Node.P2P.ListenAddr = fmt.Sprintf(":%d", port+i)
+    baseConfig.Node.P2P.ListenAddr = fmt.Sprintf(":%d", port)
     baseConfig.Node.P2P.BootstrapNodes = make([]*enode.Node, size-1)
     // Set the P2P connections between this node and the other nodes
     for j := 0; j < i; j++ {

@@ -294,11 +294,12 @@ func createNodeConfigs(baseConfig gethConfig, initDir string, ips []string, port
     // Create the nodes
     enodes := make([]*enode.Node, size)
     for i := 0; i < size; i++ {
-        stack, err := node.New(&baseConfig.Node)
+        nodeConfig := baseConfig.Node
+        nodeConfig.DataDir = path.Join(initDir, fmt.Sprintf("node%d", i))
+        stack, err := node.New(&nodeConfig)
         if err != nil {
             return nil, err
         }
-        stack.Config().DataDir = path.Join(initDir, fmt.Sprintf("node%d", i))
         pk := stack.Config().NodeKey()
         enodes[i] = enode.NewV4(&pk.PublicKey, net.ParseIP(ips[i]), ports[i], ports[i])
     }

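Note: the change above copies the node configuration before calling node.New, so each node gets its own DataDir instead of every stack sharing (and mutating) baseConfig.Node. A minimal, self-contained sketch of why the struct copy matters, using stand-in types rather than the geth ones:

package main

import "fmt"

// Config and New are simplified stand-ins for node.Config and node.New.
type Config struct {
    DataDir string
}

func New(cfg *Config) string {
    // A real node resolves keystore/nodekey paths from DataDir here,
    // so DataDir has to be set before this call, not after.
    return cfg.DataDir
}

func main() {
    base := Config{DataDir: "base"}
    for i := 0; i < 3; i++ {
        nodeCfg := base // struct assignment copies the value; base stays untouched
        nodeCfg.DataDir = fmt.Sprintf("node%d", i)
        fmt.Println(New(&nodeCfg)) // node0, node1, node2
    }
    fmt.Println(base.DataDir) // still "base"
}
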
cmd/geth/initnetwork_test.go (new file, 147 lines)
@@ -0,0 +1,147 @@
package main

import (
    "fmt"
    "os"
    "path/filepath"
    "runtime"
    "strconv"
    "strings"
    "testing"
)

var size int
var basePort int
var configPath string
var genesisPath string

func setup(t *testing.T) {
    size = 4
    _, filename, _, ok := runtime.Caller(0)
    if !ok {
        t.Fatalf("error getting current file path")
    }
    currentDirectory := filepath.Dir(filename)
    configPath = filepath.Join(currentDirectory, "testdata/config.toml")
    genesisPath = filepath.Join(currentDirectory, "testdata/parlia.json")
    basePort = 30311
}

func TestInitNetworkLocalhost(t *testing.T) {
    setup(t)
    ipStr := ""
    testInitNetwork(t, size, basePort, ipStr, configPath, genesisPath)
}

func TestInitNetworkRemoteHosts(t *testing.T) {
    setup(t)
    ipStr := "192.168.24.103,172.15.67.89,10.0.17.36,203.113.45.76"
    testInitNetwork(t, size, basePort, ipStr, configPath, genesisPath)
}

func testInitNetwork(t *testing.T, size, basePort int, ipStr, configPath, genesisPath string) {
    dir := t.TempDir()
    geth := runGeth(t, "init-network", "--init.dir", dir, "--init.size", strconv.Itoa(size),
        "--init.ips", ipStr, "--init.p2p-port", strconv.Itoa(basePort), "--config", configPath,
        genesisPath)
    // expect the command to complete first
    geth.WaitExit()

    // Read the output of the command
    files, err := os.ReadDir(dir)
    if err != nil {
        t.Fatal(err)
    }

    if len(files) != size {
        t.Fatalf("expected %d node folders but found %d instead", size, len(files))
    }

    for i, file := range files {
        if file.IsDir() {
            expectedNodeDirName := fmt.Sprintf("node%d", i)
            if file.Name() != expectedNodeDirName {
                t.Fatalf("node dir name is %s but %s was expected", file.Name(), expectedNodeDirName)
            }
            configFilePath := filepath.Join(dir, file.Name(), "config.toml")
            var config gethConfig
            err := loadConfig(configFilePath, &config)
            if err != nil {
                t.Fatalf("failed to load config.toml : %v", err)
            }
            if ipStr == "" {
                verifyConfigFileLocalhost(t, &config, i, basePort, size)
            } else {
                verifyConfigFileRemoteHosts(t, &config, ipStr, i, basePort, size)
            }
        }
    }
}

func verifyConfigFileRemoteHosts(t *testing.T, config *gethConfig, ipStr string, i, basePort, size int) {
    // 1. check ip string
    ips := strings.Split(ipStr, ",")
    if len(ips) != size {
        t.Fatalf("found %d ips in ipStr=%s instead of %d", len(ips), ipStr, size)
    }

    // 2. check listening port
    expectedListenAddr := fmt.Sprintf(":%d", basePort)
    if config.Node.P2P.ListenAddr != expectedListenAddr {
        t.Fatalf("expected ListenAddr to be %s but it is %s instead", expectedListenAddr, config.Node.P2P.ListenAddr)
    }

    bootnodes := config.Node.P2P.BootstrapNodes

    // 3. check correctness of peers' hosts
    for j := 0; j < i; j++ {
        ip := bootnodes[j].IP().String()
        if ip != ips[j] {
            t.Fatalf("expected IP of bootnode to be %s but found %s instead", ips[j], ip)
        }
    }

    for j := i + 1; j < size; j++ {
        ip := bootnodes[j-1].IP().String()
        if ip != ips[j] {
            t.Fatalf("expected IP of bootnode to be %s but found %s instead", ips[j-1], ip)
        }
    }

    // 4. check correctness of peer port numbers
    for j := 0; j < size-1; j++ {
        if bootnodes[j].UDP() != basePort {
            t.Fatalf("expected bootnode port at position %d to be %d but got %d instead", j, basePort, bootnodes[j].UDP())
        }
    }
}

func verifyConfigFileLocalhost(t *testing.T, config *gethConfig, i int, basePort int, size int) {
    // 1. check listening port
    expectedListenAddr := fmt.Sprintf(":%d", basePort+i)
    if config.Node.P2P.ListenAddr != expectedListenAddr {
        t.Fatalf("expected ListenAddr to be %s but it is %s instead", expectedListenAddr, config.Node.P2P.ListenAddr)
    }

    bootnodes := config.Node.P2P.BootstrapNodes
    // 2. check correctness of peers' hosts
    localhost := "127.0.0.1"
    for j := 0; j < size-1; j++ {
        ip := bootnodes[j].IP().String()
        if ip != localhost {
            t.Fatalf("expected IP of bootnode to be %s but found %s instead", localhost, ip)
        }
    }

    // 3. check correctness of peer port numbers
    for j := 0; j < i; j++ {
        if bootnodes[j].UDP() != basePort+j {
            t.Fatalf("expected bootnode port at position %d to be %d but got %d instead", j, basePort+j, bootnodes[j].UDP())
        }
    }
    for j := i + 1; j < size; j++ {
        if bootnodes[j-1].UDP() != basePort+j {
            t.Fatalf("expected bootnode port at position %d to be %d but got %d instead", j-1, basePort+j, bootnodes[j-1].UDP())
        }
    }
}

cmd/geth/testdata/config.toml (new file, 62 lines)
@@ -0,0 +1,62 @@
[Eth]
NetworkId = 714
SyncMode = "full"
NoPruning = false
NoPrefetch = false
LightPeers = 100
DatabaseCache = 512
DatabaseFreezer = ""
TrieCleanCache = 256
TrieDirtyCache = 256
TriesInMemory = 128
TrieTimeout = 3600000000000
EnablePreimageRecording = false

[Eth.Miner]
GasFloor = 30000000
GasCeil = 40000000
GasPrice = 10000000000
Recommit = 10000000000

[Eth.TxPool]
Locals = []
NoLocals = true
Journal = "transactions.rlp"
Rejournal = 3600000000000
PriceLimit = 10000000000
PriceBump = 10
AccountSlots = 16
GlobalSlots = 4096
AccountQueue = 64
GlobalQueue = 1024
Lifetime = 10800000000000


[Node]
IPCPath = "geth.ipc"
HTTPHost = "0.0.0.0"
NoUSB = true
InsecureUnlockAllowed = true
HTTPPort = 8545
HTTPVirtualHosts = ["*"]
HTTPModules = ["eth", "net", "web3", "txpool", "parlia"]
WSHost = "0.0.0.0"
WSPort = 8545

[Node.P2P]
MaxPeers = 50
NoDiscovery = false
StaticNodes = []
TrustedNodes = []
EnableMsgEvents = false

[Node.HTTPTimeouts]
ReadTimeout = 30000000000
WriteTimeout = 30000000000
IdleTimeout = 120000000000

[Node.LogConfig]
FilePath = "bsc.log"
MaxBytesSize = 10485760
Level = "info"
FileRoot = ""

cmd/geth/testdata/parlia.json (new file, 33 lines)
@@ -0,0 +1,33 @@
{
  "config": {
    "chainId": 714,
    "homesteadBlock": 0,
    "eip150Block": 0,
    "eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000",
    "eip155Block": 0,
    "eip158Block": 0,
    "byzantiumBlock": 0,
    "constantinopleBlock": 0,
    "petersburgBlock": 0,
    "istanbulBlock": 0,
    "muirGlacierBlock": 0,
    "ramanujanBlock": 0,
    "nielsBlock": 0,
    "mirrorSyncBlock": 1,
    "brunoBlock": 1,
    "eulerBlock": 2,
    "gibbsBlock": 3,
    "parlia": {
      "period": 3,
      "epoch": 200
    }
  },
  "nonce": "0x0",
  "timestamp": "0x5e9da7ce",
  "extraData": "0x00000000000000000000000000000000000000000000000000000000000000009fb29aac15b9a4b7f17c3385939b007540f4d7910000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
  "gasLimit": "0x2625a00",
  "difficulty": "0x1",
  "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
  "coinbase": "0xffffFFFfFFffffffffffffffFfFFFfffFFFfFFfE",
  "alloc": {}
}

@@ -389,7 +389,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
     }
     // Make sure the state associated with the block is available
     head := bc.CurrentBlock()
-    if !bc.HasState(head.Root) {
+    if !bc.stateCache.NoTries() && !bc.HasState(head.Root) {
         // Head state is missing, before the state recovery, find out the
         // disk layer point of snapshot(if it's enabled). Make sure the
         // rewound point is lower than disk layer.

@@ -1180,6 +1180,7 @@ func (bc *BlockChain) Stop() {
     // - HEAD-127: So we have a hard limit on the number of blocks reexecuted
     if !bc.cacheConfig.TrieDirtyDisabled {
         triedb := bc.triedb
+        var once sync.Once

         for _, offset := range []uint64{0, 1, TriesInMemory - 1} {
             if number := bc.CurrentBlock().Number.Uint64(); number > offset {
@@ -1187,7 +1188,12 @@ func (bc *BlockChain) Stop() {
                 log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
                 if err := triedb.Commit(recent.Root(), true); err != nil {
                     log.Error("Failed to commit recent state trie", "err", err)
-                }
+                } else {
+                    rawdb.WriteSafePointBlockNumber(bc.db, recent.NumberU64())
+                    once.Do(func() {
+                        rawdb.WriteHeadBlockHash(bc.db, recent.Hash())
+                    })
+                }
             }
         }
         if snapBase != (common.Hash{}) {

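Note: the sync.Once introduced above makes the head-block-hash write happen at most once across the loop over trie offsets, for the first (most recent) block that takes the fallback path. A minimal sketch of that pattern with made-up values:

package main

import (
    "fmt"
    "sync"
)

func main() {
    var once sync.Once
    // Stand-in for the loop over {0, 1, TriesInMemory - 1} in Stop():
    // only the first iteration that reaches once.Do performs the write.
    for _, offset := range []uint64{0, 1, 127} {
        offset := offset
        once.Do(func() {
            fmt.Println("recording head at offset", offset) // runs once, for offset 0
        })
    }
}
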
@@ -1208,7 +1214,7 @@ func (bc *BlockChain) Stop() {
             }
         }
         for !bc.triegc.Empty() {
-            go triedb.Dereference(bc.triegc.PopItem())
+            triedb.Dereference(bc.triegc.PopItem())
         }
         if size, _ := triedb.Size(); size != 0 {
             log.Error("Dangling trie nodes after full cleanup")

@@ -633,13 +633,16 @@ func (s *MatcherSession) Multiplex(batch int, wait time.Duration, mux chan chan
             request <- &Retrieval{Bit: bit, Sections: sections, Context: s.ctx}

             result := <-request
+
+            // Deliver a result before s.Close() to avoid a deadlock
+            s.deliverSections(result.Bit, result.Sections, result.Bitsets)
+
             if result.Error != nil {
                 s.errLock.Lock()
                 s.err = result.Error
                 s.errLock.Unlock()
                 s.Close()
             }
-            s.deliverSections(result.Bit, result.Sections, result.Bitsets)
         }
     }
 }

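Note: the patch comment says delivering before s.Close() avoids a deadlock; Close waits for outstanding work, so closing before the delivery can hang. One plausible shape of that deadlock, sketched with stand-in types (not the go-ethereum bloombits API):

package main

import (
    "fmt"
    "sync"
)

// session mimics the relevant behaviour: Close() waits for every delivery
// that was announced via pend.Add to actually happen.
type session struct {
    pend sync.WaitGroup
    quit chan struct{}
    once sync.Once
}

func (s *session) deliver(v int) {
    fmt.Println("delivered", v)
    s.pend.Done()
}

func (s *session) Close() {
    s.once.Do(func() { close(s.quit) })
    s.pend.Wait() // blocks until all pending deliveries are done
}

func main() {
    s := &session{quit: make(chan struct{})}
    s.pend.Add(1)

    // Calling s.Close() before s.deliver(42) would block forever in pend.Wait(),
    // because the delivery it waits for only happens afterwards.
    // Delivering first keeps Close() from hanging.
    s.deliver(42)
    s.Close()
    fmt.Println("closed cleanly")
}
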
@@ -1350,6 +1350,10 @@ func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root
     if err != nil {
         return false, nil, nil, fmt.Errorf("failed to open storage trie, err: %w", err)
     }
+    // skip deleting storages for EmptyTrie
+    if _, ok := tr.(*trie.EmptyTrie); ok {
+        return false, nil, nil, nil
+    }
     it, err := tr.NodeIterator(nil)
     if err != nil {
         return false, nil, nil, fmt.Errorf("failed to open storage iterator, err: %w", err)

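Note: the early return above skips the storage walk when the opened trie is the EmptyTrie placeholder. The guard is an ordinary type assertion against a sentinel type; a small stand-alone illustration (types are stand-ins, not the go-ethereum ones):

package main

import "fmt"

// Trie and EmptyTrie are stand-ins for the real trie types.
type Trie interface{ Hash() string }

type EmptyTrie struct{}

func (*EmptyTrie) Hash() string { return "empty" }

type diskTrie struct{ root string }

func (t *diskTrie) Hash() string { return t.root }

func deleteStorage(tr Trie) string {
    // Asserting against the sentinel lets callers skip the walk entirely.
    if _, ok := tr.(*EmptyTrie); ok {
        return "skipped: nothing to delete"
    }
    return "walked trie " + tr.Hash()
}

func main() {
    fmt.Println(deleteStorage(&EmptyTrie{}))
    fmt.Println(deleteStorage(&diskTrie{root: "0xabc"}))
}
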
@@ -1422,7 +1426,13 @@ func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root
 // In case (d), **original** account along with its storages should be deleted,
 // with their values be tracked as original value.
 func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) (map[common.Address]struct{}, error) {
+    // Short circuit if geth is running with hash mode. This procedure can consume
+    // considerable time and storage deletion isn't supported in hash mode, thus
+    // preemptively avoiding unnecessary expenses.
     incomplete := make(map[common.Address]struct{})
+    if s.db.TrieDB().Scheme() == rawdb.HashScheme {
+        return incomplete, nil
+    }
     for addr, prev := range s.stateObjectsDestruct {
         // The original account was non-existing, and it's marked as destructed
         // in the scope of block. It can be case (a) or (b).

@@ -1748,7 +1758,6 @@ func (s *StateDB) Commit(block uint64, failPostCommitFunc func(), postCommitFunc
     } else {
         s.snap = nil
     }
-
     if root == (common.Hash{}) {
         root = types.EmptyRootHash
     }

@@ -365,7 +365,8 @@ func (test *stateTest) verify(root common.Hash, next common.Hash, db *trie.Datab
     return nil
 }

-func TestStateChanges(t *testing.T) {
+// TODO(Nathan): enable this case after enabling pbss
+func testStateChanges(t *testing.T) {
     config := &quick.Config{MaxCount: 1000}
     err := quick.Check((*stateTest).run, config)
     if cerr, ok := err.(*quick.CheckError); ok {

@@ -321,7 +321,10 @@ func (p *TxPool) Pending(enforceTips bool) map[common.Address][]*LazyTransaction
 func (p *TxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
     subs := make([]event.Subscription, len(p.subpools))
     for i, subpool := range p.subpools {
-        subs[i] = subpool.SubscribeTransactions(ch)
+        sub := subpool.SubscribeTransactions(ch)
+        if sub != nil { // sub will be nil when subpool have been shut down
+            subs[i] = sub
+        }
     }
     return p.subs.Track(event.JoinSubscriptions(subs...))
 }

@@ -331,7 +334,10 @@ func (p *TxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscrip
 func (p *TxPool) SubscribeReannoTxsEvent(ch chan<- core.ReannoTxsEvent) event.Subscription {
     subs := make([]event.Subscription, len(p.subpools))
     for i, subpool := range p.subpools {
-        subs[i] = subpool.SubscribeReannoTxsEvent(ch)
+        sub := subpool.SubscribeReannoTxsEvent(ch)
+        if sub != nil { // sub will be nil when subpool have been shut down
+            subs[i] = sub
+        }
     }
     return p.subs.Track(event.JoinSubscriptions(subs...))
 }

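Note: both subscription hunks apply the same guard: a subpool that has already shut down hands back a nil subscription (as the inline comment says), and the slot is only filled when the subscription is real. A minimal sketch of the guard with stand-in types (not the go-ethereum event API):

package main

import "fmt"

// Subscription and subpool are simplified stand-ins; only the nil guard
// from the diff is being illustrated.
type Subscription interface{ Unsubscribe() }

type sub struct{ name string }

func (s *sub) Unsubscribe() { fmt.Println("unsubscribed", s.name) }

type subpool struct {
    name   string
    closed bool
}

func (p *subpool) Subscribe() Subscription {
    if p.closed {
        return nil // mirrors "sub will be nil when subpool have been shut down"
    }
    return &sub{name: p.name}
}

func main() {
    pools := []*subpool{{name: "legacy"}, {name: "blob", closed: true}}

    var subs []Subscription
    for _, p := range pools {
        if s := p.Subscribe(); s != nil { // skip pools that already shut down
            subs = append(subs, s)
        }
    }
    for _, s := range subs {
        s.Unsubscribe()
    }
    fmt.Println("active subscriptions:", len(subs))
}
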
@@ -152,6 +152,7 @@ func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig
     evm.Config = config
     evm.chainConfig = chainConfig
     evm.chainRules = chainConfig.Rules(blockCtx.BlockNumber, blockCtx.Random != nil, blockCtx.Time)
+    evm.abort.Store(false)
     evm.callGasTemp = 0
     evm.depth = 0

@@ -191,7 +191,7 @@ func (r *StandardRegistry) Unregister(name string) {

 func (r *StandardRegistry) loadOrRegister(name string, i interface{}) (interface{}, bool, bool) {
     switch i.(type) {
-    case Counter, CounterFloat64, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer, ResettingTimer:
+    case Counter, CounterFloat64, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer, ResettingTimer, Label:
     default:
         return nil, false, false
     }

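Note: the metrics change widens the whitelist in the registry's type switch so Label metrics can be registered. A tiny stand-alone version of that whitelist pattern (simplified types, not the real metrics package):

package main

import "fmt"

// Counter and Label stand in for two whitelisted metric kinds.
type Counter interface{ Inc(int64) }
type Label interface{ Value() map[string]interface{} }

// register accepts only whitelisted metric kinds, as loadOrRegister does above.
func register(name string, i interface{}) bool {
    switch i.(type) {
    case Counter, Label:
        // accepted kinds fall through
    default:
        return false
    }
    fmt.Println("registered", name)
    return true
}

type counter struct{ n int64 }

func (c *counter) Inc(d int64) { c.n += d }

func main() {
    fmt.Println(register("txs", &counter{}))   // true
    fmt.Println(register("bogus", "a string")) // false: not a whitelisted kind
}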