Compare commits
14 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 5e74ea650d | |
| | 5378df3702 | |
| | 40cae45436 | |
| | 361e8413e6 | |
| | 36a283ef98 | |
| | 78d1cade19 | |
| | 82beb2c5f3 | |
| | 3761bf0426 | |
| | 29427c51fd | |
| | 220be95117 | |
| | f0d9f61bf6 | |
| | d49da4348c | |
| | fecd2bfafe | |
| | ef13f3194d | |
CHANGELOG.md (14 changes)
```diff
@@ -1,4 +1,18 @@
 # Changelog
+## v1.3.9
+FEATURE
+* [\#2186](https://github.com/bnb-chain/bsc/pull/2186) log: support maxBackups in config.toml
+
+BUGFIX
+* [\#2160](https://github.com/bnb-chain/bsc/pull/2160) cmd: fix dump cli cannot work in path mode
+* [\#2183](https://github.com/bnb-chain/bsc/pull/2183) p2p: resolved deadlock on p2p server shutdown
+
+IMPROVEMENT
+* [\#2177](https://github.com/bnb-chain/bsc/pull/0000) build(deps): bump github.com/quic-go/quic-go from 0.39.3 to 0.39.4
+* [\#2185](https://github.com/bnb-chain/bsc/pull/2185) consensus/parlia: set nonce before evm run
+* [\#2190](https://github.com/bnb-chain/bsc/pull/2190) fix(legacypool): deprecate already known error
+* [\#2195](https://github.com/bnb-chain/bsc/pull/2195) eth/fetcher: downgrade state tx log
+
 ## v1.3.8
 FEATURE
 * [\#2074](https://github.com/bnb-chain/bsc/pull/2074) faucet: new faucet client
```
```diff
@@ -30,6 +30,9 @@ import (
     "sync/atomic"
     "time"
 
+    "github.com/olekukonko/tablewriter"
+    "github.com/urfave/cli/v2"
+
     "github.com/ethereum/go-ethereum/cmd/utils"
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/common/hexutil"
@@ -44,7 +47,8 @@ import (
     "github.com/ethereum/go-ethereum/metrics"
     "github.com/ethereum/go-ethereum/node"
     "github.com/ethereum/go-ethereum/p2p/enode"
-    "github.com/urfave/cli/v2"
+    "github.com/ethereum/go-ethereum/trie"
+    "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
 )
 
 var (
```
```diff
@@ -191,6 +195,21 @@ It's deprecated, please use "geth db export" instead.
         }, utils.DatabasePathFlags),
         Description: `
 This command dumps out the state for a given block (or latest, if none provided).
+If you use "dump" command in path mode, please firstly use "dump-roothash" command to get all available state root hash.
 `,
     }
+    dumpRootHashCommand = &cli.Command{
+        Action: dumpAllRootHashInPath,
+        Name:   "dump-roothash",
+        Usage:  "Dump all available state root hash in path mode",
+        Flags: flags.Merge([]cli.Flag{
+            utils.StateSchemeFlag,
+        }, utils.DatabasePathFlags),
+        Description: `
+The dump-roothash command dump all available state root hash in path mode.
+If you use "dump" command in path mode, please note that it only keeps at most 129 blocks which belongs to diffLayer or diskLayer.
+Therefore, you must specify the blockNumber or blockHash that locates in diffLayer or diskLayer.
+"geth" will print all available blockNumber and related block state root hash, and you can query block hash by block number.
+`,
+    }
 )
```
```diff
@@ -590,11 +609,20 @@ func exportPreimages(ctx *cli.Context) error {
 }
 
 func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, ethdb.Database, common.Hash, error) {
-    db := utils.MakeChainDatabase(ctx, stack, true, false)
-    var header *types.Header
     if ctx.NArg() > 1 {
         return nil, nil, common.Hash{}, fmt.Errorf("expected 1 argument (number or hash), got %d", ctx.NArg())
     }
+
+    db := utils.MakeChainDatabase(ctx, stack, true, false)
+    scheme, err := rawdb.ParseStateScheme(ctx.String(utils.StateSchemeFlag.Name), db)
+    if err != nil {
+        return nil, nil, common.Hash{}, err
+    }
+    if scheme == rawdb.PathScheme {
+        fmt.Println("You are using geth dump in path mode, please use `geth dump-roothash` command to get all available blocks.")
+    }
+
+    header := &types.Header{}
     if ctx.NArg() == 1 {
         arg := ctx.Args().First()
         if hashish(arg) {
```
```diff
@@ -617,11 +645,22 @@ func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, eth
         }
     } else {
         // Use latest
-        header = rawdb.ReadHeadHeader(db)
+        if scheme == rawdb.PathScheme {
+            triedb := trie.NewDatabase(db, &trie.Config{PathDB: pathdb.ReadOnly})
+            defer triedb.Close()
+            if stateRoot := triedb.Head(); stateRoot != (common.Hash{}) {
+                header.Root = stateRoot
+            } else {
+                return nil, nil, common.Hash{}, fmt.Errorf("no top state root hash in path db")
+            }
+        } else {
+            header = rawdb.ReadHeadHeader(db)
+        }
     }
     if header == nil {
         return nil, nil, common.Hash{}, errors.New("no head block found")
     }
+
     startArg := common.FromHex(ctx.String(utils.StartKeyFlag.Name))
     var start common.Hash
     switch len(startArg) {
@@ -634,6 +673,7 @@ func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, eth
     default:
         return nil, nil, common.Hash{}, fmt.Errorf("invalid start argument: %x. 20 or 32 hex-encoded bytes required", startArg)
     }
+
     var conf = &state.DumpConfig{
         SkipCode:    ctx.Bool(utils.ExcludeCodeFlag.Name),
         SkipStorage: ctx.Bool(utils.ExcludeStorageFlag.Name),
@@ -641,9 +681,10 @@ func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, eth
         Start: start.Bytes(),
         Max:   ctx.Uint64(utils.DumpLimitFlag.Name),
     }
+    conf.StateScheme = scheme
     log.Info("State dump configured", "block", header.Number, "hash", header.Hash().Hex(),
-        "skipcode", conf.SkipCode, "skipstorage", conf.SkipStorage,
-        "start", hexutil.Encode(conf.Start), "limit", conf.Max)
+        "skipcode", conf.SkipCode, "skipstorage", conf.SkipStorage, "start", hexutil.Encode(conf.Start),
+        "limit", conf.Max, "state scheme", conf.StateScheme)
     return conf, db, header.Root, nil
 }
```
```diff
@@ -675,6 +716,29 @@ func dump(ctx *cli.Context) error {
     return nil
 }
 
+func dumpAllRootHashInPath(ctx *cli.Context) error {
+    stack, _ := makeConfigNode(ctx)
+    defer stack.Close()
+    db := utils.MakeChainDatabase(ctx, stack, true, false)
+    defer db.Close()
+    triedb := trie.NewDatabase(db, &trie.Config{PathDB: pathdb.ReadOnly})
+    defer triedb.Close()
+
+    scheme, err := rawdb.ParseStateScheme(ctx.String(utils.StateSchemeFlag.Name), db)
+    if err != nil {
+        return err
+    }
+    if scheme == rawdb.HashScheme {
+        return errors.New("incorrect state scheme, you should use it in path mode")
+    }
+
+    table := tablewriter.NewWriter(os.Stdout)
+    table.SetHeader([]string{"Block Number", "Block State Root Hash"})
+    table.AppendBulk(triedb.GetAllRooHash())
+    table.Render()
+    return nil
+}
+
 // hashish returns true for strings that look like hashes.
 func hashish(x string) bool {
     _, err := strconv.Atoi(x)
```
```diff
@@ -239,6 +239,7 @@ func init() {
         removedbCommand,
         dumpCommand,
         dumpGenesisCommand,
+        dumpRootHashCommand,
         // See accountcmd.go:
         accountCommand,
         walletCommand,
```
```diff
@@ -18,7 +18,6 @@
 package utils
 
 import (
-    "bufio"
     "context"
     "crypto/ecdsa"
     "encoding/hex"
@@ -1884,7 +1883,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
     if ctx.IsSet(StateHistoryFlag.Name) {
         cfg.StateHistory = ctx.Uint64(StateHistoryFlag.Name)
     }
-    scheme, err := compareCLIWithConfig(ctx)
+    scheme, err := ParseCLIAndConfigStateScheme(ctx.String(StateSchemeFlag.Name), cfg.StateScheme)
     if err != nil {
         Fatalf("%v", err)
     }
```
```diff
@@ -2353,11 +2352,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh
     if gcmode := ctx.String(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" {
         Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name)
     }
-    provided, err := compareCLIWithConfig(ctx)
-    if err != nil {
-        Fatalf("%v", err)
-    }
-    scheme, err := rawdb.ParseStateScheme(provided, chainDb)
+    scheme, err := rawdb.ParseStateScheme(ctx.String(StateSchemeFlag.Name), chainDb)
     if err != nil {
         Fatalf("%v", err)
     }
@@ -2425,11 +2420,7 @@ func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, read
     config := &trie.Config{
         Preimages: preimage,
     }
-    provided, err := compareCLIWithConfig(ctx)
-    if err != nil {
-        Fatalf("%v", err)
-    }
-    scheme, err := rawdb.ParseStateScheme(provided, disk)
+    scheme, err := rawdb.ParseStateScheme(ctx.String(StateSchemeFlag.Name), disk)
     if err != nil {
         Fatalf("%v", err)
     }
```
```diff
@@ -2448,26 +2439,15 @@ func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, read
     return trie.NewDatabase(disk, config)
 }
 
-func compareCLIWithConfig(ctx *cli.Context) (string, error) {
-    var (
-        cfgScheme string
-        err       error
-    )
-    if file := ctx.String("config"); file != "" {
-        // we don't validate cfgScheme because it's already checked in cmd/geth/loadBaseConfig
-        if cfgScheme, err = scanConfigForStateScheme(file); err != nil {
-            log.Error("Failed to parse config file", "error", err)
-            return "", err
-        }
-    }
-    if !ctx.IsSet(StateSchemeFlag.Name) {
+// ParseCLIAndConfigStateScheme parses state scheme in CLI and config.
+func ParseCLIAndConfigStateScheme(cliScheme, cfgScheme string) (string, error) {
+    if cliScheme == "" {
         if cfgScheme != "" {
             log.Info("Use config state scheme", "config", cfgScheme)
         }
         return cfgScheme, nil
     }
 
-    cliScheme := ctx.String(StateSchemeFlag.Name)
     if !rawdb.ValidateStateScheme(cliScheme) {
         return "", fmt.Errorf("invalid state scheme in CLI: %s", cliScheme)
     }
@@ -2477,35 +2457,3 @@ func compareCLIWithConfig(ctx *cli.Context) (string, error) {
     }
     return "", fmt.Errorf("incompatible state scheme, CLI: %s, config: %s", cliScheme, cfgScheme)
 }
-
-func scanConfigForStateScheme(file string) (string, error) {
-    f, err := os.Open(file)
-    if err != nil {
-        return "", err
-    }
-    defer f.Close()
-
-    scanner := bufio.NewScanner(f)
-    targetStr := "StateScheme"
-    for scanner.Scan() {
-        line := scanner.Text()
-        if strings.Contains(line, targetStr) {
-            return indexStateScheme(line), nil
-        }
-    }
-
-    if err = scanner.Err(); err != nil {
-        return "", err
-    }
-    return "", nil
-}
-
-func indexStateScheme(str string) string {
-    i1 := strings.Index(str, "\"")
-    i2 := strings.LastIndex(str, "\"")
-
-    if i1 != -1 && i2 != -1 && i1 < i2 {
-        return str[i1+1 : i2]
-    }
-    return ""
-}
```
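The rewrite replaces ad-hoc scanning of config.toml with an explicit precedence rule between the CLI flag and the already-decoded config value. A minimal sketch of that precedence, using a simplified stand-in rather than the bsc function itself:

```go
package main

import "fmt"

// pickScheme sketches the precedence ParseCLIAndConfigStateScheme encodes:
// the CLI flag wins when set, the config value is the fallback, and a
// disagreement between the two is an error.
func pickScheme(cliScheme, cfgScheme string) (string, error) {
	if cliScheme == "" {
		return cfgScheme, nil // fall back to config.toml (may be empty)
	}
	if cfgScheme == "" || cliScheme == cfgScheme {
		return cliScheme, nil // CLI flag wins when there is no conflict
	}
	return "", fmt.Errorf("incompatible state scheme, CLI: %s, config: %s", cliScheme, cfgScheme)
}

func main() {
	fmt.Println(pickScheme("path", ""))     // path
	fmt.Println(pickScheme("", "hash"))     // hash
	fmt.Println(pickScheme("path", "hash")) // error: incompatible
}
```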
```diff
@@ -18,13 +18,8 @@
 package utils
 
 import (
-    "os"
     "reflect"
     "testing"
-
-    "github.com/stretchr/testify/assert"
-
-    "github.com/ethereum/go-ethereum/core/rawdb"
 )
 
 func Test_SplitTagsFlag(t *testing.T) {
```
```diff
@@ -67,126 +62,3 @@ func Test_SplitTagsFlag(t *testing.T) {
         })
     }
 }
-
-func Test_parseConfig(t *testing.T) {
-    tests := []struct {
-        name         string
-        fn           func() string
-        wantedResult string
-        wantedIsErr  bool
-        wantedErrStr string
-    }{
-        {
-            name: "path",
-            fn: func() string {
-                tomlString := `[Eth]
-NetworkId = 56
-StateScheme = "path"`
-                return createTempTomlFile(t, tomlString)
-            },
-            wantedResult: rawdb.PathScheme,
-            wantedIsErr:  false,
-            wantedErrStr: "",
-        },
-        {
-            name: "hash",
-            fn: func() string {
-                tomlString := `[Eth]
-NetworkId = 56
-StateScheme = "hash"`
-                return createTempTomlFile(t, tomlString)
-            },
-            wantedResult: rawdb.HashScheme,
-            wantedIsErr:  false,
-            wantedErrStr: "",
-        },
-        {
-            name: "empty state scheme",
-            fn: func() string {
-                tomlString := `[Eth]
-NetworkId = 56
-StateScheme = ""`
-                return createTempTomlFile(t, tomlString)
-            },
-            wantedResult: "",
-            wantedIsErr:  false,
-            wantedErrStr: "",
-        },
-        {
-            name: "unset state scheme",
-            fn: func() string {
-                tomlString := `[Eth]
-NetworkId = 56`
-                return createTempTomlFile(t, tomlString)
-            },
-            wantedResult: "",
-            wantedIsErr:  false,
-            wantedErrStr: "",
-        },
-        {
-            name:         "failed to open file",
-            fn:           func() string { return "" },
-            wantedResult: "",
-            wantedIsErr:  true,
-            wantedErrStr: "open : no such file or directory",
-        },
-    }
-    for _, tt := range tests {
-        t.Run(tt.name, func(t *testing.T) {
-            result, err := scanConfigForStateScheme(tt.fn())
-            if tt.wantedIsErr {
-                assert.Contains(t, err.Error(), tt.wantedErrStr)
-            } else {
-                assert.Nil(t, err)
-            }
-            assert.Equal(t, tt.wantedResult, result)
-        })
-    }
-}
-
-// createTempTomlFile is a helper function to create a temp file with the provided TOML content
-func createTempTomlFile(t *testing.T, content string) string {
-    t.Helper()
-
-    dir := t.TempDir()
-    file, err := os.CreateTemp(dir, "config.toml")
-    if err != nil {
-        t.Fatalf("Unable to create temporary file: %v", err)
-    }
-    defer file.Close()
-
-    _, err = file.WriteString(content)
-    if err != nil {
-        t.Fatalf("Unable to write to temporary file: %v", err)
-    }
-    return file.Name()
-}
-
-func Test_parseString(t *testing.T) {
-    tests := []struct {
-        name       string
-        arg        string
-        wantResult string
-    }{
-        {
-            name:       "hash string",
-            arg:        "\"hash\"",
-            wantResult: rawdb.HashScheme,
-        },
-        {
-            name:       "path string",
-            arg:        "\"path\"",
-            wantResult: rawdb.PathScheme,
-        },
-        {
-            name:       "empty string",
-            arg:        "",
-            wantResult: "",
-        },
-        {
-            name:       "empty string",
-            arg:        "\"\"",
-            wantResult: "",
-        },
-    }
-    for _, tt := range tests {
-        t.Run(tt.name, func(t *testing.T) {
-            if got := indexStateScheme(tt.arg); got != tt.wantResult {
-                t.Errorf("parseString() = %v, want %v", got, tt.wantResult)
-            }
-        })
-    }
-}
```
@@ -1751,7 +1751,6 @@ func (p *Parlia) applyTransaction(
|
||||
receipt.BlockNumber = header.Number
|
||||
receipt.TransactionIndex = uint(state.TxIndex())
|
||||
*receipts = append(*receipts, receipt)
|
||||
state.SetNonce(msg.From(), nonce+1)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1975,6 +1974,8 @@ func applyMessage(
|
||||
// about the transaction and calling mechanisms.
|
||||
vmenv := vm.NewEVM(context, vm.TxContext{Origin: msg.From(), GasPrice: big.NewInt(0)}, state, chainConfig, vm.Config{})
|
||||
// Apply the transaction to the current state (included in the env)
|
||||
// Increment the nonce for the next transaction
|
||||
state.SetNonce(msg.From(), state.GetNonce(msg.From())+1)
|
||||
ret, returnGas, err := vmenv.Call(
|
||||
vm.AccountRef(msg.From()),
|
||||
*msg.To(),
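These two parlia hunks move the system-transaction nonce bump from after receipt handling to before the EVM call, which matches the order used by ordinary transaction processing, where the sender nonce is incremented before execution. One plausible consequence (not stated in the diff) is that nonce-dependent behavior such as CREATE address derivation observes a consistent value. A sketch of that nonce dependence, not taken from the parlia code:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	from := common.HexToAddress("0xfffffffffffffffffffffffffffffffffffffffe")
	// The same sender with a different nonce yields a different contract
	// address, so when the nonce is bumped can be observable on-chain.
	fmt.Println(crypto.CreateAddress(from, 0))
	fmt.Println(crypto.CreateAddress(from, 1))
}
```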
```diff
@@ -335,7 +335,7 @@ func ParseStateScheme(provided string, disk ethdb.Database) (string, error) {
     if stored == "" {
         // use default scheme for empty database, flip it when
         // path mode is chosen as default
-        log.Info("State schema set to default", "scheme", "hash")
+        log.Info("State scheme set to default", "scheme", "hash")
         return HashScheme, nil
     }
     log.Info("State scheme set to already existing disk db", "scheme", stored)
```
```diff
@@ -129,6 +129,8 @@ func InspectFreezerTable(ancient string, freezerName string, tableName string, s
     switch freezerName {
     case chainFreezerName:
         path, tables = resolveChainFreezerDir(ancient), chainFreezerNoSnappy
+    case stateFreezerName:
+        path, tables = filepath.Join(ancient, freezerName), stateFreezerNoSnappy
     default:
         return fmt.Errorf("unknown freezer, supported ones: %v", freezers)
     }
```
```diff
@@ -188,19 +188,27 @@ func (batch *freezerTableBatch) maybeCommit() error {
 
 // commit writes the batched items to the backing freezerTable.
 func (batch *freezerTableBatch) commit() error {
-    // Write data.
+    // Write data. The head file is fsync'd after write to ensure the
+    // data is truly transferred to disk.
     _, err := batch.t.head.Write(batch.dataBuffer)
     if err != nil {
         return err
     }
+    if err := batch.t.head.Sync(); err != nil {
+        return err
+    }
     dataSize := int64(len(batch.dataBuffer))
     batch.dataBuffer = batch.dataBuffer[:0]
 
-    // Write indices.
+    // Write indices. The index file is fsync'd after write to ensure the
+    // data indexes are truly transferred to disk.
     _, err = batch.t.index.Write(batch.indexBuffer)
     if err != nil {
         return err
     }
+    if err := batch.t.index.Sync(); err != nil {
+        return err
+    }
     indexSize := int64(len(batch.indexBuffer))
     batch.indexBuffer = batch.indexBuffer[:0]
```
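The durability pattern this commit change adopts, shown in isolation: a successful `Write` only hands bytes to the OS page cache, so `File.Sync` (fsync) is required before the data can be considered on disk. Illustrative sketch only, not the freezer code:

```go
package main

import (
	"log"
	"os"
)

func main() {
	f, err := os.Create("example.dat")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	if _, err := f.Write([]byte("payload")); err != nil {
		log.Fatal(err)
	}
	// Without this, a crash or power loss can drop the write even though
	// Write returned successfully.
	if err := f.Sync(); err != nil {
		log.Fatal(err)
	}
}
```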
```diff
@@ -223,7 +223,9 @@ func (t *freezerTable) repair() error {
         if t.readonly {
             return fmt.Errorf("index file(path: %s, name: %s) size is not a multiple of %d", t.path, t.name, indexEntrySize)
         }
-        truncateFreezerFile(t.index, stat.Size()-overflow) // New file can't trigger this path
+        if err := truncateFreezerFile(t.index, stat.Size()-overflow); err != nil {
+            return err
+        } // New file can't trigger this path
     }
     // Retrieve the file sizes and prepare for truncation
     if stat, err = t.index.Stat(); err != nil {
@@ -268,8 +270,8 @@ func (t *freezerTable) repair() error {
     // Print an error log if the index is corrupted due to an incorrect
     // last index item. While it is theoretically possible to have a zero offset
     // by storing all zero-size items, it is highly unlikely to occur in practice.
-    if lastIndex.offset == 0 && offsetsSize%indexEntrySize > 1 {
-        log.Error("Corrupted index file detected", "lastOffset", lastIndex.offset, "items", offsetsSize%indexEntrySize-1)
+    if lastIndex.offset == 0 && offsetsSize/indexEntrySize > 1 {
+        log.Error("Corrupted index file detected", "lastOffset", lastIndex.offset, "indexes", offsetsSize/indexEntrySize)
     }
     if t.readonly {
         t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForReadOnly)
```
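The corruption guard switched from `%` to `/` because the earlier truncation already forces the index size to a clean multiple of the entry size, so the remainder is always zero and the old check could never fire; division instead counts how many index entries exist, which is what the guard means to test. A tiny demonstration of the arithmetic (the value of `indexEntrySize` in the freezer is 6 bytes):

```go
package main

import "fmt"

func main() {
	const indexEntrySize = 6
	offsetsSize := int64(18) // three complete index entries

	fmt.Println(offsetsSize % indexEntrySize) // 0 -> the old modulo check never triggers
	fmt.Println(offsetsSize / indexEntrySize) // 3 -> the number of index entries
}
```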
```diff
@@ -424,6 +426,9 @@ func (t *freezerTable) truncateHead(items uint64) error {
     if err := truncateFreezerFile(t.index, int64(length+1)*indexEntrySize); err != nil {
         return err
     }
+    if err := t.index.Sync(); err != nil {
+        return err
+    }
     // Calculate the new expected size of the data file and truncate it
     var expected indexEntry
     if length == 0 {
@@ -446,6 +451,7 @@ func (t *freezerTable) truncateHead(items uint64) error {
     // Release any files _after the current head -- both the previous head
     // and any files which may have been opened for reading
     t.releaseFilesAfter(expected.filenum, true)
+
     // Set back the historic head
     t.head = newHead
     t.headId = expected.filenum
@@ -453,6 +459,9 @@ func (t *freezerTable) truncateHead(items uint64) error {
     if err := truncateFreezerFile(t.head, int64(expected.offset)); err != nil {
         return err
     }
+    if err := t.head.Sync(); err != nil {
+        return err
+    }
     // All data files truncated, set internal counters and return
     t.headBytes = int64(expected.offset)
     t.items.Store(items)
@@ -597,10 +606,12 @@ func (t *freezerTable) Close() error {
     // error on Windows.
     doClose(t.index, true, true)
     doClose(t.meta, true, true)
+
     // The preopened non-head data-files are all opened in readonly.
     // The head is opened in rw-mode, so we sync it here - but since it's also
     // part of t.files, it will be closed in the loop below.
     doClose(t.head, true, false) // sync but do not close
+
     for _, f := range t.files {
         doClose(f, false, true) // close but do not sync
     }
```
```diff
@@ -73,11 +73,7 @@ func copyFrom(srcPath, destPath string, offset uint64, before func(f *os.File) e
         return err
     }
     f = nil
 
-    if err := os.Rename(fname, destPath); err != nil {
-        return err
-    }
-    return nil
+    return os.Rename(fname, destPath)
 }
 
 // openFreezerFileForAppend opens a freezer table file and seeks to the end
```
```diff
@@ -23,6 +23,7 @@ import (
 
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/common/hexutil"
+    "github.com/ethereum/go-ethereum/core/rawdb"
     "github.com/ethereum/go-ethereum/core/types"
     "github.com/ethereum/go-ethereum/log"
     "github.com/ethereum/go-ethereum/rlp"
@@ -37,6 +38,7 @@ type DumpConfig struct {
     OnlyWithAddresses bool
     Start             []byte
     Max               uint64
+    StateScheme       string
 }
 
 // DumpCollector interface which the state trie calls during iteration
@@ -57,7 +59,6 @@ type DumpAccount struct {
     Storage   map[common.Hash]string `json:"storage,omitempty"`
     Address   *common.Address        `json:"address,omitempty"` // Address only present in iterative (line-by-line) mode
     SecureKey hexutil.Bytes          `json:"key,omitempty"`     // If we don't have address, we can output the key
-
 }
 
 // Dump represents the full dump in a collected format, as one large map.
@@ -177,7 +178,13 @@ func (s *StateDB) DumpToCollector(c DumpCollector, conf *DumpConfig) (nextKey []
         }
         if !conf.SkipStorage {
             account.Storage = make(map[common.Hash]string)
-            tr, err := obj.getTrie()
+            var tr Trie
+            if conf.StateScheme == rawdb.PathScheme {
+                tr, err = trie.NewStateTrie(trie.StorageTrieID(obj.db.originalRoot, common.BytesToHash(it.Key),
+                    obj.data.Root), obj.db.db.TrieDB())
+            } else {
+                tr, err = obj.getTrie()
+            }
             if err != nil {
                 log.Error("Failed to load storage trie", "err", err)
                 continue
```
```diff
@@ -57,10 +57,6 @@ const (
 )
 
 var (
-    // ErrAlreadyKnown is returned if the transactions is already contained
-    // within the pool.
-    ErrAlreadyKnown = errors.New("already known")
-
     // ErrTxPoolOverflow is returned if the transaction pool is full and can't accept
     // another remote transaction.
     ErrTxPoolOverflow = errors.New("txpool is full")
@@ -715,7 +711,7 @@ func (pool *LegacyPool) add(tx *types.Transaction, local bool) (replaced bool, e
     if pool.all.Get(hash) != nil {
         log.Trace("Discarding already known transaction", "hash", hash)
         knownTxMeter.Mark(1)
-        return false, ErrAlreadyKnown
+        return false, txpool.ErrAlreadyKnown
     }
     // Make the local flag. If it's from local source or it's from the network but
     // the sender is marked as local previously, treat it as the local transaction.
@@ -1038,7 +1034,7 @@ func (pool *LegacyPool) addTxs(txs []*types.Transaction, local, sync bool) []err
     for i, tx := range txs {
         // If the transaction is known, pre-set the error slot
         if pool.all.Get(tx.Hash()) != nil {
-            errs[i] = ErrAlreadyKnown
+            errs[i] = txpool.ErrAlreadyKnown
            knownTxMeter.Mark(1)
             continue
         }
```
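With the duplicate-transaction error now defined once in the `txpool` package instead of shadowed inside `legacypool`, callers can match it with `errors.Is` regardless of which pool implementation produced it. A sketch, assuming the module's `core/txpool` import path:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/ethereum/go-ethereum/core/txpool"
)

func main() {
	// Wrapping does not break the sentinel comparison.
	err := fmt.Errorf("adding tx: %w", txpool.ErrAlreadyKnown)
	fmt.Println(errors.Is(err, txpool.ErrAlreadyKnown)) // true
}
```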
```diff
@@ -59,6 +59,7 @@ func (api *DebugAPI) DumpBlock(blockNr rpc.BlockNumber) (state.Dump, error) {
         if stateDb == nil {
             return state.Dump{}, errors.New("pending state is not available")
         }
+        opts.StateScheme = stateDb.Database().TrieDB().Scheme()
         return stateDb.RawDump(opts), nil
     }
     var header *types.Header
@@ -83,6 +84,7 @@ func (api *DebugAPI) DumpBlock(blockNr rpc.BlockNumber) (state.Dump, error) {
     if err != nil {
         return state.Dump{}, err
     }
+    opts.StateScheme = stateDb.Database().TrieDB().Scheme()
     return stateDb.RawDump(opts), nil
 }
 
@@ -188,6 +190,7 @@ func (api *DebugAPI) AccountRange(blockNrOrHash rpc.BlockNumberOrHash, start hex
         OnlyWithAddresses: !incompletes,
         Start:             start,
         Max:               uint64(maxResults),
+        StateScheme:       stateDb.Database().TrieDB().Scheme(),
     }
     if maxResults > AccountRangeMaxResults || maxResults <= 0 {
         opts.Max = AccountRangeMaxResults
```
```diff
@@ -338,7 +338,7 @@ func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool)
             // If 'other reject' is >25% of the deliveries in any batch, sleep a bit.
             if otherreject > 128/4 {
                 time.Sleep(200 * time.Millisecond)
-                log.Warn("Peer delivering stale transactions", "peer", peer, "rejected", otherreject)
+                log.Debug("Peer delivering stale transactions", "peer", peer, "rejected", otherreject)
             }
         }
         select {
@@ -381,8 +381,6 @@ func (h *handler) protoTracker() {
                 <-h.handlerDoneCh
             }
             return
-        case <-h.stopCh:
-            return
         }
     }
 }
@@ -132,8 +132,6 @@ func (cs *chainSyncer) loop() {
                 <-cs.doneCh
             }
             return
-        case <-cs.handler.stopCh:
-            return
         }
     }
 }
```
go.mod (2 changes)
```diff
@@ -234,7 +234,7 @@ require (
     github.com/prysmaticlabs/prysm v0.0.0-20220124113610-e26cde5e091b // indirect
     github.com/quic-go/qpack v0.4.0 // indirect
     github.com/quic-go/qtls-go1-20 v0.3.4 // indirect
-    github.com/quic-go/quic-go v0.39.3 // indirect
+    github.com/quic-go/quic-go v0.39.4 // indirect
     github.com/quic-go/webtransport-go v0.6.0 // indirect
     github.com/raulk/go-watchdog v1.3.0 // indirect
     github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
```
go.sum (4 changes)
```diff
@@ -1434,8 +1434,8 @@ github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo=
 github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A=
 github.com/quic-go/qtls-go1-20 v0.3.4 h1:MfFAPULvst4yoMgY9QmtpYmfij/em7O8UUi+bNVm7Cg=
 github.com/quic-go/qtls-go1-20 v0.3.4/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k=
-github.com/quic-go/quic-go v0.39.3 h1:o3YB6t2SR+HU/pgwF29kJ6g4jJIJEwEZ8CKia1h1TKg=
-github.com/quic-go/quic-go v0.39.3/go.mod h1:T09QsDQWjLiQ74ZmacDfqZmhY/NLnw5BC40MANNNZ1Q=
+github.com/quic-go/quic-go v0.39.4 h1:PelfiuG7wXEffUT2yceiqz5V6Pc0TA5ruOd1LcmFc1s=
+github.com/quic-go/quic-go v0.39.4/go.mod h1:T09QsDQWjLiQ74ZmacDfqZmhY/NLnw5BC40MANNNZ1Q=
 github.com/quic-go/webtransport-go v0.6.0 h1:CvNsKqc4W2HljHJnoT+rMmbRJybShZ0YPFDD3NxaZLY=
 github.com/quic-go/webtransport-go v0.6.0/go.mod h1:9KjU4AEBqEQidGHNDkZrb8CAa1abRaosM2yGOyiikEc=
 github.com/r3labs/sse v0.0.0-20210224172625-26fe804710bc/go.mod h1:S8xSOnV3CgpNrWd0GQ/OoQfMtlg2uPRSuTzcSGrzwK8=
```
```diff
@@ -10,6 +10,8 @@ import (
     "time"
 )
 
+const backupTimeFormat = "2006-01-02_15"
+
 type TimeTicker struct {
     stop chan struct{}
     C    <-chan time.Time
@@ -69,19 +71,24 @@ type AsyncFileWriter struct {
     buf        chan []byte
     stop       chan struct{}
     timeTicker *TimeTicker
+
+    rotateHours uint
+    maxBackups  int
 }
 
-func NewAsyncFileWriter(filePath string, maxBytesSize int64, rotateHours uint) *AsyncFileWriter {
+func NewAsyncFileWriter(filePath string, maxBytesSize int64, maxBackups int, rotateHours uint) *AsyncFileWriter {
     absFilePath, err := filepath.Abs(filePath)
     if err != nil {
         panic(fmt.Sprintf("get file path of logger error. filePath=%s, err=%s", filePath, err))
     }
 
     return &AsyncFileWriter{
-        filePath:   absFilePath,
-        buf:        make(chan []byte, maxBytesSize),
-        stop:       make(chan struct{}),
-        timeTicker: NewTimeTicker(rotateHours),
+        filePath:    absFilePath,
+        buf:         make(chan []byte, maxBytesSize),
+        stop:        make(chan struct{}),
+        rotateHours: rotateHours,
+        maxBackups:  maxBackups,
+        timeTicker:  NewTimeTicker(rotateHours),
     }
 }
```
```diff
@@ -178,6 +185,9 @@ func (w *AsyncFileWriter) rotateFile() {
             if err := w.initLogFile(); err != nil {
                 fmt.Fprintf(os.Stderr, "init log file error. err=%s", err)
             }
+            if err := w.removeExpiredFile(); err != nil {
+                fmt.Fprintf(os.Stderr, "remove expired file error. err=%s", err)
+            }
         default:
         }
     }
@@ -222,5 +232,29 @@ func (w *AsyncFileWriter) flushAndClose() error {
 }
 
 func (w *AsyncFileWriter) timeFilePath(filePath string) string {
-    return filePath + "." + time.Now().Format("2006-01-02_15")
+    return filePath + "." + time.Now().Format(backupTimeFormat)
 }
+
+func (w *AsyncFileWriter) getExpiredFile(filePath string, maxBackups int, rotateHours uint) string {
+    if rotateHours > 0 {
+        maxBackups = int(rotateHours) * maxBackups
+    }
+    return filePath + "." + time.Now().Add(-time.Hour*time.Duration(maxBackups)).Format(backupTimeFormat)
+}
+
+func (w *AsyncFileWriter) removeExpiredFile() error {
+    if w.maxBackups == 0 {
+        return nil
+    }
+
+    oldFilepath := w.getExpiredFile(w.filePath, w.maxBackups, w.rotateHours)
+    _, err := os.Stat(oldFilepath)
+    if os.IsNotExist(err) {
+        return nil
+    }
+    errRemove := os.Remove(oldFilepath)
+    if err != nil {
+        return errRemove
+    }
+    return err
+}
```
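A usage sketch of the updated writer API: the new third argument caps how many rotated backup files survive, with hourly rotation stamping each backup using `backupTimeFormat`. The `Stop` call is assumed to flush and close as in the bsc writer; treat this as illustrative, not canonical:

```go
package main

import "github.com/ethereum/go-ethereum/log"

func main() {
	// 10 MB size limit, keep 7 backups, rotate every hour.
	w := log.NewAsyncFileWriter("./bsc.log", 10*1024*1024, 7, 1)
	w.Start()
	defer w.Stop() // assumed to flush buffered lines before closing

	w.Write([]byte("hello\n"))
}
```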
```diff
@@ -6,10 +6,12 @@ import (
     "strings"
     "testing"
     "time"
+
+    "github.com/stretchr/testify/assert"
 )
 
 func TestWriterHourly(t *testing.T) {
-    w := NewAsyncFileWriter("./hello.log", 100, 1)
+    w := NewAsyncFileWriter("./hello.log", 100, 1, 1)
     w.Start()
     w.Write([]byte("hello\n"))
     w.Write([]byte("world\n"))
@@ -67,3 +69,22 @@ func TestGetNextRotationHour(t *testing.T) {
         t.Run("TestGetNextRotationHour_"+strconv.Itoa(i), test(tc.now, tc.delta, tc.expectedHour))
     }
 }
+
+func TestClearBackups(t *testing.T) {
+    dir := "./test"
+    os.Mkdir(dir, 0700)
+    w := NewAsyncFileWriter("./test/bsc.log", 100, 1, 1)
+    defer os.RemoveAll(dir)
+    fakeCurrentTime := time.Now()
+    name := ""
+    data := []byte("data")
+    for i := 0; i < 5; i++ {
+        name = w.filePath + "." + fakeCurrentTime.Format(backupTimeFormat)
+        _ = os.WriteFile(name, data, 0700)
+        fakeCurrentTime = fakeCurrentTime.Add(-time.Hour * 1)
+    }
+    oldFile := w.getExpiredFile(w.filePath, w.maxBackups, w.rotateHours)
+    w.removeExpiredFile()
+    _, err := os.Stat(oldFile)
+    assert.True(t, os.IsNotExist(err))
+}
```
```diff
@@ -75,14 +75,14 @@ func FileHandler(path string, fmtr Format) (Handler, error) {
 // RotatingFileHandler returns a handler which writes log records to file chunks
 // at the given path. When a file's size reaches the limit, the handler creates
 // a new file named after the timestamp of the first log record it will contain.
-func RotatingFileHandler(filePath string, limit uint, formatter Format, rotateHours uint) (Handler, error) {
+func RotatingFileHandler(filePath string, limit uint, maxBackups uint, formatter Format, rotateHours uint) (Handler, error) {
     if _, err := os.Stat(path.Dir(filePath)); os.IsNotExist(err) {
         err := os.MkdirAll(path.Dir(filePath), 0755)
         if err != nil {
             return nil, fmt.Errorf("could not create directory %s, %v", path.Dir(filePath), err)
         }
     }
-    fileWriter := NewAsyncFileWriter(filePath, int64(limit), rotateHours)
+    fileWriter := NewAsyncFileWriter(filePath, int64(limit), int(maxBackups), rotateHours)
     fileWriter.Start()
     return StreamHandler(fileWriter, formatter), nil
 }
@@ -290,8 +290,8 @@ func (c Ctx) toArray() []interface{} {
     return arr
 }
 
-func NewFileLvlHandler(logPath string, maxBytesSize uint, level string, rotateHours uint) Handler {
-    rfh, err := RotatingFileHandler(logPath, maxBytesSize, LogfmtFormat(), rotateHours)
+func NewFileLvlHandler(logPath string, maxBytesSize uint, maxBackups uint, level string, rotateHours uint) Handler {
+    rfh, err := RotatingFileHandler(logPath, maxBytesSize, maxBackups, LogfmtFormat(), rotateHours)
     if err != nil {
         panic(err)
     }
```
```diff
@@ -513,6 +513,7 @@ type LogConfig struct {
     MaxBytesSize *uint   `toml:",omitempty"`
     Level        *string `toml:",omitempty"`
     RotateHours  *uint   `toml:",omitempty"`
+    MaxBackups   *uint   `toml:",omitempty"`
 
     // TermTimeFormat is the time format used for console logging.
     TermTimeFormat *string `toml:",omitempty"`
```
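With `MaxBackups` added to `LogConfig`, a config.toml fragment along these lines should enable backup pruning. Only the fields visible in this diff are shown, and the `[Node.LogConfig]` section path is an assumption based on `LogConfig` hanging off the node configuration:

```toml
# assumed section path; field names mirror the LogConfig struct above
[Node.LogConfig]
MaxBytesSize = 104857600  # rotate once the file reaches ~100 MB
Level = "info"
RotateHours = 1
MaxBackups = 7            # new: expire hourly backups beyond this window
```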
```diff
@@ -118,7 +118,12 @@ func New(conf *Config) (*Node, error) {
             rotateHours = *conf.LogConfig.RotateHours
         }
 
-        log.Root().SetHandler(log.NewFileLvlHandler(logFilePath, *conf.LogConfig.MaxBytesSize, *conf.LogConfig.Level, rotateHours))
+        maxBackups := uint(0)
+        if conf.LogConfig.MaxBackups != nil {
+            maxBackups = *conf.LogConfig.MaxBackups
+        }
+
+        log.Root().SetHandler(log.NewFileLvlHandler(logFilePath, *conf.LogConfig.MaxBytesSize, maxBackups, *conf.LogConfig.Level, rotateHours))
     }
 }
 if conf.Logger == nil {
```
```diff
@@ -65,9 +65,6 @@ const (
 
     // Maximum amount of time allowed for writing a complete message.
     frameWriteTimeout = 20 * time.Second
-
-    // Maximum time to wait before stop the p2p server
-    stopTimeout = 5 * time.Second
 )
 
 var (
@@ -457,7 +454,7 @@ func (srv *Server) Stop() {
 
     select {
     case <-stopChan:
-    case <-time.After(stopTimeout):
+    case <-time.After(defaultDialTimeout): // we should use defaultDialTimeout as we can dial just before the shutdown
         srv.log.Warn("stop p2p server timeout, forcing stop")
     }
 }
```
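The deadlock fix keeps the same shutdown shape but waits a full dial timeout before forcing the stop, so a dial started just before shutdown can finish instead of wedging the stop path. The underlying pattern in isolation (generic sketch, not `p2p.Server`):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	stopChan := make(chan struct{})
	go func() {
		time.Sleep(50 * time.Millisecond) // pretend cleanup finishes quickly
		close(stopChan)
	}()

	select {
	case <-stopChan:
		fmt.Println("stopped cleanly")
	case <-time.After(15 * time.Second): // mirrors waiting out a full dial timeout
		fmt.Println("stop timeout, forcing stop")
	}
}
```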
```diff
@@ -223,8 +223,8 @@ func TestServerStopTimeout(t *testing.T) {
 
     select {
     case <-stopChan:
-    case <-time.After(10 * time.Second):
-        t.Error("server should be shutdown in 10 seconds")
+    case <-time.After(defaultDialTimeout + 1*time.Second):
+        t.Error("server should be shutdown in defaultDialTimeout + 1 seconds")
     }
 }
```
```diff
@@ -23,7 +23,7 @@ import (
 const (
     VersionMajor = 1  // Major version component of the current release
     VersionMinor = 3  // Minor version component of the current release
-    VersionPatch = 8  // Patch version component of the current release
+    VersionPatch = 9  // Patch version component of the current release
     VersionMeta  = "" // Version metadata to append to the version string
 )
```
```diff
@@ -355,7 +355,7 @@ func (db *Database) SetBufferSize(size int) error {
 }
 
 // Head return the top non-fork difflayer/disklayer root hash for rewinding.
-// It's only supported by path-based database and will return an error for
+// It's only supported by path-based database and will return empty hash for
 // others.
 func (db *Database) Head() common.Hash {
     pdb, ok := db.backend.(*pathdb.Database)
@@ -364,3 +364,15 @@ func (db *Database) Head() common.Hash {
     }
     return pdb.Head()
 }
+
+// GetAllHash returns all MPT root hash in diffLayer and diskLayer.
+// It's only supported by path-based database and will return nil for
+// others.
+func (db *Database) GetAllRooHash() [][]string {
+    pdb, ok := db.backend.(*pathdb.Database)
+    if !ok {
+        log.Error("Not supported")
+        return nil
+    }
+    return pdb.GetAllRooHash()
+}
```
```diff
@@ -226,7 +226,7 @@ func (nc *nodecache) node(owner common.Hash, path []byte, hash common.Hash) (*tr
     }
     if n.Hash != hash {
         dirtyFalseMeter.Mark(1)
-        log.Error("Unexpected trie node in node buffer", "owner", owner, "path", path, "expect", hash, "got", n.Hash)
+        log.Error("Unexpected trie node in async node buffer", "owner", owner, "path", path, "expect", hash, "got", n.Hash)
         return nil, newUnexpectedNodeError("dirty", hash, n.Hash, owner, path, n.Blob)
     }
     return n, nil
```
```diff
@@ -20,6 +20,8 @@ import (
     "errors"
     "fmt"
     "io"
+    "sort"
+    "strconv"
     "sync"
     "time"
 
@@ -441,3 +443,24 @@ func (db *Database) Head() common.Hash {
     defer db.lock.Unlock()
     return db.tree.front()
 }
+
+// GetAllRooHash returns all diffLayer and diskLayer root hash
+func (db *Database) GetAllRooHash() [][]string {
+    db.lock.Lock()
+    defer db.lock.Unlock()
+
+    data := make([][]string, 0, len(db.tree.layers))
+    for _, v := range db.tree.layers {
+        if dl, ok := v.(*diffLayer); ok {
+            data = append(data, []string{fmt.Sprintf("%d", dl.block), dl.rootHash().String()})
+        }
+    }
+    sort.Slice(data, func(i, j int) bool {
+        block1, _ := strconv.Atoi(data[i][0])
+        block2, _ := strconv.Atoi(data[j][0])
+        return block1 > block2
+    })
+
+    data = append(data, []string{"-1", db.tree.bottom().rootHash().String()})
+    return data
+}
```
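The ordering trick in `GetAllRooHash`, shown in isolation: block numbers are kept as strings for the table writer but must sort numerically, newest first, with the disk layer pinned last under the sentinel `"-1"`. Self-contained sketch with made-up hashes:

```go
package main

import (
	"fmt"
	"sort"
	"strconv"
)

func main() {
	data := [][]string{
		{"100", "0xaa"},
		{"102", "0xcc"},
		{"101", "0xbb"},
	}
	sort.Slice(data, func(i, j int) bool {
		bi, _ := strconv.Atoi(data[i][0])
		bj, _ := strconv.Atoi(data[j][0])
		return bi > bj // descending: newest diff layer first
	})
	data = append(data, []string{"-1", "0xdd"}) // disk layer row goes last
	fmt.Println(data)                           // [[102 0xcc] [101 0xbb] [100 0xaa] [-1 0xdd]]
}
```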