// Copyright 2015 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net"
	"os"
	"path"
	"runtime"
	"strconv"
	"strings"
	"sync/atomic"
	"time"

	"github.com/olekukonko/tablewriter"
	"github.com/urfave/cli/v2"

	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/eth"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/internal/era"
	"github.com/ethereum/go-ethereum/internal/flags"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/triedb"
	"github.com/ethereum/go-ethereum/triedb/pathdb"
)

var (
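	// Typical invocation (illustrative): geth init ./genesis.json
	// The genesis spec is written into both the full and light chain databases under the datadir.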
	initCommand = &cli.Command{
		Action:    initGenesis,
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: flags.Merge([]cli.Flag{
			utils.CachePreimagesFlag,
			utils.OverrideBohr,
			utils.OverrideVerkle,
			utils.MultiDataBaseFlag,
		}, utils.DatabaseFlags),
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
	initNetworkCommand = &cli.Command{
		Action:    initNetwork,
		Name:      "init-network",
		Usage:     "Bootstrap and initialize a new genesis block, nodekey and config files for network nodes",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.InitNetworkDir,
			utils.InitNetworkPort,
			utils.InitNetworkSize,
			utils.InitNetworkIps,
			configFileFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init-network command initializes a new genesis block, a definition for the
network, and config files for the network nodes.

It expects the genesis file as argument.`,
	}
	dumpGenesisCommand = &cli.Command{
		Action:    dumpGenesis,
		Name:      "dumpgenesis",
		Usage:     "Dumps genesis block JSON configuration to stdout",
		ArgsUsage: "",
		Flags:     append([]cli.Flag{utils.DataDirFlag}, utils.NetworkFlags...),
		Description: `
The dumpgenesis command prints the genesis configuration of the network preset
if one is set. Otherwise it prints the genesis from the datadir.`,
	}
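	// Typical invocation (illustrative): geth import ./chain.rlp
	// Several files may be given; a failure in one file does not abort the rest.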
	importCommand = &cli.Command{
		Action:    importChain,
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: flags.Merge([]cli.Flag{
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.GCModeFlag,
			utils.SnapshotFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
			utils.MetricsEnabledFlag,
			utils.MetricsEnabledExpensiveFlag,
			utils.MetricsHTTPFlag,
			utils.MetricsPortFlag,
			utils.MetricsEnableInfluxDBFlag,
			utils.MetricsEnableInfluxDBV2Flag,
			utils.MetricsInfluxDBEndpointFlag,
			utils.MetricsInfluxDBDatabaseFlag,
			utils.MetricsInfluxDBUsernameFlag,
			utils.MetricsInfluxDBPasswordFlag,
			utils.MetricsInfluxDBTagsFlag,
			utils.MetricsInfluxDBTokenFlag,
			utils.MetricsInfluxDBBucketFlag,
			utils.MetricsInfluxDBOrganizationFlag,
			utils.TxLookupLimitFlag,
			utils.TransactionHistoryFlag,
			utils.StateHistoryFlag,
		}, utils.DatabaseFlags),
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.

If only one file is used, an import error will result in failure. If several files are
used, processing will proceed even if an individual file fails to import.`,
	}
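	// Typical invocation (illustrative): geth export ./chain.rlp.gz 0 1000
	// The optional block range selects the first and last block to write.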
	exportCommand = &cli.Command{
		Action:    exportChain,
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: flags.Merge([]cli.Flag{
			utils.CacheFlag,
			utils.SyncModeFlag,
		}, utils.DatabaseFlags),
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended to
if it already exists. If the file ends with .gz, the output will
be gzipped.`,
	}
	importHistoryCommand = &cli.Command{
		Action:    importHistory,
		Name:      "import-history",
		Usage:     "Import an Era archive",
		ArgsUsage: "<dir>",
		Flags: flags.Merge([]cli.Flag{
			utils.TxLookupLimitFlag,
		},
			utils.DatabaseFlags,
			utils.NetworkFlags,
		),
		Description: `
The import-history command will import blocks and their corresponding receipts
from Era archives.
`,
	}
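	// Typical invocation (illustrative): geth export-history ./era 0 8191
	// writes blocks 0 through 8191 and their receipts as Era1 archives into ./era.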
	exportHistoryCommand = &cli.Command{
		Action:    exportHistory,
		Name:      "export-history",
		Usage:     "Export blockchain history to Era archives",
		ArgsUsage: "<dir> <first> <last>",
		Flags:     flags.Merge(utils.DatabaseFlags),
		Description: `
The export-history command will export blocks and their corresponding receipts
into Era archives. Eras are typically packaged in steps of 8192 blocks.
`,
	}
	importPreimagesCommand = &cli.Command{
		Action:    importPreimages,
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: flags.Merge([]cli.Flag{
			utils.CacheFlag,
			utils.SyncModeFlag,
		}, utils.DatabaseFlags),
		Description: `
The import-preimages command imports hash preimages from an RLP encoded stream.
It's deprecated, please use "geth db import" instead.
`,
	}

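	// Typical path-mode workflow (illustrative): run "geth dump-roothash" to list
	// the retained state roots, then "geth dump <blockNum>" for one of those blocks.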
	dumpCommand = &cli.Command{
		Action:    dump,
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[? <blockHash> | <blockNum>]",
		Flags: flags.Merge([]cli.Flag{
			utils.CacheFlag,
			utils.IterativeOutputFlag,
			utils.ExcludeCodeFlag,
			utils.ExcludeStorageFlag,
			utils.IncludeIncompletesFlag,
			utils.StartKeyFlag,
			utils.DumpLimitFlag,
		}, utils.DatabaseFlags),
		Description: `
This command dumps out the state for a given block (or latest, if none provided).
If you use the "dump" command in path mode, first run the "dump-roothash" command
to list all available state roots.
`,
	}
	dumpRootHashCommand = &cli.Command{
		Action: dumpAllRootHashInPath,
		Name:   "dump-roothash",
		Usage:  "Dump all available state roots in path mode",
		Flags:  flags.Merge([]cli.Flag{}, utils.DatabaseFlags),
		Description: `
The dump-roothash command dumps all available state roots in path mode.
Note that path mode keeps at most 129 recent blocks (those belonging to a diffLayer
or the diskLayer), so the blockNumber or blockHash passed to "dump" must be located
in a diffLayer or the diskLayer. "geth" will print all available block numbers and
their state root hashes; the block hash can then be queried by block number.
`,
	}
)

// initGenesis initialises the given JSON-format genesis file and writes it as
// the zero'd block (i.e. genesis), or fails hard if it can't succeed.
func initGenesis(ctx *cli.Context) error {
	if ctx.Args().Len() != 1 {
		utils.Fatalf("need genesis.json file as the only argument")
	}
	genesisPath := ctx.Args().First()
	if len(genesisPath) == 0 {
		utils.Fatalf("invalid path to genesis file")
	}
	file, err := os.Open(genesisPath)
	if err != nil {
		utils.Fatalf("Failed to read genesis file: %v", err)
	}
	defer file.Close()

	genesis := new(core.Genesis)
	if err := json.NewDecoder(file).Decode(genesis); err != nil {
		utils.Fatalf("invalid genesis file: %v", err)
	}
	// Open and initialise both full and light databases
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	var overrides core.ChainOverrides
	if ctx.IsSet(utils.OverrideBohr.Name) {
		v := ctx.Uint64(utils.OverrideBohr.Name)
		overrides.OverrideBohr = &v
	}
	if ctx.IsSet(utils.OverrideVerkle.Name) {
		v := ctx.Uint64(utils.OverrideVerkle.Name)
		overrides.OverrideVerkle = &v
	}
	for _, name := range []string{"chaindata", "lightchaindata"} {
		chaindb, err := stack.OpenDatabaseWithFreezer(name, 0, 0, ctx.String(utils.AncientFlag.Name), "", false, false, false, false)
		if err != nil {
			utils.Fatalf("Failed to open database: %v", err)
		}
		defer chaindb.Close()

		// If the multi-database flag is set, open separate state and block databases.
		if ctx.IsSet(utils.MultiDataBaseFlag.Name) {
			statediskdb, dbErr := stack.OpenDatabaseWithFreezer(name+"/state", 0, 0, "", "", false, false, false, false)
			if dbErr != nil {
				utils.Fatalf("Failed to open separate trie database: %v", dbErr)
			}
			chaindb.SetStateStore(statediskdb)
			blockdb, err := stack.OpenDatabaseWithFreezer(name+"/block", 0, 0, "", "", false, false, false, false)
			if err != nil {
				utils.Fatalf("Failed to open separate block database: %v", err)
			}
			chaindb.SetBlockStore(blockdb)
			log.Warn("Multi-database is an experimental feature")
		}

		triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false, genesis.IsVerkle())
		defer triedb.Close()

		_, hash, err := core.SetupGenesisBlockWithOverride(chaindb, triedb, genesis, &overrides)
		if err != nil {
			utils.Fatalf("Failed to write genesis block: %v", err)
		}
		log.Info("Successfully wrote genesis state", "database", name, "hash", hash.String())
	}
	return nil
}

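// parseIps splits a comma-separated list of node IPs and checks that it matches
// the requested network size; with an empty list every node defaults to 127.0.0.1.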
func parseIps(ipStr string, size int) ([]string, error) {
	var ips []string
	if len(ipStr) != 0 {
		ips = strings.Split(ipStr, ",")
		if len(ips) != size {
			return nil, errors.New("mismatch of size and length of ips")
		}
		for i := 0; i < size; i++ {
			_, err := net.ResolveIPAddr("", ips[i])
			if err != nil {
				return nil, errors.New("invalid format of ip")
			}
		}
	} else {
		ips = make([]string, size)
		for i := 0; i < size; i++ {
			ips[i] = "127.0.0.1"
		}
	}
	return ips, nil
}

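// createPorts assigns a P2P listen port per node: consecutive ports when all nodes
// share localhost, the same port when each node has its own IP.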
func createPorts(ipStr string, port int, size int) []int {
	ports := make([]int, size)
	if len(ipStr) == 0 { // localhost, so use different ports
		for i := 0; i < size; i++ {
			ports[i] = port + i
		}
	} else { // different machines, keep the same port
		for i := 0; i < size; i++ {
			ports[i] = port
		}
	}
	return ports
}

// createNodeConfig creates the config for node i in the cluster.
func createNodeConfig(baseConfig gethConfig, enodes []*enode.Node, ip string, port int, size int, i int) gethConfig {
	baseConfig.Node.HTTPHost = ip
	baseConfig.Node.P2P.ListenAddr = fmt.Sprintf(":%d", port)
	baseConfig.Node.P2P.BootstrapNodes = make([]*enode.Node, size-1)
	// Set the P2P connections between this node and the other nodes
	for j := 0; j < i; j++ {
		baseConfig.Node.P2P.BootstrapNodes[j] = enodes[j]
	}
	for j := i + 1; j < size; j++ {
		baseConfig.Node.P2P.BootstrapNodes[j-1] = enodes[j]
	}
	return baseConfig
}

// createNodeConfigs creates the configs for all nodes in the cluster.
func createNodeConfigs(baseConfig gethConfig, initDir string, ips []string, ports []int, size int) ([]gethConfig, error) {
	// Create the nodes
	enodes := make([]*enode.Node, size)
	for i := 0; i < size; i++ {
		nodeConfig := baseConfig.Node
		nodeConfig.DataDir = path.Join(initDir, fmt.Sprintf("node%d", i))
		stack, err := node.New(&nodeConfig)
		if err != nil {
			return nil, err
		}
		pk := stack.Config().NodeKey()
		enodes[i] = enode.NewV4(&pk.PublicKey, net.ParseIP(ips[i]), ports[i], ports[i])
	}

	// Create the configs
	configs := make([]gethConfig, size)
	for i := 0; i < size; i++ {
		configs[i] = createNodeConfig(baseConfig, enodes, ips[i], ports[i], size, i)
	}
	return configs, nil
}

// initNetwork will bootstrap and initialize a new genesis block, and the nodekey and config files for the network nodes
func initNetwork(ctx *cli.Context) error {
	initDir := ctx.String(utils.InitNetworkDir.Name)
	if len(initDir) == 0 {
		utils.Fatalf("init.dir is required")
	}
	size := ctx.Int(utils.InitNetworkSize.Name)
	if size <= 0 {
		utils.Fatalf("size should be greater than 0")
	}
	port := ctx.Int(utils.InitNetworkPort.Name)
	if port <= 0 {
		utils.Fatalf("port should be greater than 0")
	}
	ipStr := ctx.String(utils.InitNetworkIps.Name)
	cfgFile := ctx.String(configFileFlag.Name)

	if len(cfgFile) == 0 {
		utils.Fatalf("config file is required")
	}

	ips, err := parseIps(ipStr, size)
	if err != nil {
		utils.Fatalf("Failed to parse ips string: %v", err)
	}

	ports := createPorts(ipStr, port, size)

	// Make sure we have a valid genesis JSON
	genesisPath := ctx.Args().First()
	if len(genesisPath) == 0 {
		utils.Fatalf("Must supply path to genesis JSON file")
	}
	inGenesisFile, err := os.Open(genesisPath)
	if err != nil {
		utils.Fatalf("Failed to read genesis file: %v", err)
	}
	defer inGenesisFile.Close()

	genesis := new(core.Genesis)
	if err := json.NewDecoder(inGenesisFile).Decode(genesis); err != nil {
		utils.Fatalf("invalid genesis file: %v", err)
	}

	// load config
	var config gethConfig
	err = loadConfig(cfgFile, &config)
	if err != nil {
		return err
	}

	configs, err := createNodeConfigs(config, initDir, ips, ports, size)
	if err != nil {
		utils.Fatalf("Failed to create node configs: %v", err)
	}

	for i := 0; i < size; i++ {
		// Write config.toml
		configBytes, err := tomlSettings.Marshal(configs[i])
		if err != nil {
			return err
		}
		configFile, err := os.OpenFile(path.Join(initDir, fmt.Sprintf("node%d", i), "config.toml"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
		if err != nil {
			return err
		}
		defer configFile.Close()
		configFile.Write(configBytes)

		// Write the input genesis.json to the node's directory
		outGenesisFile, err := os.OpenFile(path.Join(initDir, fmt.Sprintf("node%d", i), "genesis.json"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
		if err != nil {
			return err
		}
		_, err = inGenesisFile.Seek(0, io.SeekStart)
		if err != nil {
			return err
		}
		_, err = io.Copy(outGenesisFile, inGenesisFile)
		if err != nil {
			return err
		}
	}
	return nil
}

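// dumpGenesis writes the genesis spec of the selected network preset, or otherwise
// the genesis stored in the datadir, as JSON to stdout.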
func dumpGenesis(ctx *cli.Context) error {
	// check if there is a testnet preset enabled
	var genesis *core.Genesis
	if utils.IsNetworkPreset(ctx) {
		genesis = utils.MakeGenesis(ctx)
	} else if ctx.IsSet(utils.DeveloperFlag.Name) && !ctx.IsSet(utils.DataDirFlag.Name) {
		genesis = core.DeveloperGenesisBlock(11_500_000, nil)
	}

	if genesis != nil {
		if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil {
			utils.Fatalf("could not encode genesis: %s", err)
		}
		return nil
	}

	// dump whatever already exists in the datadir
	stack, _ := makeConfigNode(ctx)
	for _, name := range []string{"chaindata", "lightchaindata"} {
		db, err := stack.OpenDatabase(name, 0, 0, "", true)
		if err != nil {
			if !os.IsNotExist(err) {
				return err
			}
			continue
		}
		// set the separate state & block database
		if stack.CheckIfMultiDataBase() && err == nil {
			stateDiskDb := utils.MakeStateDataBase(ctx, stack, true, false)
			db.SetStateStore(stateDiskDb)
			blockDb := utils.MakeBlockDatabase(ctx, stack, true, false)
			db.SetBlockStore(blockDb)
		}
		genesis, err := core.ReadGenesis(db)
		if err != nil {
			utils.Fatalf("failed to read genesis: %s", err)
		}
		db.Close()

		if err := json.NewEncoder(os.Stdout).Encode(*genesis); err != nil {
			utils.Fatalf("could not encode stored genesis: %s", err)
		}
		return nil
	}
	if ctx.IsSet(utils.DataDirFlag.Name) {
		utils.Fatalf("no existing datadir at %s", stack.Config().DataDir)
	}
	utils.Fatalf("no network preset provided, and no genesis exists in the default datadir")
	return nil
}

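// importChain imports one or more RLP-encoded block files into the local chain and
// reports timing, memory and database statistics for the run.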
func importChain(ctx *cli.Context) error {
	if ctx.Args().Len() < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	// Start metrics export if enabled
	utils.SetupMetrics(ctx)
	// Start system runtime metrics collection
	go metrics.CollectProcessMetrics(3 * time.Second)

	stack, cfg := makeConfigNode(ctx)
	defer stack.Close()

	backend, err := eth.New(stack, &cfg.Eth)
	if err != nil {
		return err
	}

	chain := backend.BlockChain()
	db := backend.ChainDb()
	defer db.Close()

	// Start periodically gathering memory profiles
	var peakMemAlloc, peakMemSys atomic.Uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if peakMemAlloc.Load() < stats.Alloc {
				peakMemAlloc.Store(stats.Alloc)
			}
			if peakMemSys.Load() < stats.Sys {
				peakMemSys.Store(stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	var importErr error

	if ctx.Args().Len() == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			importErr = err
			log.Error("Import error", "err", err)
		}
	} else {
		for _, arg := range ctx.Args().Slice() {
			if err := utils.ImportChain(chain, arg); err != nil {
				importErr = err
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats, mostly to see the import thrashing
	showLeveldbStats(db)

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(peakMemAlloc.Load())/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(peakMemSys.Load())/1024/1024)
	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))

	if ctx.Bool(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err := db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	showLeveldbStats(db)
	return importErr
}

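// exportChain writes the local chain, or the block range given by the optional
// second and third arguments, to an RLP file (gzipped if the name ends in .gz).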
func exportChain(ctx *cli.Context) error {
	if ctx.Args().Len() < 1 {
		utils.Fatalf("This command requires an argument.")
	}

	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	chain, db := utils.MakeChain(ctx, stack, true)
	defer db.Close()
	start := time.Now()

	var err error
	fp := ctx.Args().First()
	if ctx.Args().Len() < 3 {
		err = utils.ExportChain(chain, fp)
	} else {
		// This can be improved to allow for numbers larger than 9223372036854775807
		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
		if ferr != nil || lerr != nil {
			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
		}
		if first < 0 || last < 0 {
			utils.Fatalf("Export error: block number must be greater than 0\n")
		}
		if head := chain.CurrentSnapBlock(); uint64(last) > head.Number.Uint64() {
			utils.Fatalf("Export error: block number %d larger than head block %d\n", uint64(last), head.Number.Uint64())
		}
		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
	}
	if err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}

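// importHistory imports blocks and receipts from Era1 archives in the given
// directory, inferring the network from the archive names when no preset flag is set.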
func importHistory(ctx *cli.Context) error {
	if ctx.Args().Len() != 1 {
		utils.Fatalf("usage: %s", ctx.Command.ArgsUsage)
	}

	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	chain, db := utils.MakeChain(ctx, stack, false)
	defer db.Close()

	var (
		start   = time.Now()
		dir     = ctx.Args().Get(0)
		network string
	)

	// Determine network.
	if utils.IsNetworkPreset(ctx) {
		switch {
		case ctx.Bool(utils.BSCMainnetFlag.Name):
			network = "mainnet"
		case ctx.Bool(utils.ChapelFlag.Name):
			network = "chapel"
		}
	} else {
		// No network flag set, try to determine network based on files
		// present in directory.
		var networks []string
		for _, n := range params.NetworkNames {
			entries, err := era.ReadDir(dir, n)
			if err != nil {
				return fmt.Errorf("error reading %s: %w", dir, err)
			}
			if len(entries) > 0 {
				networks = append(networks, n)
			}
		}
		if len(networks) == 0 {
			return fmt.Errorf("no era1 files found in %s", dir)
		}
		if len(networks) > 1 {
			return fmt.Errorf("multiple networks found, use a network flag to specify desired network")
		}
		network = networks[0]
	}

	if err := utils.ImportHistory(chain, db, dir, network); err != nil {
		return err
	}
	fmt.Printf("Import done in %v\n", time.Since(start))
	return nil
}

// exportHistory exports chain history in Era archives at a specified
// directory.
func exportHistory(ctx *cli.Context) error {
	if ctx.Args().Len() != 3 {
		utils.Fatalf("usage: %s", ctx.Command.ArgsUsage)
	}

	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	chain, _ := utils.MakeChain(ctx, stack, true)
	start := time.Now()

	var (
		dir         = ctx.Args().Get(0)
		first, ferr = strconv.ParseInt(ctx.Args().Get(1), 10, 64)
		last, lerr  = strconv.ParseInt(ctx.Args().Get(2), 10, 64)
	)
	if ferr != nil || lerr != nil {
		utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
	}
	if first < 0 || last < 0 {
		utils.Fatalf("Export error: block number must be greater than 0\n")
	}
	if head := chain.CurrentSnapBlock(); uint64(last) > head.Number.Uint64() {
		utils.Fatalf("Export error: block number %d larger than head block %d\n", uint64(last), head.Number.Uint64())
	}
	err := utils.ExportHistory(chain, dir, uint64(first), uint64(last), uint64(era.MaxEra1Size))
	if err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}

// importPreimages imports preimage data from the specified file.
// It is deprecated; the export function has been removed, but the
// import function is kept around for the time being so that older
// file formats can still be imported.
func importPreimages(ctx *cli.Context) error {
	if ctx.Args().Len() < 1 {
		utils.Fatalf("This command requires an argument.")
	}

	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack, false, false)
	defer db.Close()
	start := time.Now()

	if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
		utils.Fatalf("Import error: %v\n", err)
	}
	fmt.Printf("Import done in %v\n", time.Since(start))
	return nil
}

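// parseDumpConfig resolves the dump target (block hash, block number or the latest
// available state) plus the dump-related flags into a state.DumpConfig.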
func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, ethdb.Database, common.Hash, error) {
	if ctx.NArg() > 1 {
		return nil, nil, common.Hash{}, fmt.Errorf("expected 1 argument (number or hash), got %d", ctx.NArg())
	}

	db := utils.MakeChainDatabase(ctx, stack, true, false)
	scheme, err := rawdb.ParseStateScheme(ctx.String(utils.StateSchemeFlag.Name), db)
	if err != nil {
		return nil, nil, common.Hash{}, err
	}
	if scheme == rawdb.PathScheme {
		fmt.Println("You are using geth dump in path mode, please use `geth dump-roothash` command to get all available blocks.")
	}

	header := &types.Header{}
	if ctx.NArg() == 1 {
		arg := ctx.Args().First()
		if hashish(arg) {
			hash := common.HexToHash(arg)
			if number := rawdb.ReadHeaderNumber(db, hash); number != nil {
				header = rawdb.ReadHeader(db, hash, *number)
			} else {
				return nil, nil, common.Hash{}, fmt.Errorf("block %x not found", hash)
			}
		} else {
			number, err := strconv.ParseUint(arg, 10, 64)
			if err != nil {
				return nil, nil, common.Hash{}, err
			}
			if hash := rawdb.ReadCanonicalHash(db, number); hash != (common.Hash{}) {
				header = rawdb.ReadHeader(db, hash, number)
			} else {
				return nil, nil, common.Hash{}, fmt.Errorf("header for block %d not found", number)
			}
		}
	} else {
		// Use latest
		// TODO:: if versa, scheme = VersaScheme
		if scheme == rawdb.PathScheme {
			triedb := triedb.NewDatabase(db, &triedb.Config{PathDB: utils.PathDBConfigAddJournalFilePath(stack, pathdb.ReadOnly)})
			defer triedb.Close()
			if stateRoot := triedb.Head(); stateRoot != (common.Hash{}) {
				header.Root = stateRoot
			} else {
				return nil, nil, common.Hash{}, errors.New("no top state root hash in path db")
			}
		} else {
			header = rawdb.ReadHeadHeader(db)
		}
	}
	if header == nil {
		return nil, nil, common.Hash{}, errors.New("no head block found")
	}

	startArg := common.FromHex(ctx.String(utils.StartKeyFlag.Name))
	var start common.Hash
	switch len(startArg) {
	case 0: // common.Hash
	case 32:
		start = common.BytesToHash(startArg)
	case 20:
		start = crypto.Keccak256Hash(startArg)
		log.Info("Converting start-address to hash", "address", common.BytesToAddress(startArg), "hash", start.Hex())
	default:
		return nil, nil, common.Hash{}, fmt.Errorf("invalid start argument: %x. 20 or 32 hex-encoded bytes required", startArg)
	}

	var conf = &state.DumpConfig{
		SkipCode:          ctx.Bool(utils.ExcludeCodeFlag.Name),
		SkipStorage:       ctx.Bool(utils.ExcludeStorageFlag.Name),
		OnlyWithAddresses: !ctx.Bool(utils.IncludeIncompletesFlag.Name),
		Start:             start.Bytes(),
		Max:               ctx.Uint64(utils.DumpLimitFlag.Name),
	}
	conf.StateScheme = scheme
	log.Info("State dump configured", "block", header.Number, "hash", header.Hash().Hex(),
		"skipcode", conf.SkipCode, "skipstorage", conf.SkipStorage, "start", hexutil.Encode(conf.Start),
		"limit", conf.Max, "state scheme", conf.StateScheme)
	return conf, db, header.Root, nil
}

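// dump prints the state of the requested block as JSON, either iteratively
// (one account per line) or as a single document.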
func dump(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	conf, db, root, err := parseDumpConfig(ctx, stack)
	if err != nil {
		return err
	}
	defer db.Close()
	triedb := utils.MakeTrieDatabase(ctx, stack, db, true, true, false) // always enable preimage lookup
	defer triedb.Close()

	// TODO:: state.NewDatabase internally compatible with versa is sufficient.
	state, err := state.New(root, state.NewDatabaseWithNodeDB(db, triedb), nil)
	if err != nil {
		return err
	}
	if ctx.Bool(utils.IterativeOutputFlag.Name) {
		state.IterativeDump(conf, json.NewEncoder(os.Stdout))
	} else {
		fmt.Println(string(state.Dump(conf)))
	}
	return nil
}

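// dumpAllRootHashInPath prints a table of every block number and state root that
// is still available in the path database (diff layers plus the disk layer).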
func dumpAllRootHashInPath(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()
	db := utils.MakeChainDatabase(ctx, stack, true, false)
	defer db.Close()
	// TODO:: ignore cmd
	triedb := triedb.NewDatabase(db, &triedb.Config{PathDB: utils.PathDBConfigAddJournalFilePath(stack, pathdb.ReadOnly)})
	defer triedb.Close()

	scheme, err := rawdb.ParseStateScheme(ctx.String(utils.StateSchemeFlag.Name), db)
	if err != nil {
		return err
	}
	if scheme == rawdb.HashScheme {
		return errors.New("incorrect state scheme, you should use it in path mode")
	}

	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader([]string{"Block Number", "Block State Root Hash"})
	table.AppendBulk(triedb.GetAllRooHash())
	table.Render()
	return nil
}

// hashish returns true for strings that look like hashes.
func hashish(x string) bool {
	_, err := strconv.Atoi(x)
	return err != nil
}