2015-07-07 03:54:22 +03:00
|
|
|
// Copyright 2015 The go-ethereum Authors
|
|
|
|
// This file is part of go-ethereum.
|
|
|
|
//
|
|
|
|
// go-ethereum is free software: you can redistribute it and/or modify
|
|
|
|
// it under the terms of the GNU General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
//
|
|
|
|
// go-ethereum is distributed in the hope that it will be useful,
|
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
2015-07-22 19:48:40 +03:00
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
2015-07-07 03:54:22 +03:00
|
|
|
// GNU General Public License for more details.
|
|
|
|
//
|
|
|
|
// You should have received a copy of the GNU General Public License
|
2015-07-22 19:48:40 +03:00
|
|
|
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
2015-07-07 03:54:22 +03:00
|
|
|
|
2015-05-27 14:43:49 +03:00
|
|
|
package main
|
|
|
|
|
|
|
|
import (
|
2017-03-02 16:03:33 +03:00
|
|
|
"encoding/json"
|
2015-05-27 14:43:49 +03:00
|
|
|
"fmt"
|
|
|
|
"os"
|
2016-10-21 11:40:00 +03:00
|
|
|
"runtime"
|
2015-05-27 14:43:49 +03:00
|
|
|
"strconv"
|
2016-10-21 11:40:00 +03:00
|
|
|
"sync/atomic"
|
2015-05-27 14:43:49 +03:00
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/ethereum/go-ethereum/cmd/utils"
|
|
|
|
"github.com/ethereum/go-ethereum/common"
|
|
|
|
"github.com/ethereum/go-ethereum/core"
|
2018-09-24 15:57:49 +03:00
|
|
|
"github.com/ethereum/go-ethereum/core/rawdb"
|
2015-05-27 14:43:49 +03:00
|
|
|
"github.com/ethereum/go-ethereum/core/state"
|
|
|
|
"github.com/ethereum/go-ethereum/core/types"
|
2017-07-10 17:48:42 +03:00
|
|
|
"github.com/ethereum/go-ethereum/eth/downloader"
|
|
|
|
"github.com/ethereum/go-ethereum/event"
|
2017-02-22 15:10:07 +03:00
|
|
|
"github.com/ethereum/go-ethereum/log"
|
2020-04-07 11:23:57 +03:00
|
|
|
"github.com/ethereum/go-ethereum/metrics"
|
2019-05-13 15:28:01 +03:00
|
|
|
"github.com/ethereum/go-ethereum/trie"
|
2016-06-09 12:44:42 +03:00
|
|
|
"gopkg.in/urfave/cli.v1"
|
2015-05-27 14:43:49 +03:00
|
|
|
)
|
|
|
|
|
|
|
|
var (
	// initCommand writes a user-supplied genesis specification into the node's
	// databases, defining the network the node will participate in.
	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initGenesis),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
	// dumpGenesisCommand prints the genesis spec of the selected network as JSON.
	dumpGenesisCommand = cli.Command{
		Action:    utils.MigrateFlags(dumpGenesis),
		Name:      "dumpgenesis",
		Usage:     "Dumps genesis block JSON configuration to stdout",
		ArgsUsage: "",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The dumpgenesis command dumps the genesis block configuration in JSON format to stdout.`,
	}
	// importCommand ingests one or more RLP-encoded block files into the chain.
	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.GCModeFlag,
			utils.SnapshotFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
			utils.MetricsEnabledFlag,
			utils.MetricsEnabledExpensiveFlag,
			utils.MetricsHTTPFlag,
			utils.MetricsPortFlag,
			utils.MetricsEnableInfluxDBFlag,
			utils.MetricsInfluxDBEndpointFlag,
			utils.MetricsInfluxDBDatabaseFlag,
			utils.MetricsInfluxDBUsernameFlag,
			utils.MetricsInfluxDBPasswordFlag,
			utils.MetricsInfluxDBTagsFlag,
			utils.TxLookupLimitFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.

If only one file is used, import error will result in failure. If several files are used,
processing will proceed even if an individual RLP-file import failure occurs.`,
	}
	// exportCommand writes the chain (or a block range) to an RLP file.
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing. If the file ends with .gz, the output will
be gzipped.`,
	}
	// importPreimagesCommand loads trie-hash preimages from an RLP stream.
	importPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(importPreimages),
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import-preimages command imports hash preimages from an RLP encoded stream.`,
	}
	// exportPreimagesCommand streams the preimage database out as RLP.
	exportPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(exportPreimages),
		Name:      "export-preimages",
		Usage:     "Export the preimage database into an RLP stream",
		ArgsUsage: "<dumpfile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The export-preimages command export hash preimages to an RLP encoded stream`,
	}
	// copydbCommand syncs a fresh local chain from an existing chaindata folder.
	copydbCommand = cli.Command{
		Action:    utils.MigrateFlags(copyDb),
		Name:      "copydb",
		Usage:     "Create a local chain from a target chaindata folder",
		ArgsUsage: "<sourceChaindataDir>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.FakePoWFlag,
			utils.MainnetFlag,
			utils.RopstenFlag,
			utils.RinkebyFlag,
			utils.TxLookupLimitFlag,
			utils.GoerliFlag,
			utils.YoloV3Flag,
			utils.LegacyTestnetFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The first argument must be the directory containing the blockchain to download from`,
	}
	// dumpCommand prints the full state of one or more blocks.
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[<blockHash> | <blockNum>]...",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.IterativeOutputFlag,
			utils.ExcludeCodeFlag,
			utils.ExcludeStorageFlag,
			utils.IncludeIncompletesFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The arguments are interpreted as block numbers or hashes.
Use "ethereum dump 0" to dump the genesis block.`,
	}
)
|
|
|
|
|
2016-11-30 14:34:24 +03:00
|
|
|
// initGenesis will initialise the given JSON format genesis file and writes it as
// the zero'd block (i.e. genesis) or will fail hard if it can't succeed.
func initGenesis(ctx *cli.Context) error {
	// Make sure we have a valid genesis JSON
	genesisPath := ctx.Args().First()
	if len(genesisPath) == 0 {
		utils.Fatalf("Must supply path to genesis JSON file")
	}
	file, err := os.Open(genesisPath)
	if err != nil {
		utils.Fatalf("Failed to read genesis file: %v", err)
	}
	defer file.Close()

	genesis := new(core.Genesis)
	if err := json.NewDecoder(file).Decode(genesis); err != nil {
		utils.Fatalf("invalid genesis file: %v", err)
	}
	// Open and initialise both full and light databases
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	// Write the same genesis into both the full node's and the light client's
	// database so either sync mode starts from an identical chain definition.
	for _, name := range []string{"chaindata", "lightchaindata"} {
		chaindb, err := stack.OpenDatabase(name, 0, 0, "")
		if err != nil {
			utils.Fatalf("Failed to open database: %v", err)
		}
		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
		if err != nil {
			utils.Fatalf("Failed to write genesis block: %v", err)
		}
		chaindb.Close()
		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
	}
	return nil
}
|
|
|
|
|
2020-02-04 13:49:13 +03:00
|
|
|
func dumpGenesis(ctx *cli.Context) error {
|
|
|
|
genesis := utils.MakeGenesis(ctx)
|
|
|
|
if genesis == nil {
|
|
|
|
genesis = core.DefaultGenesisBlock()
|
|
|
|
}
|
|
|
|
if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil {
|
|
|
|
utils.Fatalf("could not encode genesis")
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-06-10 11:23:00 +03:00
|
|
|
// importChain imports one or more RLP-encoded block files into the node's
// database, printing memory and database statistics afterwards and optionally
// compacting the database. It returns the last import error encountered (nil
// if every file imported cleanly), so multi-file runs proceed past failures.
func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	// Start metrics export if enabled
	utils.SetupMetrics(ctx)
	// Start system runtime metrics collection
	go metrics.CollectProcessMetrics(3 * time.Second)

	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	chain, db := utils.MakeChain(ctx, stack, false)
	defer db.Close()

	// Start periodically gathering memory profiles
	// NOTE(review): this sampler goroutine never exits; acceptable for a
	// one-shot CLI command since the process terminates after import.
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	// Holds the most recent import failure; multi-file imports keep going.
	var importErr error

	if len(ctx.Args()) == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			importErr = err
			log.Error("Import error", "err", err)
		}
	} else {
		for _, arg := range ctx.Args() {
			if err := utils.ImportChain(chain, arg); err != nil {
				importErr = err
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats mostly to see the import trashing
	showLeveldbStats(db)

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations: %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause: %v\n\n", time.Duration(mem.PauseTotalNs))

	if ctx.GlobalBool(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err := db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	showLeveldbStats(db)
	return importErr
}
|
|
|
|
|
2016-06-10 11:23:00 +03:00
|
|
|
func exportChain(ctx *cli.Context) error {
|
2015-06-06 16:50:23 +03:00
|
|
|
if len(ctx.Args()) < 1 {
|
2017-02-22 18:22:50 +03:00
|
|
|
utils.Fatalf("This command requires an argument.")
|
2015-05-27 14:43:49 +03:00
|
|
|
}
|
2020-08-06 08:02:05 +03:00
|
|
|
|
|
|
|
stack, _ := makeConfigNode(ctx)
|
2019-02-07 13:40:36 +03:00
|
|
|
defer stack.Close()
|
|
|
|
|
2020-05-11 18:58:43 +03:00
|
|
|
chain, _ := utils.MakeChain(ctx, stack, true)
|
2015-05-27 14:43:49 +03:00
|
|
|
start := time.Now()
|
2015-06-06 07:02:32 +03:00
|
|
|
|
|
|
|
var err error
|
2015-06-06 17:04:13 +03:00
|
|
|
fp := ctx.Args().First()
|
2015-06-06 07:02:32 +03:00
|
|
|
if len(ctx.Args()) < 3 {
|
2015-06-06 17:04:13 +03:00
|
|
|
err = utils.ExportChain(chain, fp)
|
2015-06-06 07:02:32 +03:00
|
|
|
} else {
|
|
|
|
// This can be improved to allow for numbers larger than 9223372036854775807
|
|
|
|
first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
|
|
|
|
last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
|
|
|
|
if ferr != nil || lerr != nil {
|
2017-02-22 18:22:50 +03:00
|
|
|
utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
|
2015-06-06 07:02:32 +03:00
|
|
|
}
|
2015-06-06 17:04:13 +03:00
|
|
|
if first < 0 || last < 0 {
|
2017-02-22 18:22:50 +03:00
|
|
|
utils.Fatalf("Export error: block number must be greater than 0\n")
|
2015-06-06 17:04:13 +03:00
|
|
|
}
|
|
|
|
err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
|
2015-06-06 07:02:32 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
if err != nil {
|
2017-02-22 18:22:50 +03:00
|
|
|
utils.Fatalf("Export error: %v\n", err)
|
2015-05-27 14:43:49 +03:00
|
|
|
}
|
2018-03-26 13:34:21 +03:00
|
|
|
fmt.Printf("Export done in %v\n", time.Since(start))
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// importPreimages imports preimage data from the specified file.
|
|
|
|
func importPreimages(ctx *cli.Context) error {
|
|
|
|
if len(ctx.Args()) < 1 {
|
|
|
|
utils.Fatalf("This command requires an argument.")
|
|
|
|
}
|
2020-08-06 10:24:36 +03:00
|
|
|
|
|
|
|
stack, _ := makeConfigNode(ctx)
|
2019-02-07 13:40:36 +03:00
|
|
|
defer stack.Close()
|
2018-03-26 13:34:21 +03:00
|
|
|
|
2018-09-24 15:57:49 +03:00
|
|
|
db := utils.MakeChainDatabase(ctx, stack)
|
2018-03-26 13:34:21 +03:00
|
|
|
start := time.Now()
|
2019-02-07 13:40:36 +03:00
|
|
|
|
2018-09-24 15:57:49 +03:00
|
|
|
if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
|
2018-09-19 13:29:40 +03:00
|
|
|
utils.Fatalf("Import error: %v\n", err)
|
2018-03-26 13:34:21 +03:00
|
|
|
}
|
2018-09-19 13:29:40 +03:00
|
|
|
fmt.Printf("Import done in %v\n", time.Since(start))
|
2018-03-26 13:34:21 +03:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// exportPreimages dumps the preimage data to specified json file in streaming way.
|
|
|
|
func exportPreimages(ctx *cli.Context) error {
|
|
|
|
if len(ctx.Args()) < 1 {
|
|
|
|
utils.Fatalf("This command requires an argument.")
|
|
|
|
}
|
2020-08-06 10:24:36 +03:00
|
|
|
|
|
|
|
stack, _ := makeConfigNode(ctx)
|
2019-02-07 13:40:36 +03:00
|
|
|
defer stack.Close()
|
2018-03-26 13:34:21 +03:00
|
|
|
|
2018-09-24 15:57:49 +03:00
|
|
|
db := utils.MakeChainDatabase(ctx, stack)
|
2018-03-26 13:34:21 +03:00
|
|
|
start := time.Now()
|
2019-02-07 13:40:36 +03:00
|
|
|
|
2018-09-24 15:57:49 +03:00
|
|
|
if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
|
2018-03-26 13:34:21 +03:00
|
|
|
utils.Fatalf("Export error: %v\n", err)
|
|
|
|
}
|
|
|
|
fmt.Printf("Export done in %v\n", time.Since(start))
|
2016-06-10 11:23:00 +03:00
|
|
|
return nil
|
2015-05-27 14:43:49 +03:00
|
|
|
}
|
|
|
|
|
2017-07-10 17:48:42 +03:00
|
|
|
// copyDb synchronises a fresh local chain from an existing source chaindata
// directory (first argument) and ancient/freezer directory (second argument)
// by driving the downloader against a simulated in-process peer, then
// compacts the resulting database.
func copyDb(ctx *cli.Context) error {
	// Ensure we have a source chain directory to copy
	if len(ctx.Args()) < 1 {
		utils.Fatalf("Source chaindata directory path argument missing")
	}
	if len(ctx.Args()) < 2 {
		utils.Fatalf("Source ancient chain directory path argument missing")
	}
	// Initialize a new chain for the running node to sync into
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	chain, chainDb := utils.MakeChain(ctx, stack, false)
	syncMode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)

	// Fast sync uses a bloom filter to speed up state trie node lookups;
	// size it to half the configured cache budget.
	var syncBloom *trie.SyncBloom
	if syncMode == downloader.FastSync {
		syncBloom = trie.NewSyncBloom(uint64(ctx.GlobalInt(utils.CacheFlag.Name)/2), chainDb)
	}
	dl := downloader.New(0, chainDb, syncBloom, new(event.TypeMux), chain, nil, nil)

	// Create a source peer to satisfy downloader requests from
	db, err := rawdb.NewLevelDBDatabaseWithFreezer(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name)/2, 256, ctx.Args().Get(1), "")
	if err != nil {
		return err
	}
	hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
	if err != nil {
		return err
	}
	// NOTE(review): 63 is the protocol version advertised for the fake peer
	// (presumably eth/63) — confirm it matches what the downloader expects.
	peer := downloader.NewFakePeer("local", db, hc, dl)
	if err = dl.RegisterPeer("local", 63, peer); err != nil {
		return err
	}
	// Synchronise with the simulated peer
	start := time.Now()

	currentHeader := hc.CurrentHeader()
	if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncMode); err != nil {
		return err
	}
	// Poll until the downloader finishes; Synchronise returns before the
	// background sync fully completes.
	for dl.Synchronising() {
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Printf("Database copy done in %v\n", time.Since(start))

	// Compact the entire database to remove any sync overhead
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
	return nil
}
|
|
|
|
|
2016-06-10 11:23:00 +03:00
|
|
|
func dump(ctx *cli.Context) error {
|
2020-08-06 10:24:36 +03:00
|
|
|
stack, _ := makeConfigNode(ctx)
|
2019-02-07 13:40:36 +03:00
|
|
|
defer stack.Close()
|
|
|
|
|
2020-05-11 18:58:43 +03:00
|
|
|
chain, chainDb := utils.MakeChain(ctx, stack, true)
|
2019-06-24 17:16:44 +03:00
|
|
|
defer chainDb.Close()
|
2015-05-27 14:43:49 +03:00
|
|
|
for _, arg := range ctx.Args() {
|
|
|
|
var block *types.Block
|
|
|
|
if hashish(arg) {
|
2016-04-05 16:22:04 +03:00
|
|
|
block = chain.GetBlockByHash(common.HexToHash(arg))
|
2015-05-27 14:43:49 +03:00
|
|
|
} else {
|
|
|
|
num, _ := strconv.Atoi(arg)
|
|
|
|
block = chain.GetBlockByNumber(uint64(num))
|
|
|
|
}
|
|
|
|
if block == nil {
|
|
|
|
fmt.Println("{}")
|
2017-02-22 18:22:50 +03:00
|
|
|
utils.Fatalf("block not found")
|
2015-05-27 14:43:49 +03:00
|
|
|
} else {
|
2019-08-06 13:40:28 +03:00
|
|
|
state, err := state.New(block.Root(), state.NewDatabase(chainDb), nil)
|
2015-10-06 17:35:55 +03:00
|
|
|
if err != nil {
|
2017-02-22 18:22:50 +03:00
|
|
|
utils.Fatalf("could not create new state: %v", err)
|
2015-10-06 17:35:55 +03:00
|
|
|
}
|
2019-06-24 17:16:44 +03:00
|
|
|
excludeCode := ctx.Bool(utils.ExcludeCodeFlag.Name)
|
|
|
|
excludeStorage := ctx.Bool(utils.ExcludeStorageFlag.Name)
|
|
|
|
includeMissing := ctx.Bool(utils.IncludeIncompletesFlag.Name)
|
|
|
|
if ctx.Bool(utils.IterativeOutputFlag.Name) {
|
|
|
|
state.IterativeDump(excludeCode, excludeStorage, !includeMissing, json.NewEncoder(os.Stdout))
|
|
|
|
} else {
|
|
|
|
if includeMissing {
|
|
|
|
fmt.Printf("If you want to include accounts with missing preimages, you need iterative output, since" +
|
|
|
|
" otherwise the accounts will overwrite each other in the resulting mapping.")
|
|
|
|
}
|
|
|
|
fmt.Printf("%v %s\n", includeMissing, state.Dump(excludeCode, excludeStorage, false))
|
|
|
|
}
|
2015-05-27 14:43:49 +03:00
|
|
|
}
|
|
|
|
}
|
2016-06-10 11:23:00 +03:00
|
|
|
return nil
|
2015-05-27 14:43:49 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// hashish reports whether x looks like a hash rather than a decimal block
// number: anything that fails integer parsing is treated as a hash.
func hashish(x string) bool {
	if _, err := strconv.Atoi(x); err != nil {
		return true
	}
	return false
}
|