// Copyright 2021 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"bytes"
	"fmt"
	"os"
	"os/signal"
	"path/filepath"
	"slices"
	"strconv"
	"strings"
	"syscall"
	"time"

	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/console/prompt"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state/snapshot"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/ethereum/go-ethereum/triedb"
	"github.com/olekukonko/tablewriter"
	"github.com/urfave/cli/v2"
)

var (
	removeStateDataFlag = &cli.BoolFlag{
		Name:  "remove.state",
		Usage: "If set, selects the state data for removal",
	}
	removeChainDataFlag = &cli.BoolFlag{
		Name:  "remove.chain",
		Usage: "If set, selects the chain data for removal",
	}

	removedbCommand = &cli.Command{
		Action:    removeDB,
		Name:      "removedb",
		Usage:     "Remove blockchain and state databases",
		ArgsUsage: "",
		Flags: slices.Concat(utils.DatabaseFlags,
			[]cli.Flag{removeStateDataFlag, removeChainDataFlag}),
		Description: `
Remove blockchain and state databases`,
	}
	dbCommand = &cli.Command{
		Name:      "db",
		Usage:     "Low level database operations",
		ArgsUsage: "",
		Subcommands: []*cli.Command{
			dbInspectCmd,
			dbStatCmd,
			dbCompactCmd,
			dbGetCmd,
			dbDeleteCmd,
			dbPutCmd,
			dbGetSlotsCmd,
			dbDumpFreezerIndex,
			dbImportCmd,
			dbExportCmd,
			dbMetadataCmd,
			dbCheckStateContentCmd,
			dbInspectHistoryCmd,
		},
	}
	dbInspectCmd = &cli.Command{
		Action:    inspect,
		Name:      "inspect",
		ArgsUsage: "<prefix> <start>",
		Flags: slices.Concat([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabaseFlags),
		Usage:       "Inspect the storage size for each type of data in the database",
		Description: `This command iterates the entire database. If the optional 'prefix' and 'start' arguments are provided, then the iteration is limited to the given subset of data.`,
	}
	dbCheckStateContentCmd = &cli.Command{
		Action:    checkStateContent,
		Name:      "check-state-content",
		ArgsUsage: "<start (optional)>",
		Flags:     slices.Concat(utils.NetworkFlags, utils.DatabaseFlags),
		Usage:     "Verify that state data is cryptographically correct",
		Description: `This command iterates the entire database for 32-byte keys, looking for rlp-encoded trie nodes.
For each trie node encountered, it checks that the key corresponds to the keccak256(value). If this is not true, this indicates
a data corruption.`,
	}
	dbStatCmd = &cli.Command{
		Action: dbStats,
		Name:   "stats",
		Usage:  "Print leveldb statistics",
		Flags: slices.Concat([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabaseFlags),
	}
	dbCompactCmd = &cli.Command{
		Action: dbCompact,
		Name:   "compact",
		Usage:  "Compact leveldb database. WARNING: May take a very long time",
		Flags: slices.Concat([]cli.Flag{
			utils.SyncModeFlag,
			utils.CacheFlag,
			utils.CacheDatabaseFlag,
		}, utils.NetworkFlags, utils.DatabaseFlags),
		Description: `This command performs a database compaction.
WARNING: This operation may take a very long time to finish, and may cause database
corruption if it is aborted during execution!`,
	}
	dbGetCmd = &cli.Command{
		Action:    dbGet,
		Name:      "get",
		Usage:     "Show the value of a database key",
		ArgsUsage: "<hex-encoded key>",
		Flags: slices.Concat([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabaseFlags),
		Description: "This command looks up the specified database key from the database.",
	}
	dbDeleteCmd = &cli.Command{
		Action:    dbDelete,
		Name:      "delete",
		Usage:     "Delete a database key (WARNING: may corrupt your database)",
		ArgsUsage: "<hex-encoded key>",
		Flags: slices.Concat([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabaseFlags),
		Description: `This command deletes the specified database key from the database.
WARNING: This is a low-level operation which may cause database corruption!`,
	}
	dbPutCmd = &cli.Command{
		Action:    dbPut,
		Name:      "put",
		Usage:     "Set the value of a database key (WARNING: may corrupt your database)",
		ArgsUsage: "<hex-encoded key> <hex-encoded value>",
		Flags: slices.Concat([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabaseFlags),
		Description: `This command sets a given database key to the given value.
WARNING: This is a low-level operation which may cause database corruption!`,
	}
	dbGetSlotsCmd = &cli.Command{
		Action:    dbDumpTrie,
		Name:      "dumptrie",
		Usage:     "Show the storage key/values of a given storage trie",
		ArgsUsage: "<hex-encoded state root> <hex-encoded account hash> <hex-encoded storage trie root> <hex-encoded start (optional)> <int max elements (optional)>",
		Flags: slices.Concat([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabaseFlags),
		Description: "This command dumps out the key/value slots of the specified storage trie.",
	}
	dbDumpFreezerIndex = &cli.Command{
		Action:    freezerInspect,
		Name:      "freezer-index",
		Usage:     "Dump out the index of a specific freezer table",
		ArgsUsage: "<freezer-type> <table-type> <start (int)> <end (int)>",
		Flags: slices.Concat([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabaseFlags),
		Description: "This command displays information about the freezer index.",
	}
	dbImportCmd = &cli.Command{
		Action:    importLDBdata,
		Name:      "import",
		Usage:     "Imports leveldb-data from an exported RLP dump.",
		ArgsUsage: "<dumpfile> <start (optional)>",
		Flags: slices.Concat([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabaseFlags),
		Description: "The import command imports the specific chain data from an RLP encoded stream.",
	}
	dbExportCmd = &cli.Command{
		Action:    exportChaindata,
		Name:      "export",
		Usage:     "Exports the chain data into an RLP dump. If the <dumpfile> has .gz suffix, gzip compression will be used.",
		ArgsUsage: "<type> <dumpfile>",
		Flags: slices.Concat([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabaseFlags),
		Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.",
	}
	dbMetadataCmd = &cli.Command{
		Action: showMetaData,
		Name:   "metadata",
		Usage:  "Shows metadata about the chain status.",
		Flags: slices.Concat([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabaseFlags),
		Description: "Shows metadata about the chain status.",
	}
	dbInspectHistoryCmd = &cli.Command{
		Action:    inspectHistory,
		Name:      "inspect-history",
		Usage:     "Inspect the state history within block range",
		ArgsUsage: "<address> [OPTIONAL <storage-slot>]",
		Flags: slices.Concat([]cli.Flag{
			utils.SyncModeFlag,
			&cli.Uint64Flag{
				Name:  "start",
				Usage: "block number of the range start, zero means earliest history",
			},
			&cli.Uint64Flag{
				Name:  "end",
				Usage: "block number of the range end (included), zero means latest history",
			},
			&cli.BoolFlag{
				Name:  "raw",
				Usage: "display the decoded raw state value (otherwise shows rlp-encoded value)",
			},
		}, utils.NetworkFlags, utils.DatabaseFlags),
		Description: "This command queries the history of the account or storage slot within the specified block range",
	}
)
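
// removeDB implements the 'removedb' command. It resolves the chaindata and
// ancient folder locations and asks for confirmation before removing the
// state data and the ancient chain segments separately.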
func removeDB(ctx *cli.Context) error {
	stack, config := makeConfigNode(ctx)

	// Resolve folder paths.
	var (
		rootDir    = stack.ResolvePath("chaindata")
		ancientDir = config.Eth.DatabaseFreezer
	)
	switch {
	case ancientDir == "":
		ancientDir = filepath.Join(stack.ResolvePath("chaindata"), "ancient")
	case !filepath.IsAbs(ancientDir):
		ancientDir = config.Node.ResolvePath(ancientDir)
	}
	// Delete state data
	statePaths := []string{
		rootDir,
		filepath.Join(ancientDir, rawdb.MerkleStateFreezerName),
		filepath.Join(ancientDir, rawdb.VerkleStateFreezerName),
	}
	confirmAndRemoveDB(statePaths, "state data", ctx, removeStateDataFlag.Name)

	// Delete ancient chain
	chainPaths := []string{filepath.Join(
		ancientDir,
		rawdb.ChainFreezerName,
	)}
	confirmAndRemoveDB(chainPaths, "ancient chain", ctx, removeChainDataFlag.Name)
	return nil
}

// removeFolder deletes all files (not folders) inside the directory 'dir' (but
// not files in subfolders).
func removeFolder(dir string) {
	filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		// If we're at the top level folder, recurse into
		if path == dir {
			return nil
		}
		// Delete all the files, but not subfolders
		if !info.IsDir() {
			os.Remove(path)
			return nil
		}
		return filepath.SkipDir
	})
}

// confirmAndRemoveDB prompts the user for a last confirmation and removes the
// list of folders if accepted.
func confirmAndRemoveDB(paths []string, kind string, ctx *cli.Context, removeFlagName string) {
	var (
		confirm bool
		err     error
	)
	msg := fmt.Sprintf("Location(s) of '%s': \n", kind)
	for _, path := range paths {
		msg += fmt.Sprintf("\t- %s\n", path)
	}
	fmt.Println(msg)
	if ctx.IsSet(removeFlagName) {
		confirm = ctx.Bool(removeFlagName)
		if confirm {
			fmt.Printf("Remove '%s'? [y/n] y\n", kind)
		} else {
			fmt.Printf("Remove '%s'? [y/n] n\n", kind)
		}
	} else {
		confirm, err = prompt.Stdin.PromptConfirm(fmt.Sprintf("Remove '%s'?", kind))
	}
	switch {
	case err != nil:
		utils.Fatalf("%v", err)
	case !confirm:
		log.Info("Database deletion skipped", "kind", kind, "paths", paths)
	default:
		var (
			deleted []string
			start   = time.Now()
		)
		for _, path := range paths {
			if common.FileExist(path) {
				removeFolder(path)
				deleted = append(deleted, path)
			} else {
				log.Info("Folder does not exist", "path", path)
			}
		}
		log.Info("Database successfully deleted", "kind", kind, "paths", deleted, "elapsed", common.PrettyDuration(time.Since(start)))
	}
}
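
// inspect iterates the entire key-value database, optionally limited to the
// given prefix/start subset, and prints the storage size of each data category.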
func inspect(ctx *cli.Context) error {
	var (
		prefix []byte
		start  []byte
	)
	if ctx.NArg() > 2 {
		return fmt.Errorf("max 2 arguments: %v", ctx.Command.ArgsUsage)
	}
	if ctx.NArg() >= 1 {
		if d, err := hexutil.Decode(ctx.Args().Get(0)); err != nil {
			return fmt.Errorf("failed to hex-decode 'prefix': %v", err)
		} else {
			prefix = d
		}
	}
	if ctx.NArg() >= 2 {
		if d, err := hexutil.Decode(ctx.Args().Get(1)); err != nil {
			return fmt.Errorf("failed to hex-decode 'start': %v", err)
		} else {
			start = d
		}
	}
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack, true)
	defer db.Close()

	return rawdb.InspectDatabase(db, prefix, start)
}
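
// checkStateContent iterates all 32-byte keys in the database and verifies
// that each stored value hashes (keccak256) back to its key, reporting any
// corrupt trie nodes found.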
func checkStateContent(ctx *cli.Context) error {
	var (
		prefix []byte
		start  []byte
	)
	if ctx.NArg() > 1 {
		return fmt.Errorf("max 1 argument: %v", ctx.Command.ArgsUsage)
	}
	if ctx.NArg() > 0 {
		if d, err := hexutil.Decode(ctx.Args().First()); err != nil {
			return fmt.Errorf("failed to hex-decode 'start': %v", err)
		} else {
			start = d
		}
	}
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()
	db := utils.MakeChainDatabase(ctx, stack, true)
	defer db.Close()
	var (
		it        = rawdb.NewKeyLengthIterator(db.NewIterator(prefix, start), 32)
		hasher    = crypto.NewKeccakState()
		got       = make([]byte, 32)
		errs      int
		count     int
		startTime = time.Now()
		lastLog   = time.Now()
	)
	for it.Next() {
		count++
		k := it.Key()
		v := it.Value()
		hasher.Reset()
		hasher.Write(v)
		hasher.Read(got)
		if !bytes.Equal(k, got) {
			errs++
			fmt.Printf("Error at %#x\n", k)
			fmt.Printf("  Hash: %#x\n", got)
			fmt.Printf("  Data: %#x\n", v)
		}
		if time.Since(lastLog) > 8*time.Second {
			log.Info("Iterating the database", "at", fmt.Sprintf("%#x", k), "elapsed", common.PrettyDuration(time.Since(startTime)))
			lastLog = time.Now()
		}
	}
	if err := it.Error(); err != nil {
		return err
	}
	log.Info("Iterated the state content", "errors", errs, "items", count)
	return nil
}
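
// showDBStats prints the statistics reported by the underlying key-value store.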
func showDBStats(db ethdb.KeyValueStater) {
	stats, err := db.Stat()
	if err != nil {
		log.Warn("Failed to read database stats", "error", err)
		return
	}
	fmt.Println(stats)
}
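
// dbStats prints the statistics of the chain database.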
func dbStats(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack, true)
	defer db.Close()

	showDBStats(db)
	return nil
}
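
// dbCompact runs a full compaction of the chain database, printing the store
// statistics before and after the operation.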
func dbCompact(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack, false)
	defer db.Close()

	log.Info("Stats before compaction")
	showDBStats(db)

	log.Info("Triggering compaction")
	if err := db.Compact(nil, nil); err != nil {
		log.Info("Compact err", "error", err)
		return err
	}

	log.Info("Stats after compaction")
	showDBStats(db)
	return nil
}

// dbGet shows the value of a given database key
func dbGet(ctx *cli.Context) error {
	if ctx.NArg() != 1 {
		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
	}
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack, true)
	defer db.Close()

	key, err := common.ParseHexOrString(ctx.Args().Get(0))
	if err != nil {
		log.Info("Could not decode the key", "error", err)
		return err
	}

	data, err := db.Get(key)
	if err != nil {
		log.Info("Get operation failed", "key", fmt.Sprintf("%#x", key), "error", err)
		return err
	}
	fmt.Printf("key %#x: %#x\n", key, data)
	return nil
}

// dbDelete deletes a key from the database
func dbDelete(ctx *cli.Context) error {
	if ctx.NArg() != 1 {
		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
	}
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack, false)
	defer db.Close()

	key, err := common.ParseHexOrString(ctx.Args().Get(0))
	if err != nil {
		log.Info("Could not decode the key", "error", err)
		return err
	}
	data, err := db.Get(key)
	if err == nil {
		fmt.Printf("Previous value: %#x\n", data)
	}
	if err = db.Delete(key); err != nil {
		log.Info("Delete operation returned an error", "key", fmt.Sprintf("%#x", key), "error", err)
		return err
	}
	return nil
}

// dbPut overwrites a value in the database
func dbPut(ctx *cli.Context) error {
	if ctx.NArg() != 2 {
		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
	}
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack, false)
	defer db.Close()

	var (
		key   []byte
		value []byte
		data  []byte
		err   error
	)
	key, err = common.ParseHexOrString(ctx.Args().Get(0))
	if err != nil {
		log.Info("Could not decode the key", "error", err)
		return err
	}
	value, err = hexutil.Decode(ctx.Args().Get(1))
	if err != nil {
		log.Info("Could not decode the value", "error", err)
		return err
	}
	data, err = db.Get(key)
	if err == nil {
		fmt.Printf("Previous value: %#x\n", data)
	}
	return db.Put(key, value)
}

// dbDumpTrie shows the key-value slots of a given storage trie
func dbDumpTrie(ctx *cli.Context) error {
	if ctx.NArg() < 3 {
		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
	}
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()
	db := utils.MakeChainDatabase(ctx, stack, true)
	defer db.Close()

	triedb := utils.MakeTrieDatabase(ctx, db, false, true, false)
	defer triedb.Close()

	var (
		state   []byte
		storage []byte
		account []byte
		start   []byte
		max     = int64(-1)
		err     error
	)
	if state, err = hexutil.Decode(ctx.Args().Get(0)); err != nil {
		log.Info("Could not decode the state root", "error", err)
		return err
	}
	if account, err = hexutil.Decode(ctx.Args().Get(1)); err != nil {
		log.Info("Could not decode the account hash", "error", err)
		return err
	}
	if storage, err = hexutil.Decode(ctx.Args().Get(2)); err != nil {
		log.Info("Could not decode the storage trie root", "error", err)
		return err
	}
	if ctx.NArg() > 3 {
		if start, err = hexutil.Decode(ctx.Args().Get(3)); err != nil {
			log.Info("Could not decode the seek position", "error", err)
			return err
		}
	}
	if ctx.NArg() > 4 {
		if max, err = strconv.ParseInt(ctx.Args().Get(4), 10, 64); err != nil {
			log.Info("Could not decode the max count", "error", err)
			return err
		}
	}
	id := trie.StorageTrieID(common.BytesToHash(state), common.BytesToHash(account), common.BytesToHash(storage))
	theTrie, err := trie.New(id, triedb)
	if err != nil {
		return err
	}
	trieIt, err := theTrie.NodeIterator(start)
	if err != nil {
		return err
	}
	var count int64
	it := trie.NewIterator(trieIt)
	for it.Next() {
		if max > 0 && count == max {
			fmt.Printf("Exiting after %d values\n", count)
			break
		}
		fmt.Printf("  %d. key %#x: %#x\n", count, it.Key, it.Value)
		count++
	}
	return it.Err
}
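
// freezerInspect dumps the index entries of the given freezer table between
// the supplied start and end positions.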
func freezerInspect(ctx *cli.Context) error {
	if ctx.NArg() < 4 {
		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
	}
	var (
		freezer = ctx.Args().Get(0)
		table   = ctx.Args().Get(1)
	)
	start, err := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
	if err != nil {
		log.Info("Could not read start-param", "err", err)
		return err
	}
	end, err := strconv.ParseInt(ctx.Args().Get(3), 10, 64)
	if err != nil {
		log.Info("Could not read count param", "err", err)
		return err
	}
	stack, _ := makeConfigNode(ctx)
	ancient := stack.ResolveAncient("chaindata", ctx.String(utils.AncientFlag.Name))
	stack.Close()
	return rawdb.InspectFreezerTable(ancient, freezer, table, start, end)
}
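
// importLDBdata imports key-value pairs from an exported RLP dump into the
// database, optionally resuming from the given start position.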
func importLDBdata(ctx *cli.Context) error {
	start := 0
	switch ctx.NArg() {
	case 1:
		break
	case 2:
		s, err := strconv.Atoi(ctx.Args().Get(1))
		if err != nil {
			return fmt.Errorf("second arg must be an integer: %v", err)
		}
		start = s
	default:
		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
	}
	var (
		fName     = ctx.Args().Get(0)
		stack, _  = makeConfigNode(ctx)
		interrupt = make(chan os.Signal, 1)
		stop      = make(chan struct{})
	)
	defer stack.Close()
	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(interrupt)
	defer close(interrupt)
	go func() {
		if _, ok := <-interrupt; ok {
			log.Info("Interrupted during ldb import, stopping at next batch")
		}
		close(stop)
	}()
	db := utils.MakeChainDatabase(ctx, stack, false)
	defer db.Close()
	return utils.ImportLDBData(db, fName, int64(start), stop)
}
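
// preimageIterator wraps a database iterator and yields only preimage entries,
// emitting them as batch-add operations for the export stream.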
type preimageIterator struct {
	iter ethdb.Iterator
}

func (iter *preimageIterator) Next() (byte, []byte, []byte, bool) {
	for iter.iter.Next() {
		key := iter.iter.Key()
		if bytes.HasPrefix(key, rawdb.PreimagePrefix) && len(key) == (len(rawdb.PreimagePrefix)+common.HashLength) {
			return utils.OpBatchAdd, key, iter.iter.Value(), true
		}
	}
	return 0, nil, nil, false
}

func (iter *preimageIterator) Release() {
	iter.iter.Release()
}
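
// snapshotIterator first emits a deletion of the snapshot root key and then
// yields all account and storage snapshot entries as batch-add operations.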
type snapshotIterator struct {
	init    bool
	account ethdb.Iterator
	storage ethdb.Iterator
}

func (iter *snapshotIterator) Next() (byte, []byte, []byte, bool) {
	if !iter.init {
		iter.init = true
		return utils.OpBatchDel, rawdb.SnapshotRootKey, nil, true
	}
	for iter.account.Next() {
		key := iter.account.Key()
		if bytes.HasPrefix(key, rawdb.SnapshotAccountPrefix) && len(key) == (len(rawdb.SnapshotAccountPrefix)+common.HashLength) {
			return utils.OpBatchAdd, key, iter.account.Value(), true
		}
	}
	for iter.storage.Next() {
		key := iter.storage.Key()
		if bytes.HasPrefix(key, rawdb.SnapshotStoragePrefix) && len(key) == (len(rawdb.SnapshotStoragePrefix)+2*common.HashLength) {
			return utils.OpBatchAdd, key, iter.storage.Value(), true
		}
	}
	return 0, nil, nil, false
}

func (iter *snapshotIterator) Release() {
	iter.account.Release()
	iter.storage.Release()
}

// chainExporters defines the export scheme for all exportable chain data.
var chainExporters = map[string]func(db ethdb.Database) utils.ChainDataIterator{
	"preimage": func(db ethdb.Database) utils.ChainDataIterator {
		iter := db.NewIterator(rawdb.PreimagePrefix, nil)
		return &preimageIterator{iter: iter}
	},
	"snapshot": func(db ethdb.Database) utils.ChainDataIterator {
		account := db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
		storage := db.NewIterator(rawdb.SnapshotStoragePrefix, nil)
		return &snapshotIterator{account: account, storage: storage}
	},
}
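
// exportChaindata exports the chain data selected by <type> into an RLP dump
// file, stopping gracefully on SIGINT/SIGTERM.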
func exportChaindata(ctx *cli.Context) error {
	if ctx.NArg() < 2 {
		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
	}
	// Parse the required chain data type, make sure it's supported.
	kind := ctx.Args().Get(0)
	kind = strings.ToLower(strings.Trim(kind, " "))
	exporter, ok := chainExporters[kind]
	if !ok {
		var kinds []string
		for kind := range chainExporters {
			kinds = append(kinds, kind)
		}
		return fmt.Errorf("invalid data type %s, supported types: %s", kind, strings.Join(kinds, ", "))
	}
	var (
		stack, _  = makeConfigNode(ctx)
		interrupt = make(chan os.Signal, 1)
		stop      = make(chan struct{})
	)
	defer stack.Close()
	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(interrupt)
	defer close(interrupt)
	go func() {
		if _, ok := <-interrupt; ok {
			log.Info("Interrupted during db export, stopping at next batch")
		}
		close(stop)
	}()
	db := utils.MakeChainDatabase(ctx, stack, true)
	defer db.Close()
	return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop)
}
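
// showMetaData prints a table of chain metadata: the frozen item count, the
// snapshot generator status and the current head block and header markers.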
func showMetaData(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()
	db := utils.MakeChainDatabase(ctx, stack, true)
	defer db.Close()

	ancients, err := db.Ancients()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error accessing ancients: %v", err)
	}
	data := rawdb.ReadChainMetadata(db)
	data = append(data, []string{"frozen", fmt.Sprintf("%d items", ancients)})
	data = append(data, []string{"snapshotGenerator", snapshot.ParseGeneratorStatus(rawdb.ReadSnapshotGenerator(db))})
	if b := rawdb.ReadHeadBlock(db); b != nil {
		data = append(data, []string{"headBlock.Hash", fmt.Sprintf("%v", b.Hash())})
		data = append(data, []string{"headBlock.Root", fmt.Sprintf("%v", b.Root())})
		data = append(data, []string{"headBlock.Number", fmt.Sprintf("%d (%#x)", b.Number(), b.Number())})
	}
	if h := rawdb.ReadHeadHeader(db); h != nil {
		data = append(data, []string{"headHeader.Hash", fmt.Sprintf("%v", h.Hash())})
		data = append(data, []string{"headHeader.Root", fmt.Sprintf("%v", h.Root)})
		data = append(data, []string{"headHeader.Number", fmt.Sprintf("%d (%#x)", h.Number, h.Number)})
	}
	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader([]string{"Field", "Value"})
	table.AppendBulk(data)
	table.Render()
	return nil
}
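
// inspectAccount prints the account history of the given address within the
// requested state-history range.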
func inspectAccount(db *triedb.Database, start uint64, end uint64, address common.Address, raw bool) error {
	stats, err := db.AccountHistory(address, start, end)
	if err != nil {
		return err
	}
	fmt.Printf("Account history:\n\taddress: %s\n\tblockrange: [#%d-#%d]\n", address.Hex(), stats.Start, stats.End)
	from := stats.Start
	for i := 0; i < len(stats.Blocks); i++ {
		var content string
		if len(stats.Origins[i]) == 0 {
			content = "<empty>"
		} else {
			if !raw {
				content = fmt.Sprintf("%#x", stats.Origins[i])
			} else {
				account := new(types.SlimAccount)
				if err := rlp.DecodeBytes(stats.Origins[i], account); err != nil {
					panic(err)
				}
				code := "<nil>"
				if len(account.CodeHash) > 0 {
					code = fmt.Sprintf("%#x", account.CodeHash)
				}
				root := "<nil>"
				if len(account.Root) > 0 {
					root = fmt.Sprintf("%#x", account.Root)
				}
				content = fmt.Sprintf("nonce: %d, balance: %d, codeHash: %s, root: %s", account.Nonce, account.Balance, code, root)
			}
		}
		fmt.Printf("#%d - #%d: %s\n", from, stats.Blocks[i], content)
		from = stats.Blocks[i]
	}
	return nil
}
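
// inspectStorage prints the history of the given storage slot of an account
// within the requested state-history range.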
func inspectStorage(db *triedb.Database, start uint64, end uint64, address common.Address, slot common.Hash, raw bool) error {
	// The history stores the hash of the storage slot key rather than the
	// raw slot key, so convert it here.
	slotHash := crypto.Keccak256Hash(slot.Bytes())
	stats, err := db.StorageHistory(address, slotHash, start, end)
	if err != nil {
		return err
	}
	fmt.Printf("Storage history:\n\taddress: %s\n\tslot: %s\n\tblockrange: [#%d-#%d]\n", address.Hex(), slot.Hex(), stats.Start, stats.End)
	from := stats.Start
	for i := 0; i < len(stats.Blocks); i++ {
		var content string
		if len(stats.Origins[i]) == 0 {
			content = "<empty>"
		} else {
			if !raw {
				content = fmt.Sprintf("%#x", stats.Origins[i])
			} else {
				_, data, _, err := rlp.Split(stats.Origins[i])
				if err != nil {
					fmt.Printf("Failed to decode storage slot, %v", err)
					return err
				}
				content = fmt.Sprintf("%#x", data)
			}
		}
		fmt.Printf("#%d - #%d: %s\n", from, stats.Blocks[i], content)
		from = stats.Blocks[i]
	}
	return nil
}
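
// inspectHistory implements the 'inspect-history' command. It resolves the
// requested block range to state-history IDs and prints the account or
// storage-slot history for the given address.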
func inspectHistory(ctx *cli.Context) error {
	if ctx.NArg() == 0 || ctx.NArg() > 2 {
		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
	}
	var (
		address common.Address
		slot    common.Hash
	)
	if err := address.UnmarshalText([]byte(ctx.Args().Get(0))); err != nil {
		return err
	}
	if ctx.NArg() > 1 {
		if err := slot.UnmarshalText([]byte(ctx.Args().Get(1))); err != nil {
			return err
		}
	}
	// Load the databases.
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()
	db := utils.MakeChainDatabase(ctx, stack, true)
	defer db.Close()
	triedb := utils.MakeTrieDatabase(ctx, db, false, false, false)
	defer triedb.Close()
	var (
		err   error
		start uint64 // the id of first history object to query
		end   uint64 // the id (included) of last history object to query
	)
	// State histories are identified by state ID rather than block number.
	// Load the corresponding block header and convert the block number into
	// the associated state ID with this helper.
	blockToID := func(blockNumber uint64) (uint64, error) {
		header := rawdb.ReadHeader(db, rawdb.ReadCanonicalHash(db, blockNumber), blockNumber)
		if header == nil {
			return 0, fmt.Errorf("block #%d does not exist", blockNumber)
		}
		id := rawdb.ReadStateID(db, header.Root)
		if id == nil {
			first, last, err := triedb.HistoryRange()
			if err == nil {
				return 0, fmt.Errorf("history of block #%d does not exist, available history range: [#%d-#%d]", blockNumber, first, last)
			}
			return 0, fmt.Errorf("history of block #%d does not exist", blockNumber)
		}
		return *id, nil
	}
	// Parse the starting block number for inspection.
	startNumber := ctx.Uint64("start")
	if startNumber != 0 {
		start, err = blockToID(startNumber)
		if err != nil {
			return err
		}
	}
	// Parse the ending block number for inspection.
	endBlock := ctx.Uint64("end")
	if endBlock != 0 {
		end, err = blockToID(endBlock)
		if err != nil {
			return err
		}
	}
	// Inspect the state history.
	if slot == (common.Hash{}) {
		return inspectAccount(triedb, start, end, address, ctx.Bool("raw"))
	}
	return inspectStorage(triedb, start, end, address, slot, ctx.Bool("raw"))
}