// Copyright 2014 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

// Package utils contains internal helper functions for go-ethereum commands.
package utils

import (
	"bufio"
	"bytes"
	"compress/gzip"
	"crypto/sha256"
	"errors"
	"fmt"
	"io"
	"math"
	"os"
	"os/signal"
	"path/filepath"
	"runtime"
	"strings"
	"syscall"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state/snapshot"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/eth/ethconfig"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/internal/debug"
	"github.com/ethereum/go-ethereum/internal/era"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/urfave/cli/v2"
)
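
// importBatchSize is the number of blocks that ImportChain decodes and
// inserts per batch.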
const (
	importBatchSize = 2500
)

// Fatalf formats a message to standard error and exits the program.
// The message is also printed to standard output if standard error
// is redirected to a different file.
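//
// A typical call, sketched here for illustration (doWork is hypothetical):
//
//	if err := doWork(); err != nil {
//		Fatalf("Failed to do work: %v", err)
//	}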
func Fatalf(format string, args ...interface{}) {
	w := io.MultiWriter(os.Stdout, os.Stderr)
	if runtime.GOOS == "windows" {
		// The SameFile check below doesn't work on Windows.
		// stdout is unlikely to get redirected though, so just print there.
		w = os.Stdout
	} else {
		outf, _ := os.Stdout.Stat()
		errf, _ := os.Stderr.Stat()
		if outf != nil && errf != nil && os.SameFile(outf, errf) {
			w = os.Stderr
		}
	}
	fmt.Fprintf(w, "Fatal: "+format+"\n", args...)
	os.Exit(1)
}
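
// StartNode boots up the given protocol stack and spawns a goroutine handling
// shutdown signals: SIGINT and SIGTERM trigger a graceful close, and further
// interrupts eventually force a panic. When isConsole is true, SIGINT is left
// to the JS console and only SIGTERM shuts the node down. A disk-space
// monitor may emit the same shutdown signal when free space runs low.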
func StartNode(ctx *cli.Context, stack *node.Node, isConsole bool) {
	if err := stack.Start(); err != nil {
		Fatalf("Error starting protocol stack: %v", err)
	}
	go func() {
		sigc := make(chan os.Signal, 1)
		signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)
		defer signal.Stop(sigc)

		minFreeDiskSpace := 2 * ethconfig.Defaults.TrieDirtyCache // Default 2 * 256MB
		if ctx.IsSet(MinFreeDiskSpaceFlag.Name) {
			minFreeDiskSpace = ctx.Int(MinFreeDiskSpaceFlag.Name)
		} else if ctx.IsSet(CacheFlag.Name) || ctx.IsSet(CacheGCFlag.Name) {
			minFreeDiskSpace = 2 * ctx.Int(CacheFlag.Name) * ctx.Int(CacheGCFlag.Name) / 100
		}
		if minFreeDiskSpace > 0 {
			go monitorFreeDiskSpace(sigc, stack.InstanceDir(), uint64(minFreeDiskSpace)*1024*1024)
		}

		shutdown := func() {
			log.Info("Got interrupt, shutting down...")
			go stack.Close()
			for i := 10; i > 0; i-- {
				<-sigc
				if i > 1 {
					log.Warn("Already shutting down, interrupt more to panic.", "times", i-1)
				}
			}
			debug.Exit() // ensure trace and CPU profile data is flushed.
			debug.LoudPanic("boom")
		}
		if isConsole {
			// In JS console mode, SIGINT is ignored because it's handled by the console.
			// However, SIGTERM still shuts down the node.
			for {
				sig := <-sigc
				if sig == syscall.SIGTERM {
					shutdown()
					return
				}
			}
		} else {
			<-sigc
			shutdown()
		}
	}()
}
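
// monitorFreeDiskSpace polls the free disk space of the given path every 30s
// and sends SIGTERM on sigc once it drops below freeDiskSpaceCritical, after
// warning when it falls below twice that threshold. The getFreeDiskSpace
// helper is assumed to be provided by a platform-specific file elsewhere in
// this package.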
func monitorFreeDiskSpace(sigc chan os.Signal, path string, freeDiskSpaceCritical uint64) {
	if path == "" {
		return
	}
	for {
		freeSpace, err := getFreeDiskSpace(path)
		if err != nil {
			log.Warn("Failed to get free disk space", "path", path, "err", err)
			break
		}
		if freeSpace < freeDiskSpaceCritical {
			log.Error("Low disk space. Gracefully shutting down Geth to prevent database corruption.", "available", common.StorageSize(freeSpace), "path", path)
			sigc <- syscall.SIGTERM
			break
		} else if freeSpace < 2*freeDiskSpaceCritical {
			log.Warn("Disk space is running low. Geth will shutdown if disk space runs below critical level.", "available", common.StorageSize(freeSpace), "critical_level", common.StorageSize(freeDiskSpaceCritical), "path", path)
		}
		time.Sleep(30 * time.Second)
	}
}
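
// ImportChain imports blocks into the given chain from an RLP-encoded block
// dump (gzip-compressed if the file name ends in .gz), reading and inserting
// them in batches of importBatchSize.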
func ImportChain(chain *core.BlockChain, fn string) error {
	// Watch for Ctrl-C while the import is running.
	// If a signal is received, the import will stop at the next batch.
	interrupt := make(chan os.Signal, 1)
	stop := make(chan struct{})
	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(interrupt)
	defer close(interrupt)
	go func() {
		if _, ok := <-interrupt; ok {
			log.Info("Interrupted during import, stopping at next batch")
		}
		close(stop)
	}()
	checkInterrupt := func() bool {
		select {
		case <-stop:
			return true
		default:
			return false
		}
	}
	log.Info("Importing blockchain", "file", fn)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(fn)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = fh
	if strings.HasSuffix(fn, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Run the actual import.
	blocks := make(types.Blocks, importBatchSize)
	n := 0
	for batch := 0; ; batch++ {
		// Load a batch of RLP blocks.
		if checkInterrupt() {
			return errors.New("interrupted")
		}
		i := 0
		for ; i < importBatchSize; i++ {
			var b types.Block
			if err := stream.Decode(&b); err == io.EOF {
				break
			} else if err != nil {
				return fmt.Errorf("at block %d: %v", n, err)
			}
			// Don't import the genesis block; it's already in the chain.
			if b.NumberU64() == 0 {
				i--
				continue
			}
			blocks[i] = &b
			n++
		}
		if i == 0 {
			break
		}
		// Import the batch.
		if checkInterrupt() {
			return errors.New("interrupted")
		}
		missing := missingBlocks(chain, blocks[:i])
		if len(missing) == 0 {
			log.Info("Skipping batch as all blocks present", "batch", batch, "first", blocks[0].Hash(), "last", blocks[i-1].Hash())
			continue
		}
		if failindex, err := chain.InsertChain(missing); err != nil {
			var failnumber uint64
			if failindex > 0 && failindex < len(missing) {
				failnumber = missing[failindex].NumberU64()
			} else {
				failnumber = missing[0].NumberU64()
			}
			return fmt.Errorf("invalid block %d: %v", failnumber, err)
		}
	}
	return nil
}
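
// readList returns the newline-separated entries of the named file.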
func readList(filename string) ([]string, error) {
	b, err := os.ReadFile(filename)
	if err != nil {
		return nil, err
	}
	return strings.Split(string(b), "\n"), nil
}

// ImportHistory imports Era1 files containing historical block information,
// starting from genesis.
func ImportHistory(chain *core.BlockChain, db ethdb.Database, dir string, network string) error {
	if chain.CurrentSnapBlock().Number.BitLen() != 0 {
		return errors.New("history import only supported when starting from genesis")
	}
	entries, err := era.ReadDir(dir, network)
	if err != nil {
		return fmt.Errorf("error reading %s: %w", dir, err)
	}
	checksums, err := readList(filepath.Join(dir, "checksums.txt"))
	if err != nil {
		return fmt.Errorf("unable to read checksums.txt: %w", err)
	}
	if len(checksums) != len(entries) {
		return fmt.Errorf("expected equal number of checksums and entries, have: %d checksums, %d entries", len(checksums), len(entries))
	}
	var (
		start    = time.Now()
		reported = time.Now()
		imported = 0
		forker   = core.NewForkChoice(chain, nil)
		h        = sha256.New()
		buf      = bytes.NewBuffer(nil)
	)
	for i, filename := range entries {
		err := func() error {
			f, err := os.Open(filepath.Join(dir, filename))
			if err != nil {
				return fmt.Errorf("unable to open era: %w", err)
			}
			defer f.Close()

			// Validate checksum.
			if _, err := io.Copy(h, f); err != nil {
				return fmt.Errorf("unable to recalculate checksum: %w", err)
			}
			if have, want := common.BytesToHash(h.Sum(buf.Bytes())).Hex(), checksums[i]; have != want {
				return fmt.Errorf("checksum mismatch: have %s, want %s", have, want)
			}
			h.Reset()
			buf.Reset()

			// Import all block data from Era1.
			e, err := era.From(f)
			if err != nil {
				return fmt.Errorf("error opening era: %w", err)
			}
			it, err := era.NewIterator(e)
			if err != nil {
				return fmt.Errorf("error making era reader: %w", err)
			}
			for it.Next() {
				block, err := it.Block()
				if err != nil {
					return fmt.Errorf("error reading block %d: %w", it.Number(), err)
				}
				if block.Number().BitLen() == 0 {
					continue // skip genesis
				}
				receipts, err := it.Receipts()
				if err != nil {
					return fmt.Errorf("error reading receipts %d: %w", it.Number(), err)
				}
				if status, err := chain.HeaderChain().InsertHeaderChain([]*types.Header{block.Header()}, start, forker); err != nil {
					return fmt.Errorf("error inserting header %d: %w", it.Number(), err)
				} else if status != core.CanonStatTy {
					return fmt.Errorf("error inserting header %d, not canon: %v", it.Number(), status)
				}
				// Pass the maximum ancient limit so all imported block data
				// goes directly to the ancient store.
				if _, err := chain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{receipts}, math.MaxUint64); err != nil {
					return fmt.Errorf("error inserting body %d: %w", it.Number(), err)
				}
				imported++

				// Give the user some feedback that something is happening.
				if time.Since(reported) >= 8*time.Second {
					log.Info("Importing Era files", "head", it.Number(), "imported", imported, "elapsed", common.PrettyDuration(time.Since(start)))
					imported = 0
					reported = time.Now()
				}
			}
			return nil
		}()
		if err != nil {
			return err
		}
	}
	return nil
}
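
// missingBlocks returns the blocks from the supplied batch, starting at the
// first one not yet fully present in the chain.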
func missingBlocks(chain *core.BlockChain, blocks []*types.Block) []*types.Block {
	head := chain.CurrentBlock()
	for i, block := range blocks {
		// If we're behind the chain head, only the block needs to be checked;
		// state is only kept around the head.
		if head.Number.Uint64() > block.NumberU64() {
			if !chain.HasBlock(block.Hash(), block.NumberU64()) {
				return blocks[i:]
			}
			continue
		}
		// If we're above the chain head, state availability is a must
		if !chain.HasBlockAndState(block.Hash(), block.NumberU64()) {
			return blocks[i:]
		}
	}
	return nil
}

// ExportChain exports a blockchain into the specified file, truncating any data
// already present in the file.
func ExportChain(blockchain *core.BlockChain, fn string) error {
	log.Info("Exporting blockchain", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the blocks and export them
	if err := blockchain.Export(writer); err != nil {
		return err
	}
	log.Info("Exported blockchain", "file", fn)
	return nil
}

// ExportAppendChain exports a blockchain into the specified file, appending to
// the file if data already exists in it.
func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, last uint64) error {
	log.Info("Exporting blockchain", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the blocks and export them
	if err := blockchain.ExportN(writer, first, last); err != nil {
		return err
	}
	log.Info("Exported blockchain to", "file", fn)
	return nil
}

// ExportHistory exports blockchain history into the specified directory,
// following the Era1 format.
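//
// One Era1 file is written per step-sized range of blocks, along with a
// checksums.txt file holding the SHA-256 of each era file, one hex hash per
// line. The resulting directory layout is sketched below (file names embed
// the network, era number and root, via era.Filename):
//
//	<network>-<era>-<root>.era1
//	...
//	checksums.txt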
func ExportHistory(bc *core.BlockChain, dir string, first, last, step uint64) error {
	log.Info("Exporting blockchain history", "dir", dir)
	if head := bc.CurrentBlock().Number.Uint64(); head < last {
		log.Warn("Last block beyond head, setting last = head", "head", head, "last", last)
		last = head
	}
	network := "unknown"
	if name, ok := params.NetworkNames[bc.Config().ChainID.String()]; ok {
		network = name
	}
	if err := os.MkdirAll(dir, os.ModePerm); err != nil {
		return fmt.Errorf("error creating output directory: %w", err)
	}
	var (
		start     = time.Now()
		reported  = time.Now()
		h         = sha256.New()
		buf       = bytes.NewBuffer(nil)
		checksums []string
	)
	for i := first; i <= last; i += step {
		err := func() error {
			filename := filepath.Join(dir, era.Filename(network, int(i/step), common.Hash{}))
			f, err := os.Create(filename)
			if err != nil {
				return fmt.Errorf("could not create era file: %w", err)
			}
			defer f.Close()

			w := era.NewBuilder(f)
			for j := uint64(0); j < step && j <= last-i; j++ {
				var (
					n     = i + j
					block = bc.GetBlockByNumber(n)
				)
				if block == nil {
					return fmt.Errorf("export failed on #%d: not found", n)
				}
				receipts := bc.GetReceiptsByHash(block.Hash())
				if receipts == nil {
					return fmt.Errorf("export failed on #%d: receipts not found", n)
				}
				td := bc.GetTd(block.Hash(), block.NumberU64())
				if td == nil {
					return fmt.Errorf("export failed on #%d: total difficulty not found", n)
				}
				if err := w.Add(block, receipts, td); err != nil {
					return err
				}
			}
			root, err := w.Finalize()
			if err != nil {
				return fmt.Errorf("export failed to finalize %d: %w", i/step, err)
			}
			// Set correct filename with root.
			if err := os.Rename(filename, filepath.Join(dir, era.Filename(network, int(i/step), root))); err != nil {
				return fmt.Errorf("unable to rename era file: %w", err)
			}
			// Compute checksum of entire Era1.
			if _, err := f.Seek(0, io.SeekStart); err != nil {
				return err
			}
			if _, err := io.Copy(h, f); err != nil {
				return fmt.Errorf("unable to calculate checksum: %w", err)
			}
			checksums = append(checksums, common.BytesToHash(h.Sum(buf.Bytes())).Hex())
			h.Reset()
			buf.Reset()
			return nil
		}()
		if err != nil {
			return err
		}
		if time.Since(reported) >= 8*time.Second {
			log.Info("Exporting blocks", "exported", i, "elapsed", common.PrettyDuration(time.Since(start)))
			reported = time.Now()
		}
	}
	if err := os.WriteFile(filepath.Join(dir, "checksums.txt"), []byte(strings.Join(checksums, "\n")), os.ModePerm); err != nil {
		return fmt.Errorf("unable to write checksums.txt: %w", err)
	}
	log.Info("Exported blockchain to", "dir", dir)
	return nil
}

// ImportPreimages imports a batch of exported hash preimages into the database.
// It's part of the deprecated functionality and should be removed in the future.
func ImportPreimages(db ethdb.Database, fn string) error {
	log.Info("Importing preimages", "file", fn)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(fn)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = bufio.NewReader(fh)
	if strings.HasSuffix(fn, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Import the preimages in batches to prevent disk thrashing
	preimages := make(map[common.Hash][]byte)
	for {
		// Read the next entry and ensure it's not junk
		var blob []byte
		if err := stream.Decode(&blob); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		// Accumulate the preimages and flush when enough were gathered
		preimages[crypto.Keccak256Hash(blob)] = common.CopyBytes(blob)
		if len(preimages) > 1024 {
			rawdb.WritePreimages(db, preimages)
			preimages = make(map[common.Hash][]byte)
		}
	}
	// Flush the last batch preimage data
	if len(preimages) > 0 {
		rawdb.WritePreimages(db, preimages)
	}
	return nil
}

// ExportPreimages exports all known hash preimages into the specified file,
// truncating any data already present in the file.
// It's part of the deprecated functionality and should be removed in the future.
func ExportPreimages(db ethdb.Database, fn string) error {
	log.Info("Exporting preimages", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the preimages and export them
	it := db.NewIterator([]byte("secure-key-"), nil)
	defer it.Release()

	for it.Next() {
		if err := rlp.Encode(writer, it.Value()); err != nil {
			return err
		}
	}
	log.Info("Exported preimages", "file", fn)
	return nil
}

// ExportSnapshotPreimages exports the preimages corresponding to the enumeration of
// the snapshot for a given root.
func ExportSnapshotPreimages(chaindb ethdb.Database, snaptree *snapshot.Tree, fn string, root common.Hash) error {
	log.Info("Exporting preimages", "file", fn)

	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	// Enable gzip compressing if file name has gz suffix.
	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		gz := gzip.NewWriter(writer)
		defer gz.Close()
		writer = gz
	}
	buf := bufio.NewWriter(writer)
	defer buf.Flush()
	writer = buf

	type hashAndPreimageSize struct {
		Hash common.Hash
		Size int
	}
	hashCh := make(chan hashAndPreimageSize)

	var (
		start     = time.Now()
		logged    = time.Now()
		preimages int
	)
	go func() {
		defer close(hashCh)
		accIt, err := snaptree.AccountIterator(root, common.Hash{})
		if err != nil {
			log.Error("Failed to create account iterator", "error", err)
			return
		}
		defer accIt.Release()

		for accIt.Next() {
			acc, err := types.FullAccount(accIt.Account())
			if err != nil {
				log.Error("Failed to get full account", "error", err)
				return
			}
			preimages++
			hashCh <- hashAndPreimageSize{Hash: accIt.Hash(), Size: common.AddressLength}

			if acc.Root != (common.Hash{}) && acc.Root != types.EmptyRootHash {
				stIt, err := snaptree.StorageIterator(root, accIt.Hash(), common.Hash{})
				if err != nil {
					log.Error("Failed to create storage iterator", "error", err)
					return
				}
				for stIt.Next() {
					preimages++
					hashCh <- hashAndPreimageSize{Hash: stIt.Hash(), Size: common.HashLength}

					if time.Since(logged) > time.Second*8 {
						logged = time.Now()
						log.Info("Exporting preimages", "count", preimages, "elapsed", common.PrettyDuration(time.Since(start)))
					}
				}
				stIt.Release()
			}
			if time.Since(logged) > time.Second*8 {
				logged = time.Now()
				log.Info("Exporting preimages", "count", preimages, "elapsed", common.PrettyDuration(time.Since(start)))
			}
		}
	}()

	for item := range hashCh {
		preimage := rawdb.ReadPreimage(chaindb, item.Hash)
		if len(preimage) == 0 {
			return fmt.Errorf("missing preimage for %v", item.Hash)
		}
		if len(preimage) != item.Size {
			return fmt.Errorf("invalid preimage size, have %d", len(preimage))
		}
		rlpenc, err := rlp.EncodeToBytes(preimage)
		if err != nil {
			return fmt.Errorf("error encoding preimage: %w", err)
		}
		if _, err := writer.Write(rlpenc); err != nil {
			return fmt.Errorf("failed to write preimage: %w", err)
		}
	}
	log.Info("Exported preimages", "count", preimages, "elapsed", common.PrettyDuration(time.Since(start)), "file", fn)
	return nil
}

// exportHeader is used in the export/import flow. When we do an export,
// the first element we output is the exportHeader.
// Whenever a backwards-incompatible change is made, the Version header
// should be bumped.
// If the importer sees a higher version, it should reject the import.
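//
// The full dump stream is therefore the RLP-encoded header followed by
// repeated RLP-encoded (op, key, value) triples, sketched as:
//
//	exportHeader | op | key | value | op | key | value | ...
//
// where op is one of the OpBatch* constants defined below.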
type exportHeader struct {
	Magic    string // Always set to 'gethdbdump' for disambiguation
	Version  uint64
	Kind     string
	UnixTime uint64
}

const exportMagic = "gethdbdump"

const (
	OpBatchAdd = 0
	OpBatchDel = 1
)

// ImportLDBData imports a batch of snapshot data into the database.
func ImportLDBData(db ethdb.Database, f string, startIndex int64, interrupt chan struct{}) error {
	log.Info("Importing leveldb data", "file", f)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(f)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = bufio.NewReader(fh)
	if strings.HasSuffix(f, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Read the header
	var header exportHeader
	if err := stream.Decode(&header); err != nil {
		return fmt.Errorf("could not decode header: %v", err)
	}
	if header.Magic != exportMagic {
		return errors.New("incompatible data, wrong magic")
	}
	if header.Version != 0 {
		return fmt.Errorf("incompatible version %d (support only 0)", header.Version)
	}
	log.Info("Importing data", "file", f, "type", header.Kind, "data age",
		common.PrettyDuration(time.Since(time.Unix(int64(header.UnixTime), 0))))

	// Import the snapshot in batches to prevent disk thrashing
	var (
		count  int64
		start  = time.Now()
		logged = time.Now()
		batch  = db.NewBatch()
	)
	for {
		// Read the next entry
		var (
			op       byte
			key, val []byte
		)
		if err := stream.Decode(&op); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		if err := stream.Decode(&key); err != nil {
			return err
		}
		if err := stream.Decode(&val); err != nil {
			return err
		}
		if count < startIndex {
			count++
			continue
		}
		switch op {
		case OpBatchDel:
			batch.Delete(key)
		case OpBatchAdd:
			batch.Put(key, val)
		default:
			return fmt.Errorf("unknown op %d", op)
		}
		if batch.ValueSize() > ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return err
			}
			batch.Reset()
		}
		// Check interruption emitted by ctrl+c
		if count%1000 == 0 {
			select {
			case <-interrupt:
				if err := batch.Write(); err != nil {
					return err
				}
				log.Info("External data import interrupted", "file", f, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
				return nil
			default:
			}
		}
		if count%1000 == 0 && time.Since(logged) > 8*time.Second {
			log.Info("Importing external data", "file", f, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
			logged = time.Now()
		}
		count++
	}
	// Flush the last batch snapshot data
	if batch.ValueSize() > 0 {
		if err := batch.Write(); err != nil {
			return err
		}
	}
	log.Info("Imported chain data", "file", f, "count", count,
		"elapsed", common.PrettyDuration(time.Since(start)))
	return nil
}

// ChainDataIterator is an interface that wraps all the functions needed to
// iterate over the chain data to be exported.
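//
// A minimal in-memory implementation, sketched for illustration (the type is
// hypothetical, not part of this package):
//
//	type kvIterator struct {
//		keys, vals [][]byte
//		pos        int
//	}
//
//	func (it *kvIterator) Next() (byte, []byte, []byte, bool) {
//		if it.pos >= len(it.keys) {
//			return 0, nil, nil, false
//		}
//		k, v := it.keys[it.pos], it.vals[it.pos]
//		it.pos++
//		return OpBatchAdd, k, v, true
//	}
//
//	func (it *kvIterator) Release() {}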
type ChainDataIterator interface {
	// Next returns the key-value pair for the next exported entry in the iterator.
	// When the end is reached, it will return (0, nil, nil, false).
	Next() (byte, []byte, []byte, bool)

	// Release releases associated resources. Release should always succeed and can
	// be called multiple times without causing error.
	Release()
}

// ExportChaindata exports the given data type into the specified file,
// truncating any data already present in the file. If the file name has a
// '.gz' suffix, gzip compression is used.
func ExportChaindata(fn string, kind string, iter ChainDataIterator, interrupt chan struct{}) error {
	log.Info("Exporting chain data", "file", fn, "kind", kind)
	defer iter.Release()

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Write the header
	if err := rlp.Encode(writer, &exportHeader{
		Magic:    exportMagic,
		Version:  0,
		Kind:     kind,
		UnixTime: uint64(time.Now().Unix()),
	}); err != nil {
		return err
	}
	// Extract data from source iterator and dump them out to file
	var (
		count  int64
		start  = time.Now()
		logged = time.Now()
	)
	for {
		op, key, val, ok := iter.Next()
		if !ok {
			break
		}
		if err := rlp.Encode(writer, op); err != nil {
			return err
		}
		if err := rlp.Encode(writer, key); err != nil {
			return err
		}
		if err := rlp.Encode(writer, val); err != nil {
			return err
		}
		if count%1000 == 0 {
			// Check interruption emitted by ctrl+c
			select {
			case <-interrupt:
				log.Info("Chain data exporting interrupted", "file", fn,
					"kind", kind, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
				return nil
			default:
			}
			if time.Since(logged) > 8*time.Second {
				log.Info("Exporting chain data", "file", fn, "kind", kind,
					"count", count, "elapsed", common.PrettyDuration(time.Since(start)))
				logged = time.Now()
			}
		}
		count++
	}
	log.Info("Exported chain data", "file", fn, "kind", kind, "count", count,
		"elapsed", common.PrettyDuration(time.Since(start)))
	return nil
}