2015-07-07 02:54:22 +02:00
// Copyright 2014 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
2015-07-22 18:48:40 +02:00
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2015-07-07 02:54:22 +02:00
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
2015-07-22 18:48:40 +02:00
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
2015-01-06 12:13:57 +01:00
2015-07-07 05:08:16 +02:00
// Package utils contains internal helper functions for go-ethereum commands.
2014-05-14 12:41:30 +02:00
package utils
import (
2021-11-02 18:31:45 +08:00
"bufio"
2016-12-12 15:08:23 +00:00
"compress/gzip"
2021-11-02 18:31:45 +08:00
"errors"
2014-06-26 18:41:36 +01:00
"fmt"
2015-03-18 13:36:48 +01:00
"io"
2014-08-15 01:07:40 +02:00
"os"
"os/signal"
2016-09-26 17:23:26 +02:00
"runtime"
2016-12-12 15:08:23 +00:00
"strings"
2018-02-20 14:33:34 +02:00
"syscall"
2021-01-19 08:26:42 +00:00
"time"
2014-08-15 01:07:40 +02:00
2018-03-26 13:34:21 +03:00
"github.com/ethereum/go-ethereum/common"
2015-03-06 03:00:41 +01:00
"github.com/ethereum/go-ethereum/core"
2018-05-07 14:35:06 +03:00
"github.com/ethereum/go-ethereum/core/rawdb"
2023-11-22 14:48:25 +01:00
"github.com/ethereum/go-ethereum/core/state/snapshot"
2014-12-23 15:37:03 +01:00
"github.com/ethereum/go-ethereum/core/types"
2018-03-26 13:34:21 +03:00
"github.com/ethereum/go-ethereum/crypto"
2021-02-05 20:51:15 +08:00
"github.com/ethereum/go-ethereum/eth/ethconfig"
2018-03-26 13:34:21 +03:00
"github.com/ethereum/go-ethereum/ethdb"
2016-03-12 00:39:45 +01:00
"github.com/ethereum/go-ethereum/internal/debug"
2017-02-22 14:10:07 +02:00
"github.com/ethereum/go-ethereum/log"
2015-11-17 18:33:25 +02:00
"github.com/ethereum/go-ethereum/node"
2014-12-23 15:37:03 +01:00
"github.com/ethereum/go-ethereum/rlp"
2022-06-27 18:22:36 +02:00
"github.com/urfave/cli/v2"
2014-05-14 12:41:30 +02:00
)
2015-05-28 01:16:57 +02:00
const (
	// importBatchSize is the maximum number of blocks decoded from the
	// input stream and inserted into the chain per batch during import.
	importBatchSize = 2500
)
2015-05-27 15:48:07 +02:00
// Fatalf formats a message to standard error and exits the program.
// The message is also printed to standard output if standard error
// is redirected to a different file.
func Fatalf(format string, args ...interface{}) {
	var out io.Writer = io.MultiWriter(os.Stdout, os.Stderr)
	switch {
	case runtime.GOOS == "windows":
		// The SameFile check below doesn't work on Windows.
		// stdout is unlikely to get redirected though, so just print there.
		out = os.Stdout
	default:
		outInfo, _ := os.Stdout.Stat()
		errInfo, _ := os.Stderr.Stat()
		// If both streams point at the same file, one copy is enough.
		if outInfo != nil && errInfo != nil && os.SameFile(outInfo, errInfo) {
			out = os.Stderr
		}
	}
	fmt.Fprintf(out, "Fatal: "+format+"\n", args...)
	os.Exit(1)
}
2021-12-11 16:51:05 +01:00
func StartNode ( ctx * cli . Context , stack * node . Node , isConsole bool ) {
2015-11-17 18:33:25 +02:00
if err := stack . Start ( ) ; err != nil {
2017-02-22 17:22:50 +02:00
Fatalf ( "Error starting protocol stack: %v" , err )
2015-01-05 17:12:52 +01:00
}
2015-07-06 15:01:13 +02:00
go func ( ) {
sigc := make ( chan os . Signal , 1 )
2018-02-20 14:33:34 +02:00
signal . Notify ( sigc , syscall . SIGINT , syscall . SIGTERM )
2015-07-06 15:01:13 +02:00
defer signal . Stop ( sigc )
2021-01-19 08:26:42 +00:00
2022-05-03 12:12:40 +02:00
minFreeDiskSpace := 2 * ethconfig . Defaults . TrieDirtyCache // Default 2 * 256Mb
2022-06-27 18:22:36 +02:00
if ctx . IsSet ( MinFreeDiskSpaceFlag . Name ) {
minFreeDiskSpace = ctx . Int ( MinFreeDiskSpaceFlag . Name )
} else if ctx . IsSet ( CacheFlag . Name ) || ctx . IsSet ( CacheGCFlag . Name ) {
minFreeDiskSpace = 2 * ctx . Int ( CacheFlag . Name ) * ctx . Int ( CacheGCFlag . Name ) / 100
2021-01-19 08:26:42 +00:00
}
if minFreeDiskSpace > 0 {
go monitorFreeDiskSpace ( sigc , stack . InstanceDir ( ) , uint64 ( minFreeDiskSpace ) * 1024 * 1024 )
}
2021-12-11 16:51:05 +01:00
shutdown := func ( ) {
log . Info ( "Got interrupt, shutting down..." )
go stack . Close ( )
for i := 10 ; i > 0 ; i -- {
<- sigc
if i > 1 {
log . Warn ( "Already shutting down, interrupt more to panic." , "times" , i - 1 )
}
2015-07-06 15:01:13 +02:00
}
2021-12-11 16:51:05 +01:00
debug . Exit ( ) // ensure trace and CPU profile data is flushed.
debug . LoudPanic ( "boom" )
}
if isConsole {
// In JS console mode, SIGINT is ignored because it's handled by the console.
// However, SIGTERM still shuts down the node.
for {
sig := <- sigc
if sig == syscall . SIGTERM {
shutdown ( )
return
}
}
} else {
<- sigc
shutdown ( )
2015-07-06 15:01:13 +02:00
}
} ( )
2015-03-06 03:25:57 +01:00
}
2021-01-19 08:26:42 +00:00
func monitorFreeDiskSpace ( sigc chan os . Signal , path string , freeDiskSpaceCritical uint64 ) {
2023-05-19 20:38:21 +08:00
if path == "" {
return
}
2021-01-19 08:26:42 +00:00
for {
freeSpace , err := getFreeDiskSpace ( path )
if err != nil {
log . Warn ( "Failed to get free disk space" , "path" , path , "err" , err )
break
}
if freeSpace < freeDiskSpaceCritical {
2022-11-20 23:18:18 -10:00
log . Error ( "Low disk space. Gracefully shutting down Geth to prevent database corruption." , "available" , common . StorageSize ( freeSpace ) , "path" , path )
2021-01-19 08:26:42 +00:00
sigc <- syscall . SIGTERM
break
} else if freeSpace < 2 * freeDiskSpaceCritical {
2022-11-20 23:18:18 -10:00
log . Warn ( "Disk space is running low. Geth will shutdown if disk space runs below critical level." , "available" , common . StorageSize ( freeSpace ) , "critical_level" , common . StorageSize ( freeDiskSpaceCritical ) , "path" , path )
2021-01-19 08:26:42 +00:00
}
2022-05-03 12:12:40 +02:00
time . Sleep ( 30 * time . Second )
2021-01-19 08:26:42 +00:00
}
}
2015-08-31 17:09:50 +02:00
func ImportChain ( chain * core . BlockChain , fn string ) error {
2015-05-27 16:02:08 +02:00
// Watch for Ctrl-C while the import is running.
// If a signal is received, the import will stop at the next batch.
interrupt := make ( chan os . Signal , 1 )
stop := make ( chan struct { } )
2018-02-20 14:33:34 +02:00
signal . Notify ( interrupt , syscall . SIGINT , syscall . SIGTERM )
2015-05-27 16:02:08 +02:00
defer signal . Stop ( interrupt )
defer close ( interrupt )
go func ( ) {
if _ , ok := <- interrupt ; ok {
2017-03-02 15:06:16 +02:00
log . Info ( "Interrupted during import, stopping at next batch" )
2015-05-27 16:02:08 +02:00
}
close ( stop )
} ( )
checkInterrupt := func ( ) bool {
select {
case <- stop :
return true
default :
return false
}
}
2017-03-02 15:06:16 +02:00
log . Info ( "Importing blockchain" , "file" , fn )
2018-03-26 13:34:21 +03:00
// Open the file handle and potentially unwrap the gzip stream
2015-05-27 16:02:08 +02:00
fh , err := os . Open ( fn )
2014-12-23 15:37:03 +01:00
if err != nil {
return err
}
defer fh . Close ( )
2016-12-12 15:08:23 +00:00
var reader io . Reader = fh
if strings . HasSuffix ( fn , ".gz" ) {
if reader , err = gzip . NewReader ( reader ) ; err != nil {
return err
}
}
stream := rlp . NewStream ( reader , 0 )
2015-04-13 10:13:52 +02:00
2015-05-27 17:35:08 +02:00
// Run actual the import.
2015-05-28 01:16:57 +02:00
blocks := make ( types . Blocks , importBatchSize )
2015-05-27 13:29:34 +02:00
n := 0
2015-05-27 17:35:08 +02:00
for batch := 0 ; ; batch ++ {
2015-05-27 13:29:34 +02:00
// Load a batch of RLP blocks.
2015-05-27 16:02:08 +02:00
if checkInterrupt ( ) {
2023-05-24 18:21:29 +08:00
return errors . New ( "interrupted" )
2015-05-27 16:02:08 +02:00
}
2015-05-27 13:29:34 +02:00
i := 0
2015-05-28 01:16:57 +02:00
for ; i < importBatchSize ; i ++ {
2015-05-27 13:29:34 +02:00
var b types . Block
if err := stream . Decode ( & b ) ; err == io . EOF {
break
} else if err != nil {
return fmt . Errorf ( "at block %d: %v" , n , err )
2015-04-13 10:13:52 +02:00
}
2015-08-03 17:48:24 +02:00
// don't import first block
if b . NumberU64 ( ) == 0 {
i --
continue
}
2015-05-27 13:29:34 +02:00
blocks [ i ] = & b
n ++
2015-04-13 10:13:52 +02:00
}
2015-05-27 13:29:34 +02:00
if i == 0 {
break
}
// Import the batch.
2015-05-27 16:02:08 +02:00
if checkInterrupt ( ) {
2023-05-24 18:21:29 +08:00
return errors . New ( "interrupted" )
2015-05-27 16:02:08 +02:00
}
2018-02-05 18:40:32 +02:00
missing := missingBlocks ( chain , blocks [ : i ] )
if len ( missing ) == 0 {
2017-03-02 15:06:16 +02:00
log . Info ( "Skipping batch as all blocks present" , "batch" , batch , "first" , blocks [ 0 ] . Hash ( ) , "last" , blocks [ i - 1 ] . Hash ( ) )
2015-05-27 17:35:08 +02:00
continue
}
2023-05-09 09:57:42 +02:00
if failindex , err := chain . InsertChain ( missing ) ; err != nil {
var failnumber uint64
if failindex > 0 && failindex < len ( missing ) {
failnumber = missing [ failindex ] . NumberU64 ( )
} else {
failnumber = missing [ 0 ] . NumberU64 ( )
}
return fmt . Errorf ( "invalid block %d: %v" , failnumber , err )
2015-03-18 13:36:48 +01:00
}
2014-12-23 15:37:03 +01:00
}
return nil
}
2015-03-08 22:44:48 +07:00
2018-02-05 18:40:32 +02:00
func missingBlocks ( chain * core . BlockChain , blocks [ ] * types . Block ) [ ] * types . Block {
head := chain . CurrentBlock ( )
for i , block := range blocks {
// If we're behind the chain head, only check block, state is available at head
2023-03-02 08:29:15 +02:00
if head . Number . Uint64 ( ) > block . NumberU64 ( ) {
2018-02-05 18:40:32 +02:00
if ! chain . HasBlock ( block . Hash ( ) , block . NumberU64 ( ) ) {
return blocks [ i : ]
}
continue
}
// If we're above the chain head, state availability is a must
if ! chain . HasBlockAndState ( block . Hash ( ) , block . NumberU64 ( ) ) {
return blocks [ i : ]
2015-05-27 17:35:08 +02:00
}
}
2018-02-05 18:40:32 +02:00
return nil
2015-05-27 17:35:08 +02:00
}
2018-03-26 13:34:21 +03:00
// ExportChain exports a blockchain into the specified file, truncating any data
// already present in the file.
2015-08-31 17:09:50 +02:00
func ExportChain ( blockchain * core . BlockChain , fn string ) error {
2017-03-02 15:06:16 +02:00
log . Info ( "Exporting blockchain" , "file" , fn )
2018-03-26 13:34:21 +03:00
// Open the file handle and potentially wrap with a gzip stream
2015-03-18 14:04:19 +01:00
fh , err := os . OpenFile ( fn , os . O_CREATE | os . O_WRONLY | os . O_TRUNC , os . ModePerm )
2015-03-18 13:36:48 +01:00
if err != nil {
return err
}
defer fh . Close ( )
2016-12-12 15:08:23 +00:00
var writer io . Writer = fh
if strings . HasSuffix ( fn , ".gz" ) {
writer = gzip . NewWriter ( writer )
defer writer . ( * gzip . Writer ) . Close ( )
}
2018-03-26 13:34:21 +03:00
// Iterate over the blocks and export them
2016-12-12 15:08:23 +00:00
if err := blockchain . Export ( writer ) ; err != nil {
2015-03-08 22:44:48 +07:00
return err
}
2017-03-02 15:06:16 +02:00
log . Info ( "Exported blockchain" , "file" , fn )
2016-12-12 15:08:23 +00:00
2015-03-08 22:44:48 +07:00
return nil
}
2015-06-06 00:02:32 -04:00
2018-03-26 13:34:21 +03:00
// ExportAppendChain exports a blockchain into the specified file, appending to
// the file if data already exists in it.
2015-08-31 17:09:50 +02:00
func ExportAppendChain ( blockchain * core . BlockChain , fn string , first uint64 , last uint64 ) error {
2017-03-02 15:06:16 +02:00
log . Info ( "Exporting blockchain" , "file" , fn )
2018-03-26 13:34:21 +03:00
// Open the file handle and potentially wrap with a gzip stream
2015-06-06 00:02:32 -04:00
fh , err := os . OpenFile ( fn , os . O_CREATE | os . O_APPEND | os . O_WRONLY , os . ModePerm )
if err != nil {
return err
}
defer fh . Close ( )
2016-12-12 15:08:23 +00:00
var writer io . Writer = fh
if strings . HasSuffix ( fn , ".gz" ) {
writer = gzip . NewWriter ( writer )
defer writer . ( * gzip . Writer ) . Close ( )
}
2018-03-26 13:34:21 +03:00
// Iterate over the blocks and export them
2016-12-12 15:08:23 +00:00
if err := blockchain . ExportN ( writer , first , last ) ; err != nil {
2015-06-06 00:02:32 -04:00
return err
}
2017-03-02 15:06:16 +02:00
log . Info ( "Exported blockchain to" , "file" , fn )
2015-06-06 00:02:32 -04:00
return nil
}
2018-03-26 13:34:21 +03:00
// ImportPreimages imports a batch of exported hash preimages into the database.
2021-11-02 18:31:45 +08:00
// It's a part of the deprecated functionality, should be removed in the future.
2018-09-24 15:57:49 +03:00
func ImportPreimages ( db ethdb . Database , fn string ) error {
2018-03-26 13:34:21 +03:00
log . Info ( "Importing preimages" , "file" , fn )
// Open the file handle and potentially unwrap the gzip stream
fh , err := os . Open ( fn )
if err != nil {
return err
}
defer fh . Close ( )
2021-11-02 18:31:45 +08:00
var reader io . Reader = bufio . NewReader ( fh )
2018-03-26 13:34:21 +03:00
if strings . HasSuffix ( fn , ".gz" ) {
if reader , err = gzip . NewReader ( reader ) ; err != nil {
return err
}
}
stream := rlp . NewStream ( reader , 0 )
2021-11-02 18:31:45 +08:00
// Import the preimages in batches to prevent disk thrashing
2018-03-26 13:34:21 +03:00
preimages := make ( map [ common . Hash ] [ ] byte )
for {
// Read the next entry and ensure it's not junk
var blob [ ] byte
if err := stream . Decode ( & blob ) ; err != nil {
if err == io . EOF {
break
}
return err
}
// Accumulate the preimages and flush when enough ws gathered
preimages [ crypto . Keccak256Hash ( blob ) ] = common . CopyBytes ( blob )
if len ( preimages ) > 1024 {
2018-11-09 18:51:07 +08:00
rawdb . WritePreimages ( db , preimages )
2018-03-26 13:34:21 +03:00
preimages = make ( map [ common . Hash ] [ ] byte )
}
}
// Flush the last batch preimage data
if len ( preimages ) > 0 {
2018-11-09 18:51:07 +08:00
rawdb . WritePreimages ( db , preimages )
2018-03-26 13:34:21 +03:00
}
return nil
}
// ExportPreimages exports all known hash preimages into the specified file,
// truncating any data already present in the file.
2021-11-02 18:31:45 +08:00
// It's a part of the deprecated functionality, should be removed in the future.
2018-09-24 15:57:49 +03:00
func ExportPreimages ( db ethdb . Database , fn string ) error {
2018-03-26 13:34:21 +03:00
log . Info ( "Exporting preimages" , "file" , fn )
// Open the file handle and potentially wrap with a gzip stream
fh , err := os . OpenFile ( fn , os . O_CREATE | os . O_WRONLY | os . O_TRUNC , os . ModePerm )
if err != nil {
return err
}
defer fh . Close ( )
var writer io . Writer = fh
if strings . HasSuffix ( fn , ".gz" ) {
writer = gzip . NewWriter ( writer )
defer writer . ( * gzip . Writer ) . Close ( )
}
// Iterate over the preimages and export them
2020-04-15 13:08:53 +02:00
it := db . NewIterator ( [ ] byte ( "secure-key-" ) , nil )
core, cmd, vendor: fixes and database inspection tool (#15)
* core, eth: some fixes for freezer
* vendor, core/rawdb, cmd/geth: add db inspector
* core, cmd/utils: check ancient store path forceily
* cmd/geth, common, core/rawdb: a few fixes
* cmd/geth: support windows file rename and fix rename error
* core: support ancient plugin
* core, cmd: streaming file copy
* cmd, consensus, core, tests: keep genesis in leveldb
* core: write txlookup during ancient init
* core: bump database version
2019-05-14 22:07:44 +08:00
defer it . Release ( )
2018-03-26 13:34:21 +03:00
for it . Next ( ) {
if err := rlp . Encode ( writer , it . Value ( ) ) ; err != nil {
return err
}
}
log . Info ( "Exported preimages" , "file" , fn )
return nil
}
2021-11-02 18:31:45 +08:00
2023-11-22 14:48:25 +01:00
// ExportSnapshotPreimages exports the preimages corresponding to the enumeration of
// the snapshot for a given root. Account and storage slot hashes are streamed from
// a producer goroutine while the main goroutine resolves each hash to its preimage
// and writes the RLP-encoded preimage to fn (gzip-compressed for a ".gz" suffix).
func ExportSnapshotPreimages(chaindb ethdb.Database, snaptree *snapshot.Tree, fn string, root common.Hash) error {
	log.Info("Exporting preimages", "file", fn)

	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	// Enable gzip compressing if file name has gz suffix.
	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		gz := gzip.NewWriter(writer)
		defer gz.Close()
		writer = gz
	}
	buf := bufio.NewWriter(writer)
	defer buf.Flush()
	writer = buf

	type hashAndPreimageSize struct {
		Hash common.Hash
		Size int
	}
	hashCh := make(chan hashAndPreimageSize)

	// done tells the producer goroutine to stop when the consumer bails out
	// early on an error. Without it, the producer would block forever on an
	// unbuffered send to hashCh and leak.
	done := make(chan struct{})
	defer close(done)

	var (
		start     = time.Now()
		logged    = time.Now()
		preimages int
	)
	go func() {
		defer close(hashCh)
		// send delivers one item, aborting if the consumer has given up.
		send := func(item hashAndPreimageSize) bool {
			select {
			case hashCh <- item:
				return true
			case <-done:
				return false
			}
		}
		accIt, err := snaptree.AccountIterator(root, common.Hash{})
		if err != nil {
			log.Error("Failed to create account iterator", "error", err)
			return
		}
		defer accIt.Release()
		for accIt.Next() {
			acc, err := types.FullAccount(accIt.Account())
			if err != nil {
				log.Error("Failed to get full account", "error", err)
				return
			}
			preimages++
			if !send(hashAndPreimageSize{Hash: accIt.Hash(), Size: common.AddressLength}) {
				return
			}
			if acc.Root != (common.Hash{}) && acc.Root != types.EmptyRootHash {
				stIt, err := snaptree.StorageIterator(root, accIt.Hash(), common.Hash{})
				if err != nil {
					log.Error("Failed to create storage iterator", "error", err)
					return
				}
				for stIt.Next() {
					preimages++
					if !send(hashAndPreimageSize{Hash: stIt.Hash(), Size: common.HashLength}) {
						stIt.Release()
						return
					}
					if time.Since(logged) > time.Second*8 {
						logged = time.Now()
						log.Info("Exporting preimages", "count", preimages, "elapsed", common.PrettyDuration(time.Since(start)))
					}
				}
				stIt.Release()
			}
			if time.Since(logged) > time.Second*8 {
				logged = time.Now()
				log.Info("Exporting preimages", "count", preimages, "elapsed", common.PrettyDuration(time.Since(start)))
			}
		}
	}()
	for item := range hashCh {
		preimage := rawdb.ReadPreimage(chaindb, item.Hash)
		if len(preimage) == 0 {
			return fmt.Errorf("missing preimage for %v", item.Hash)
		}
		if len(preimage) != item.Size {
			return fmt.Errorf("invalid preimage size, have %d", len(preimage))
		}
		rlpenc, err := rlp.EncodeToBytes(preimage)
		if err != nil {
			return fmt.Errorf("error encoding preimage: %w", err)
		}
		if _, err := writer.Write(rlpenc); err != nil {
			return fmt.Errorf("failed to write preimage: %w", err)
		}
	}
	log.Info("Exported preimages", "count", preimages, "elapsed", common.PrettyDuration(time.Since(start)), "file", fn)
	return nil
}
2021-11-02 18:31:45 +08:00
// exportHeader is used in the export/import flow. When we do an export,
// the first element we output is the exportHeader.
// Whenever a backwards-incompatible change is made, the Version header
// should be bumped.
// If the importer sees a higher version, it should reject the import.
type exportHeader struct {
	Magic    string // Always set to 'gethdbdump' for disambiguation
	Version  uint64 // Format version; importers reject any version they don't support
	Kind     string // Type of data contained in the dump
	UnixTime uint64 // Time of export, in seconds since the Unix epoch
}

// exportMagic is the fixed Magic value written into every export header.
const exportMagic = "gethdbdump"

// Operation codes preceding each key/value entry in an export stream.
const (
	OpBatchAdd = 0 // entry is a key/value insertion
	OpBatchDel = 1 // entry is a key deletion
)
// ImportLDBData imports a batch of snapshot data into the database
func ImportLDBData ( db ethdb . Database , f string , startIndex int64 , interrupt chan struct { } ) error {
log . Info ( "Importing leveldb data" , "file" , f )
// Open the file handle and potentially unwrap the gzip stream
fh , err := os . Open ( f )
if err != nil {
return err
}
defer fh . Close ( )
var reader io . Reader = bufio . NewReader ( fh )
if strings . HasSuffix ( f , ".gz" ) {
if reader , err = gzip . NewReader ( reader ) ; err != nil {
return err
}
}
stream := rlp . NewStream ( reader , 0 )
// Read the header
var header exportHeader
if err := stream . Decode ( & header ) ; err != nil {
return fmt . Errorf ( "could not decode header: %v" , err )
}
if header . Magic != exportMagic {
return errors . New ( "incompatible data, wrong magic" )
}
if header . Version != 0 {
return fmt . Errorf ( "incompatible version %d, (support only 0)" , header . Version )
}
log . Info ( "Importing data" , "file" , f , "type" , header . Kind , "data age" ,
common . PrettyDuration ( time . Since ( time . Unix ( int64 ( header . UnixTime ) , 0 ) ) ) )
// Import the snapshot in batches to prevent disk thrashing
var (
count int64
start = time . Now ( )
logged = time . Now ( )
batch = db . NewBatch ( )
)
for {
// Read the next entry
var (
op byte
key , val [ ] byte
)
if err := stream . Decode ( & op ) ; err != nil {
if err == io . EOF {
break
}
return err
}
if err := stream . Decode ( & key ) ; err != nil {
return err
}
if err := stream . Decode ( & val ) ; err != nil {
return err
}
if count < startIndex {
count ++
continue
}
switch op {
case OpBatchDel :
batch . Delete ( key )
case OpBatchAdd :
batch . Put ( key , val )
default :
2023-11-15 20:36:57 +08:00
return fmt . Errorf ( "unknown op %d" , op )
2021-11-02 18:31:45 +08:00
}
if batch . ValueSize ( ) > ethdb . IdealBatchSize {
if err := batch . Write ( ) ; err != nil {
return err
}
batch . Reset ( )
}
// Check interruption emitted by ctrl+c
if count % 1000 == 0 {
select {
case <- interrupt :
if err := batch . Write ( ) ; err != nil {
return err
}
log . Info ( "External data import interrupted" , "file" , f , "count" , count , "elapsed" , common . PrettyDuration ( time . Since ( start ) ) )
return nil
default :
}
}
if count % 1000 == 0 && time . Since ( logged ) > 8 * time . Second {
log . Info ( "Importing external data" , "file" , f , "count" , count , "elapsed" , common . PrettyDuration ( time . Since ( start ) ) )
logged = time . Now ( )
}
count += 1
}
// Flush the last batch snapshot data
if batch . ValueSize ( ) > 0 {
if err := batch . Write ( ) ; err != nil {
return err
}
}
log . Info ( "Imported chain data" , "file" , f , "count" , count ,
"elapsed" , common . PrettyDuration ( time . Since ( start ) ) )
return nil
}
// ChainDataIterator is an interface that wraps all necessary functions to
// iterate over the chain data being exported.
type ChainDataIterator interface {
	// Next returns the op code and key-value pair for the next exporting
	// entry in the iterator.
	// When the end is reached, it will return (0, nil, nil, false).
	Next() (byte, []byte, []byte, bool)

	// Release releases associated resources. Release should always succeed and can
	// be called multiple times without causing error.
	Release()
}
// ExportChaindata exports the given data type (truncating any data already present)
// in the file. If the suffix is 'gz', gzip compression is used. The stream starts
// with an exportHeader followed by (op, key, value) triples drawn from iter.
// Closing the interrupt channel stops the export early without error.
func ExportChaindata(fn string, kind string, iter ChainDataIterator, interrupt chan struct{}) error {
	log.Info("Exporting chain data", "file", fn, "kind", kind)
	defer iter.Release()

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		gz := gzip.NewWriter(writer)
		defer gz.Close() // flush the gzip stream before the file closes
		writer = gz
	}
	// Write the header
	if err := rlp.Encode(writer, &exportHeader{
		Magic:    exportMagic,
		Version:  0,
		Kind:     kind,
		UnixTime: uint64(time.Now().Unix()),
	}); err != nil {
		return err
	}
	// Extract data from the source iterator and dump it out to the file
	var (
		written int64
		start   = time.Now()
		logged  = time.Now()
	)
	for {
		op, key, val, ok := iter.Next()
		if !ok {
			break
		}
		if err := rlp.Encode(writer, op); err != nil {
			return err
		}
		if err := rlp.Encode(writer, key); err != nil {
			return err
		}
		if err := rlp.Encode(writer, val); err != nil {
			return err
		}
		if written%1000 == 0 {
			// Check interruption emitted by ctrl+c
			select {
			case <-interrupt:
				log.Info("Chain data exporting interrupted", "file", fn,
					"kind", kind, "count", written, "elapsed", common.PrettyDuration(time.Since(start)))
				return nil
			default:
			}
			if time.Since(logged) > 8*time.Second {
				log.Info("Exporting chain data", "file", fn, "kind", kind,
					"count", written, "elapsed", common.PrettyDuration(time.Since(start)))
				logged = time.Now()
			}
		}
		written++
	}
	log.Info("Exported chain data", "file", fn, "kind", kind, "count", written,
		"elapsed", common.PrettyDuration(time.Since(start)))
	return nil
}