2015-07-07 03:54:22 +03:00
// Copyright 2014 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
2015-07-22 19:48:40 +03:00
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2015-07-07 03:54:22 +03:00
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
2015-07-22 19:48:40 +03:00
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
2015-01-06 13:13:57 +02:00
2015-07-07 06:08:16 +03:00
// Package utils contains internal helper functions for go-ethereum commands.
2014-05-14 13:41:30 +03:00
package utils
import (
2021-11-02 13:31:45 +03:00
"bufio"
2016-12-12 18:08:23 +03:00
"compress/gzip"
2021-11-02 13:31:45 +03:00
"errors"
2014-06-26 20:41:36 +03:00
"fmt"
2015-03-18 14:36:48 +02:00
"io"
2014-08-15 02:07:40 +03:00
"os"
"os/signal"
2016-09-26 18:23:26 +03:00
"runtime"
2016-12-12 18:08:23 +03:00
"strings"
2018-02-20 15:33:34 +03:00
"syscall"
2021-01-19 11:26:42 +03:00
"time"
2014-08-15 02:07:40 +03:00
2018-03-26 13:34:21 +03:00
"github.com/ethereum/go-ethereum/common"
2015-03-06 04:00:41 +02:00
"github.com/ethereum/go-ethereum/core"
2018-05-07 14:35:06 +03:00
"github.com/ethereum/go-ethereum/core/rawdb"
2014-12-23 16:37:03 +02:00
"github.com/ethereum/go-ethereum/core/types"
2018-03-26 13:34:21 +03:00
"github.com/ethereum/go-ethereum/crypto"
2021-02-05 15:51:15 +03:00
"github.com/ethereum/go-ethereum/eth/ethconfig"
2018-03-26 13:34:21 +03:00
"github.com/ethereum/go-ethereum/ethdb"
2016-03-12 01:39:45 +02:00
"github.com/ethereum/go-ethereum/internal/debug"
2017-02-22 15:10:07 +03:00
"github.com/ethereum/go-ethereum/log"
2015-11-17 18:33:25 +02:00
"github.com/ethereum/go-ethereum/node"
2014-12-23 16:37:03 +02:00
"github.com/ethereum/go-ethereum/rlp"
2021-01-19 11:26:42 +03:00
"gopkg.in/urfave/cli.v1"
2014-05-14 13:41:30 +03:00
)
2015-05-28 02:16:57 +03:00
const (
	// importBatchSize is the maximum number of blocks decoded into memory and
	// inserted into the chain per batch during ImportChain.
	importBatchSize = 2500
)
2015-05-27 16:48:07 +03:00
// Fatalf formats a message to standard error and exits the program.
// The message is also printed to standard output if standard error
// is redirected to a different file.
func Fatalf(format string, args ...interface{}) {
	var w io.Writer = io.MultiWriter(os.Stdout, os.Stderr)
	switch {
	case runtime.GOOS == "windows":
		// The SameFile check below doesn't work on Windows.
		// stdout is unlikely to get redirected though, so just print there.
		w = os.Stdout
	default:
		outInfo, _ := os.Stdout.Stat()
		errInfo, _ := os.Stderr.Stat()
		// If both streams point at the same file, a single write suffices.
		if outInfo != nil && errInfo != nil && os.SameFile(outInfo, errInfo) {
			w = os.Stderr
		}
	}
	fmt.Fprintf(w, "Fatal: "+format+"\n", args...)
	os.Exit(1)
}
2021-01-19 11:26:42 +03:00
func StartNode ( ctx * cli . Context , stack * node . Node ) {
2015-11-17 18:33:25 +02:00
if err := stack . Start ( ) ; err != nil {
2017-02-22 18:22:50 +03:00
Fatalf ( "Error starting protocol stack: %v" , err )
2015-01-05 18:12:52 +02:00
}
2015-07-06 16:01:13 +03:00
go func ( ) {
sigc := make ( chan os . Signal , 1 )
2018-02-20 15:33:34 +03:00
signal . Notify ( sigc , syscall . SIGINT , syscall . SIGTERM )
2015-07-06 16:01:13 +03:00
defer signal . Stop ( sigc )
2021-01-19 11:26:42 +03:00
2021-02-05 15:51:15 +03:00
minFreeDiskSpace := ethconfig . Defaults . TrieDirtyCache
2021-01-19 11:26:42 +03:00
if ctx . GlobalIsSet ( MinFreeDiskSpaceFlag . Name ) {
minFreeDiskSpace = ctx . GlobalInt ( MinFreeDiskSpaceFlag . Name )
} else if ctx . GlobalIsSet ( CacheFlag . Name ) || ctx . GlobalIsSet ( CacheGCFlag . Name ) {
minFreeDiskSpace = ctx . GlobalInt ( CacheFlag . Name ) * ctx . GlobalInt ( CacheGCFlag . Name ) / 100
}
if minFreeDiskSpace > 0 {
go monitorFreeDiskSpace ( sigc , stack . InstanceDir ( ) , uint64 ( minFreeDiskSpace ) * 1024 * 1024 )
}
2015-07-06 16:01:13 +03:00
<- sigc
2017-03-02 16:06:16 +03:00
log . Info ( "Got interrupt, shutting down..." )
2020-08-03 20:40:46 +03:00
go stack . Close ( )
2015-07-06 16:01:13 +03:00
for i := 10 ; i > 0 ; i -- {
<- sigc
if i > 1 {
2017-03-02 16:06:16 +03:00
log . Warn ( "Already shutting down, interrupt more to panic." , "times" , i - 1 )
2015-07-06 16:01:13 +03:00
}
}
2016-05-06 12:04:52 +03:00
debug . Exit ( ) // ensure trace and CPU profile data is flushed.
2016-03-12 01:39:45 +02:00
debug . LoudPanic ( "boom" )
2015-07-06 16:01:13 +03:00
} ( )
2015-03-06 04:25:57 +02:00
}
2021-01-19 11:26:42 +03:00
// monitorFreeDiskSpace periodically polls the free disk space at path and
// sends SIGTERM on sigc once it drops below freeDiskSpaceCritical (bytes),
// warning first when it falls below twice that threshold. The loop exits
// after signalling, or if the free space cannot be determined.
func monitorFreeDiskSpace(sigc chan os.Signal, path string, freeDiskSpaceCritical uint64) {
	for {
		freeSpace, err := getFreeDiskSpace(path)
		if err != nil {
			log.Warn("Failed to get free disk space", "path", path, "err", err)
			return
		}
		switch {
		case freeSpace < freeDiskSpaceCritical:
			log.Error("Low disk space. Gracefully shutting down Geth to prevent database corruption.", "available", common.StorageSize(freeSpace))
			sigc <- syscall.SIGTERM
			return
		case freeSpace < 2*freeDiskSpaceCritical:
			log.Warn("Disk space is running low. Geth will shutdown if disk space runs below critical level.", "available", common.StorageSize(freeSpace), "critical_level", common.StorageSize(freeDiskSpaceCritical))
		}
		time.Sleep(60 * time.Second)
	}
}
2015-08-31 18:09:50 +03:00
func ImportChain ( chain * core . BlockChain , fn string ) error {
2015-05-27 17:02:08 +03:00
// Watch for Ctrl-C while the import is running.
// If a signal is received, the import will stop at the next batch.
interrupt := make ( chan os . Signal , 1 )
stop := make ( chan struct { } )
2018-02-20 15:33:34 +03:00
signal . Notify ( interrupt , syscall . SIGINT , syscall . SIGTERM )
2015-05-27 17:02:08 +03:00
defer signal . Stop ( interrupt )
defer close ( interrupt )
go func ( ) {
if _ , ok := <- interrupt ; ok {
2017-03-02 16:06:16 +03:00
log . Info ( "Interrupted during import, stopping at next batch" )
2015-05-27 17:02:08 +03:00
}
close ( stop )
} ( )
checkInterrupt := func ( ) bool {
select {
case <- stop :
return true
default :
return false
}
}
2017-03-02 16:06:16 +03:00
log . Info ( "Importing blockchain" , "file" , fn )
2018-03-26 13:34:21 +03:00
// Open the file handle and potentially unwrap the gzip stream
2015-05-27 17:02:08 +03:00
fh , err := os . Open ( fn )
2014-12-23 16:37:03 +02:00
if err != nil {
return err
}
defer fh . Close ( )
2016-12-12 18:08:23 +03:00
var reader io . Reader = fh
if strings . HasSuffix ( fn , ".gz" ) {
if reader , err = gzip . NewReader ( reader ) ; err != nil {
return err
}
}
stream := rlp . NewStream ( reader , 0 )
2015-04-13 11:13:52 +03:00
2015-05-27 18:35:08 +03:00
// Run actual the import.
2015-05-28 02:16:57 +03:00
blocks := make ( types . Blocks , importBatchSize )
2015-05-27 14:29:34 +03:00
n := 0
2015-05-27 18:35:08 +03:00
for batch := 0 ; ; batch ++ {
2015-05-27 14:29:34 +03:00
// Load a batch of RLP blocks.
2015-05-27 17:02:08 +03:00
if checkInterrupt ( ) {
return fmt . Errorf ( "interrupted" )
}
2015-05-27 14:29:34 +03:00
i := 0
2015-05-28 02:16:57 +03:00
for ; i < importBatchSize ; i ++ {
2015-05-27 14:29:34 +03:00
var b types . Block
if err := stream . Decode ( & b ) ; err == io . EOF {
break
} else if err != nil {
return fmt . Errorf ( "at block %d: %v" , n , err )
2015-04-13 11:13:52 +03:00
}
2015-08-03 18:48:24 +03:00
// don't import first block
if b . NumberU64 ( ) == 0 {
i --
continue
}
2015-05-27 14:29:34 +03:00
blocks [ i ] = & b
n ++
2015-04-13 11:13:52 +03:00
}
2015-05-27 14:29:34 +03:00
if i == 0 {
break
}
// Import the batch.
2015-05-27 17:02:08 +03:00
if checkInterrupt ( ) {
return fmt . Errorf ( "interrupted" )
}
2018-02-05 19:40:32 +03:00
missing := missingBlocks ( chain , blocks [ : i ] )
if len ( missing ) == 0 {
2017-03-02 16:06:16 +03:00
log . Info ( "Skipping batch as all blocks present" , "batch" , batch , "first" , blocks [ 0 ] . Hash ( ) , "last" , blocks [ i - 1 ] . Hash ( ) )
2015-05-27 18:35:08 +03:00
continue
}
2018-02-05 19:40:32 +03:00
if _ , err := chain . InsertChain ( missing ) ; err != nil {
2015-05-27 14:29:34 +03:00
return fmt . Errorf ( "invalid block %d: %v" , n , err )
2015-03-18 14:36:48 +02:00
}
2014-12-23 16:37:03 +02:00
}
return nil
}
2015-03-08 17:44:48 +02:00
2018-02-05 19:40:32 +03:00
func missingBlocks ( chain * core . BlockChain , blocks [ ] * types . Block ) [ ] * types . Block {
head := chain . CurrentBlock ( )
for i , block := range blocks {
// If we're behind the chain head, only check block, state is available at head
if head . NumberU64 ( ) > block . NumberU64 ( ) {
if ! chain . HasBlock ( block . Hash ( ) , block . NumberU64 ( ) ) {
return blocks [ i : ]
}
continue
}
// If we're above the chain head, state availability is a must
if ! chain . HasBlockAndState ( block . Hash ( ) , block . NumberU64 ( ) ) {
return blocks [ i : ]
2015-05-27 18:35:08 +03:00
}
}
2018-02-05 19:40:32 +03:00
return nil
2015-05-27 18:35:08 +03:00
}
2018-03-26 13:34:21 +03:00
// ExportChain exports a blockchain into the specified file, truncating any data
// already present in the file.
2015-08-31 18:09:50 +03:00
func ExportChain ( blockchain * core . BlockChain , fn string ) error {
2017-03-02 16:06:16 +03:00
log . Info ( "Exporting blockchain" , "file" , fn )
2018-03-26 13:34:21 +03:00
// Open the file handle and potentially wrap with a gzip stream
2015-03-18 15:04:19 +02:00
fh , err := os . OpenFile ( fn , os . O_CREATE | os . O_WRONLY | os . O_TRUNC , os . ModePerm )
2015-03-18 14:36:48 +02:00
if err != nil {
return err
}
defer fh . Close ( )
2016-12-12 18:08:23 +03:00
var writer io . Writer = fh
if strings . HasSuffix ( fn , ".gz" ) {
writer = gzip . NewWriter ( writer )
defer writer . ( * gzip . Writer ) . Close ( )
}
2018-03-26 13:34:21 +03:00
// Iterate over the blocks and export them
2016-12-12 18:08:23 +03:00
if err := blockchain . Export ( writer ) ; err != nil {
2015-03-08 17:44:48 +02:00
return err
}
2017-03-02 16:06:16 +03:00
log . Info ( "Exported blockchain" , "file" , fn )
2016-12-12 18:08:23 +03:00
2015-03-08 17:44:48 +02:00
return nil
}
2015-06-06 07:02:32 +03:00
2018-03-26 13:34:21 +03:00
// ExportAppendChain exports a blockchain into the specified file, appending to
// the file if data already exists in it.
2015-08-31 18:09:50 +03:00
func ExportAppendChain ( blockchain * core . BlockChain , fn string , first uint64 , last uint64 ) error {
2017-03-02 16:06:16 +03:00
log . Info ( "Exporting blockchain" , "file" , fn )
2018-03-26 13:34:21 +03:00
// Open the file handle and potentially wrap with a gzip stream
2015-06-06 07:02:32 +03:00
fh , err := os . OpenFile ( fn , os . O_CREATE | os . O_APPEND | os . O_WRONLY , os . ModePerm )
if err != nil {
return err
}
defer fh . Close ( )
2016-12-12 18:08:23 +03:00
var writer io . Writer = fh
if strings . HasSuffix ( fn , ".gz" ) {
writer = gzip . NewWriter ( writer )
defer writer . ( * gzip . Writer ) . Close ( )
}
2018-03-26 13:34:21 +03:00
// Iterate over the blocks and export them
2016-12-12 18:08:23 +03:00
if err := blockchain . ExportN ( writer , first , last ) ; err != nil {
2015-06-06 07:02:32 +03:00
return err
}
2017-03-02 16:06:16 +03:00
log . Info ( "Exported blockchain to" , "file" , fn )
2015-06-06 07:02:32 +03:00
return nil
}
2018-03-26 13:34:21 +03:00
// ImportPreimages imports a batch of exported hash preimages into the database.
2021-11-02 13:31:45 +03:00
// It's a part of the deprecated functionality, should be removed in the future.
2018-09-24 15:57:49 +03:00
func ImportPreimages ( db ethdb . Database , fn string ) error {
2018-03-26 13:34:21 +03:00
log . Info ( "Importing preimages" , "file" , fn )
// Open the file handle and potentially unwrap the gzip stream
fh , err := os . Open ( fn )
if err != nil {
return err
}
defer fh . Close ( )
2021-11-02 13:31:45 +03:00
var reader io . Reader = bufio . NewReader ( fh )
2018-03-26 13:34:21 +03:00
if strings . HasSuffix ( fn , ".gz" ) {
if reader , err = gzip . NewReader ( reader ) ; err != nil {
return err
}
}
stream := rlp . NewStream ( reader , 0 )
2021-11-02 13:31:45 +03:00
// Import the preimages in batches to prevent disk thrashing
2018-03-26 13:34:21 +03:00
preimages := make ( map [ common . Hash ] [ ] byte )
for {
// Read the next entry and ensure it's not junk
var blob [ ] byte
if err := stream . Decode ( & blob ) ; err != nil {
if err == io . EOF {
break
}
return err
}
// Accumulate the preimages and flush when enough ws gathered
preimages [ crypto . Keccak256Hash ( blob ) ] = common . CopyBytes ( blob )
if len ( preimages ) > 1024 {
2018-11-09 13:51:07 +03:00
rawdb . WritePreimages ( db , preimages )
2018-03-26 13:34:21 +03:00
preimages = make ( map [ common . Hash ] [ ] byte )
}
}
// Flush the last batch preimage data
if len ( preimages ) > 0 {
2018-11-09 13:51:07 +03:00
rawdb . WritePreimages ( db , preimages )
2018-03-26 13:34:21 +03:00
}
return nil
}
// ExportPreimages exports all known hash preimages into the specified file,
// truncating any data already present in the file.
2021-11-02 13:31:45 +03:00
// It's a part of the deprecated functionality, should be removed in the future.
2018-09-24 15:57:49 +03:00
func ExportPreimages ( db ethdb . Database , fn string ) error {
2018-03-26 13:34:21 +03:00
log . Info ( "Exporting preimages" , "file" , fn )
// Open the file handle and potentially wrap with a gzip stream
fh , err := os . OpenFile ( fn , os . O_CREATE | os . O_WRONLY | os . O_TRUNC , os . ModePerm )
if err != nil {
return err
}
defer fh . Close ( )
var writer io . Writer = fh
if strings . HasSuffix ( fn , ".gz" ) {
writer = gzip . NewWriter ( writer )
defer writer . ( * gzip . Writer ) . Close ( )
}
// Iterate over the preimages and export them
2020-04-15 14:08:53 +03:00
it := db . NewIterator ( [ ] byte ( "secure-key-" ) , nil )
core, cmd, vendor: fixes and database inspection tool (#15)
* core, eth: some fixes for freezer
* vendor, core/rawdb, cmd/geth: add db inspector
* core, cmd/utils: check ancient store path forceily
* cmd/geth, common, core/rawdb: a few fixes
* cmd/geth: support windows file rename and fix rename error
* core: support ancient plugin
* core, cmd: streaming file copy
* cmd, consensus, core, tests: keep genesis in leveldb
* core: write txlookup during ancient init
* core: bump database version
2019-05-14 17:07:44 +03:00
defer it . Release ( )
2018-03-26 13:34:21 +03:00
for it . Next ( ) {
if err := rlp . Encode ( writer , it . Value ( ) ) ; err != nil {
return err
}
}
log . Info ( "Exported preimages" , "file" , fn )
return nil
}
2021-11-02 13:31:45 +03:00
// exportHeader is used in the export/import flow. When we do an export,
// the first element we output is the exportHeader.
// Whenever a backwards-incompatible change is made, the Version header
// should be bumped.
// If the importer sees a higher version, it should reject the import.
type exportHeader struct {
	Magic    string // Always set to 'gethdbdump' for disambiguation
	Version  uint64 // Dump format version; only 0 is accepted by ImportLDBData
	Kind     string // Kind of data contained in the dump, as supplied by the exporter
	UnixTime uint64 // Unix timestamp (seconds) of when the export was created
}

// exportMagic is the value carried in exportHeader.Magic by every dump.
const exportMagic = "gethdbdump"

// Operation codes preceding each key/value entry in a dump stream.
const (
	OpBatchAdd = 0 // entry is a key/value pair to insert (batch.Put)
	OpBatchDel = 1 // entry is a key to delete (batch.Delete)
)
// ImportLDBData imports a batch of snapshot data into the database. The file
// (optionally gzip-compressed when named "*.gz") must start with an RLP-encoded
// exportHeader, followed by a sequence of (op, key, value) triples. Entries
// before startIndex are skipped, allowing a previously interrupted import to
// be resumed. The interrupt channel aborts the import gracefully, flushing
// whatever has been batched so far.
func ImportLDBData(db ethdb.Database, f string, startIndex int64, interrupt chan struct{}) error {
	log.Info("Importing leveldb data", "file", f)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(f)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = bufio.NewReader(fh)
	if strings.HasSuffix(f, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Read and validate the header before touching the database.
	var header exportHeader
	if err := stream.Decode(&header); err != nil {
		return fmt.Errorf("could not decode header: %v", err)
	}
	if header.Magic != exportMagic {
		return errors.New("incompatible data, wrong magic")
	}
	if header.Version != 0 {
		return fmt.Errorf("incompatible version %d, (support only 0)", header.Version)
	}
	log.Info("Importing data", "file", f, "type", header.Kind, "data age",
		common.PrettyDuration(time.Since(time.Unix(int64(header.UnixTime), 0))))

	// Import the snapshot in batches to prevent disk thrashing
	var (
		count  int64
		start  = time.Now()
		logged = time.Now()
		batch  = db.NewBatch()
	)
	for {
		// Read the next entry
		var (
			op       byte
			key, val []byte
		)
		if err := stream.Decode(&op); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		if err := stream.Decode(&key); err != nil {
			return err
		}
		if err := stream.Decode(&val); err != nil {
			return err
		}
		// Skip entries already handled in a previous (interrupted) run.
		if count < startIndex {
			count++
			continue
		}
		switch op {
		case OpBatchDel:
			batch.Delete(key)
		case OpBatchAdd:
			batch.Put(key, val)
		default:
			// No trailing newline in the error: go vet rejects Errorf
			// messages ending in "\n".
			return fmt.Errorf("unknown op %d", op)
		}
		if batch.ValueSize() > ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return err
			}
			batch.Reset()
		}
		// Check interruption emitted by ctrl+c
		if count%1000 == 0 {
			select {
			case <-interrupt:
				// Persist what we have so the import can resume later.
				if err := batch.Write(); err != nil {
					return err
				}
				log.Info("External data import interrupted", "file", f, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
				return nil
			default:
			}
		}
		if count%1000 == 0 && time.Since(logged) > 8*time.Second {
			log.Info("Importing external data", "file", f, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
			logged = time.Now()
		}
		count++
	}
	// Flush the last batch snapshot data
	if batch.ValueSize() > 0 {
		if err := batch.Write(); err != nil {
			return err
		}
	}
	log.Info("Imported chain data", "file", f, "count", count,
		"elapsed", common.PrettyDuration(time.Since(start)))
	return nil
}
// ChainDataIterator is an interface that wraps all necessary functions to
// iterate over the chain data being exported.
type ChainDataIterator interface {
	// Next returns the key-value pair for the next exported entry, preceded by
	// its operation code (OpBatchAdd or OpBatchDel).
	// When the end is reached, it will return (0, nil, nil, false).
	Next() (byte, []byte, []byte, bool)

	// Release releases associated resources. Release should always succeed and can
	// be called multiple times without causing error.
	Release()
}
// ExportChaindata exports the given data type (truncating any data already present)
// in the file. If the suffix is 'gz', gzip compression is used.
func ExportChaindata(fn string, kind string, iter ChainDataIterator, interrupt chan struct{}) error {
	log.Info("Exporting chain data", "file", fn, "kind", kind)
	defer iter.Release()

	// Open (or create) the output file, truncating existing contents.
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	// Compress the stream when a ".gz" suffix is requested.
	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		gzw := gzip.NewWriter(writer)
		defer gzw.Close()
		writer = gzw
	}
	// Emit the header identifying the dump format and contents.
	header := &exportHeader{
		Magic:    exportMagic,
		Version:  0,
		Kind:     kind,
		UnixTime: uint64(time.Now().Unix()),
	}
	if err := rlp.Encode(writer, header); err != nil {
		return err
	}
	// Drain the source iterator and dump each entry out to the file.
	var (
		count  int64
		start  = time.Now()
		logged = time.Now()
	)
	for {
		op, key, val, ok := iter.Next()
		if !ok {
			break
		}
		// Each entry is written as three consecutive RLP values.
		for _, item := range []interface{}{op, key, val} {
			if err := rlp.Encode(writer, item); err != nil {
				return err
			}
		}
		if count%1000 == 0 {
			// Check interruption emitted by ctrl+c
			select {
			case <-interrupt:
				log.Info("Chain data exporting interrupted", "file", fn,
					"kind", kind, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
				return nil
			default:
			}
			if time.Since(logged) > 8*time.Second {
				log.Info("Exporting chain data", "file", fn, "kind", kind,
					"count", count, "elapsed", common.PrettyDuration(time.Since(start)))
				logged = time.Now()
			}
		}
		count++
	}
	log.Info("Exported chain data", "file", fn, "kind", kind, "count", count,
		"elapsed", common.PrettyDuration(time.Since(start)))
	return nil
}