// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package node

import (
	crand "crypto/rand"
	"errors"
	"fmt"
	"hash/crc32"
	"net/http"
	"os"
	"path"
	"path/filepath"
	"reflect"
	"strings"
	"sync"

	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/ethdb/leveldb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/gofrs/flock"
)

// Node is a container on which services can be registered.
type Node struct {
	eventmux      *event.TypeMux
	config        *Config
	accman        *accounts.Manager
	log           log.Logger
	keyDir        string        // key store directory
	keyDirTemp    bool          // If true, key directory will be removed by Stop
	dirLock       *flock.Flock  // prevents concurrent use of instance directory
	stop          chan struct{} // Channel to wait for termination notifications
	server        *p2p.Server   // Currently running P2P networking layer
	startStopLock sync.Mutex    // Start/Stop are protected by an additional lock
	state         int           // Tracks state of node lifecycle

	lock          sync.Mutex
	lifecycles    []Lifecycle // All registered backends, services, and auxiliary services that have a lifecycle
	rpcAPIs       []rpc.API   // List of APIs currently provided by the node
	http          *httpServer // Unauthenticated HTTP RPC server
	ws            *httpServer // Unauthenticated WebSocket RPC server
	httpAuth      *httpServer // JWT-authenticated HTTP RPC server
	wsAuth        *httpServer // JWT-authenticated WebSocket RPC server
	ipc           *ipcServer  // Stores information about the ipc http server
	inprocHandler *rpc.Server // In-process RPC request handler to process the API requests

	databases map[*closeTrackingDB]struct{} // All open databases
}

// Node lifecycle states.
const (
	initializingState = iota
	runningState
	closedState
)

// chainDataHandlesPercentage is the percentage of file handles assigned to the
// chain database when a separate diff store is also opened.
const chainDataHandlesPercentage = 80
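
// Illustrative only (editor's note): with 512 file handles and the diff store
// enabled, OpenAndMergeDatabase below assigns 512*80/100 = 409 handles to the
// chain database and leaves the remaining 103 for the diff store.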

// New creates a new P2P node, ready for protocol registration.
func New(conf *Config) (*Node, error) {
	// Copy config and resolve the datadir so future changes to the current
	// working directory don't affect the node.
	confCopy := *conf
	conf = &confCopy
	if conf.DataDir != "" {
		absdatadir, err := filepath.Abs(conf.DataDir)
		if err != nil {
			return nil, err
		}
		conf.DataDir = absdatadir
	}
	if conf.LogConfig != nil {
		if conf.LogConfig.TermTimeFormat != nil && *conf.LogConfig.TermTimeFormat != "" {
			log.SetTermTimeFormat(*conf.LogConfig.TermTimeFormat)
		}

		if conf.LogConfig.TimeFormat != nil && *conf.LogConfig.TimeFormat != "" {
			log.SetTimeFormat(*conf.LogConfig.TimeFormat)
		}

		if conf.LogConfig.FileRoot != nil && conf.LogConfig.FilePath != nil &&
			conf.LogConfig.MaxBytesSize != nil && conf.LogConfig.Level != nil {
			// log to file
			logFilePath := ""
			if *conf.LogConfig.FileRoot == "" {
				logFilePath = path.Join(conf.DataDir, *conf.LogConfig.FilePath)
			} else {
				logFilePath = path.Join(*conf.LogConfig.FileRoot, *conf.LogConfig.FilePath)
			}

			rotateHours := uint(1) // To maintain backwards compatibility, if RotateHours is not set, then it defaults to 1
			if conf.LogConfig.RotateHours != nil {
				if *conf.LogConfig.RotateHours > 23 {
					return nil, errors.New("Config.LogConfig.RotateHours cannot be greater than 23")
				}

				rotateHours = *conf.LogConfig.RotateHours
			}

			maxBackups := uint(0)
			if conf.LogConfig.MaxBackups != nil {
				maxBackups = *conf.LogConfig.MaxBackups
			}

			log.SetDefault(log.NewLogger(log.RotatingFileHandler(logFilePath, *conf.LogConfig.MaxBytesSize, maxBackups, *conf.LogConfig.Level, rotateHours)))
		}
	}
	if conf.Logger == nil {
		conf.Logger = log.New()
	}

	// Ensure that the instance name doesn't cause weird conflicts with
	// other files in the data directory.
	if strings.ContainsAny(conf.Name, `/\`) {
		return nil, errors.New(`Config.Name must not contain '/' or '\'`)
	}
	if conf.Name == datadirDefaultKeyStore {
		return nil, errors.New(`Config.Name cannot be "` + datadirDefaultKeyStore + `"`)
	}
	if strings.HasSuffix(conf.Name, ".ipc") {
		return nil, errors.New(`Config.Name cannot end in ".ipc"`)
	}
	server := rpc.NewServer()
	server.SetBatchLimits(conf.BatchRequestLimit, conf.BatchResponseMaxSize)
	node := &Node{
		config:        conf,
		inprocHandler: server,
		eventmux:      new(event.TypeMux),
		log:           conf.Logger,
		stop:          make(chan struct{}),
		server:        &p2p.Server{Config: conf.P2P},
		databases:     make(map[*closeTrackingDB]struct{}),
	}

	// Register built-in APIs.
	node.rpcAPIs = append(node.rpcAPIs, node.apis()...)

	// Acquire the instance directory lock.
	if err := node.openDataDir(); err != nil {
		return nil, err
	}
	keyDir, isEphem, err := conf.GetKeyStoreDir()
	if err != nil {
		return nil, err
	}
	node.keyDir = keyDir
	node.keyDirTemp = isEphem
	// Creates an empty AccountManager with no backends. Callers (e.g. cmd/geth)
	// are required to add the backends later on.
	node.accman = accounts.NewManager(&accounts.Config{InsecureUnlockAllowed: conf.InsecureUnlockAllowed})

	// Initialize the p2p server. This creates the node key and discovery databases.
	node.server.Config.PrivateKey = node.config.NodeKey()
	node.server.Config.Name = node.config.NodeName()
	node.server.Config.Logger = node.log
	node.config.checkLegacyFiles()
	if node.server.Config.NodeDatabase == "" {
		node.server.Config.NodeDatabase = node.config.NodeDB()
	}

	// Check HTTP/WS prefixes are valid.
	if err := validatePrefix("HTTP", conf.HTTPPathPrefix); err != nil {
		return nil, err
	}
	if err := validatePrefix("WebSocket", conf.WSPathPrefix); err != nil {
		return nil, err
	}

	// Configure RPC servers.
	node.http = newHTTPServer(node.log, conf.HTTPTimeouts)
	node.httpAuth = newHTTPServer(node.log, conf.HTTPTimeouts)
	node.ws = newHTTPServer(node.log, rpc.DefaultHTTPTimeouts)
	node.wsAuth = newHTTPServer(node.log, rpc.DefaultHTTPTimeouts)
	node.ipc = newIPCServer(node.log, conf.IPCEndpoint())

	return node, nil
}
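
// Illustrative only (editor's sketch, not part of the original source): the
// typical lifecycle of a Node created with New. The DataDir and Name values
// are hypothetical placeholders.
//
//	stack, err := New(&Config{DataDir: "/tmp/example-node", Name: "demo"})
//	if err != nil {
//		log.Crit("failed to create node", "err", err)
//	}
//	if err := stack.Start(); err != nil {
//		log.Crit("failed to start node", "err", err)
//	}
//	defer stack.Close()
//	stack.Wait() // blocks until Close is called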

// Start starts all registered lifecycles, RPC services and p2p networking.
// Node can only be started once.
func (n *Node) Start() error {
	n.startStopLock.Lock()
	defer n.startStopLock.Unlock()

	n.lock.Lock()
	switch n.state {
	case runningState:
		n.lock.Unlock()
		return ErrNodeRunning
	case closedState:
		n.lock.Unlock()
		return ErrNodeStopped
	}
	n.state = runningState
	// open networking and RPC endpoints
	err := n.openEndpoints()
	lifecycles := make([]Lifecycle, len(n.lifecycles))
	copy(lifecycles, n.lifecycles)
	n.lock.Unlock()

	// Check if endpoint startup failed.
	if err != nil {
		n.doClose(nil)
		return err
	}
	// Start all registered lifecycles.
	var started []Lifecycle
	for _, lifecycle := range lifecycles {
		if err = lifecycle.Start(); err != nil {
			break
		}
		started = append(started, lifecycle)
	}
	// Check if any lifecycle failed to start.
	if err != nil {
		n.stopServices(started)
		n.doClose(nil)
	}
	return err
}

// Close stops the Node and releases resources acquired in
// Node constructor New.
func (n *Node) Close() error {
	n.startStopLock.Lock()
	defer n.startStopLock.Unlock()

	n.lock.Lock()
	state := n.state
	n.lock.Unlock()
	switch state {
	case initializingState:
		// The node was never started.
		return n.doClose(nil)
	case runningState:
		// The node was started, release resources acquired by Start().
		var errs []error
		if err := n.stopServices(n.lifecycles); err != nil {
			errs = append(errs, err)
		}
		return n.doClose(errs)
	case closedState:
		return ErrNodeStopped
	default:
		panic(fmt.Sprintf("node is in unknown state %d", state))
	}
}

// doClose releases resources acquired by New(), collecting errors.
func (n *Node) doClose(errs []error) error {
	// Close databases. This needs the lock because it needs to
	// synchronize with OpenDatabase*.
	n.lock.Lock()
	n.state = closedState
	errs = append(errs, n.closeDatabases()...)
	n.lock.Unlock()

	if err := n.accman.Close(); err != nil {
		errs = append(errs, err)
	}
	if n.keyDirTemp {
		if err := os.RemoveAll(n.keyDir); err != nil {
			errs = append(errs, err)
		}
	}

	// Release instance directory lock.
	n.closeDataDir()

	// Unblock n.Wait.
	close(n.stop)

	// Report any errors that might have occurred.
	switch len(errs) {
	case 0:
		return nil
	case 1:
		return errs[0]
	default:
		return fmt.Errorf("%v", errs)
	}
}

// openEndpoints starts all network and RPC endpoints.
func (n *Node) openEndpoints() error {
	// start networking endpoints
	n.log.Info("Starting peer-to-peer node", "instance", n.server.Name)
	if err := n.server.Start(); err != nil {
		return convertFileLockError(err)
	}
	// start RPC endpoints
	err := n.startRPC()
	if err != nil {
		n.stopRPC()
		n.server.Stop()
	}
	return err
}

// containsLifecycle checks if 'lfs' contains 'l'.
func containsLifecycle(lfs []Lifecycle, l Lifecycle) bool {
	for _, obj := range lfs {
		if obj == l {
			return true
		}
	}
	return false
}

// stopServices terminates running services, RPC and p2p networking.
// It is the inverse of Start.
func (n *Node) stopServices(running []Lifecycle) error {
	n.stopRPC()

	// Stop running lifecycles in reverse order.
	failure := &StopError{Services: make(map[reflect.Type]error)}
	for i := len(running) - 1; i >= 0; i-- {
		if err := running[i].Stop(); err != nil {
			failure.Services[reflect.TypeOf(running[i])] = err
		}
	}

	// Stop p2p networking.
	n.server.Stop()

	if len(failure.Services) > 0 {
		return failure
	}
	return nil
}

func (n *Node) openDataDir() error {
	if n.config.DataDir == "" {
		return nil // ephemeral
	}

	instdir := filepath.Join(n.config.DataDir, n.config.name())
	if err := os.MkdirAll(instdir, 0700); err != nil {
		return err
	}
	// Lock the instance directory to prevent concurrent use by another instance as well as
	// accidental use of the instance directory as a database.
	n.dirLock = flock.New(filepath.Join(instdir, "LOCK"))

	if locked, err := n.dirLock.TryLock(); err != nil {
		return err
	} else if !locked {
		return ErrDatadirUsed
	}
	return nil
}

func (n *Node) closeDataDir() {
	// Release instance directory lock.
	if n.dirLock != nil && n.dirLock.Locked() {
		n.dirLock.Unlock()
		n.dirLock = nil
	}
}

// obtainJWTSecret loads the jwt-secret, either from the provided config
// or from the default location. If neither of those is present, it generates
// a new secret and stores it in the default location.
func (n *Node) obtainJWTSecret(cliParam string) ([]byte, error) {
	fileName := cliParam
	if len(fileName) == 0 {
		// no path provided, use default
		fileName = n.ResolvePath(datadirJWTKey)
	}
	// try reading from file
	if data, err := os.ReadFile(fileName); err == nil {
		jwtSecret := common.FromHex(strings.TrimSpace(string(data)))
		if len(jwtSecret) == 32 {
			log.Info("Loaded JWT secret file", "path", fileName, "crc32", fmt.Sprintf("%#x", crc32.ChecksumIEEE(jwtSecret)))
			return jwtSecret, nil
		}
		log.Error("Invalid JWT secret", "path", fileName, "length", len(jwtSecret))
		return nil, errors.New("invalid JWT secret")
	}
	// Need to generate one
	jwtSecret := make([]byte, 32)
	crand.Read(jwtSecret)
	// if we're in --dev mode, don't bother saving, just show it
	if fileName == "" {
		log.Info("Generated ephemeral JWT secret", "secret", hexutil.Encode(jwtSecret))
		return jwtSecret, nil
	}
	if err := os.WriteFile(fileName, []byte(hexutil.Encode(jwtSecret)), 0600); err != nil {
		return nil, err
	}
	log.Info("Generated JWT secret", "path", fileName)
	return jwtSecret, nil
}
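
// Illustrative only (editor's note): a secret file supplied out-of-band must
// contain hex that decodes to exactly 32 bytes, matching what this method
// writes itself. A sketch of producing such a file:
//
//	secret := make([]byte, 32)
//	crand.Read(secret)
//	os.WriteFile("jwt.hex", []byte(hexutil.Encode(secret)), 0600)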

// startRPC is a helper method to configure all the various RPC endpoints during node
// startup. It's not meant to be called at any time afterwards as it makes certain
// assumptions about the state of the node.
func (n *Node) startRPC() error {
	// Filter out personal api
	var apis []rpc.API
	for _, api := range n.rpcAPIs {
		if api.Namespace == "personal" {
			if n.config.EnablePersonal {
				log.Warn("Deprecated personal namespace activated")
			} else {
				continue
			}
		}
		apis = append(apis, api)
	}
	if err := n.startInProc(apis); err != nil {
		return err
	}

	// Configure IPC.
	if n.ipc.endpoint != "" {
		if err := n.ipc.start(apis); err != nil {
			return err
		}
	}
	var (
		servers           []*httpServer
		openAPIs, allAPIs = n.getAPIs()
	)

	rpcConfig := rpcEndpointConfig{
		batchItemLimit:         n.config.BatchRequestLimit,
		batchResponseSizeLimit: n.config.BatchResponseMaxSize,
	}

	initHttp := func(server *httpServer, port int) error {
		if err := server.setListenAddr(n.config.HTTPHost, port); err != nil {
			return err
		}
		if err := server.enableRPC(openAPIs, httpConfig{
			CorsAllowedOrigins: n.config.HTTPCors,
			Vhosts:             n.config.HTTPVirtualHosts,
			Modules:            n.config.HTTPModules,
			prefix:             n.config.HTTPPathPrefix,
			rpcEndpointConfig:  rpcConfig,
		}); err != nil {
			return err
		}
		servers = append(servers, server)
		return nil
	}

	initWS := func(port int) error {
		server := n.wsServerForPort(port, false)
		if err := server.setListenAddr(n.config.WSHost, port); err != nil {
			return err
		}
		if err := server.enableWS(openAPIs, wsConfig{
			Modules:           n.config.WSModules,
			Origins:           n.config.WSOrigins,
			prefix:            n.config.WSPathPrefix,
			rpcEndpointConfig: rpcConfig,
		}); err != nil {
			return err
		}
		servers = append(servers, server)
		return nil
	}

	initAuth := func(port int, secret []byte) error {
		// Enable auth via HTTP
		server := n.httpAuth
		if err := server.setListenAddr(n.config.AuthAddr, port); err != nil {
			return err
		}
		sharedConfig := rpcEndpointConfig{
			jwtSecret:              secret,
			batchItemLimit:         engineAPIBatchItemLimit,
			batchResponseSizeLimit: engineAPIBatchResponseSizeLimit,
			httpBodyLimit:          engineAPIBodyLimit,
		}
		err := server.enableRPC(allAPIs, httpConfig{
			CorsAllowedOrigins: DefaultAuthCors,
			Vhosts:             n.config.AuthVirtualHosts,
			Modules:            DefaultAuthModules,
			prefix:             DefaultAuthPrefix,
			rpcEndpointConfig:  sharedConfig,
		})
		if err != nil {
			return err
		}
		servers = append(servers, server)

		// Enable auth via WS
		server = n.wsServerForPort(port, true)
		if err := server.setListenAddr(n.config.AuthAddr, port); err != nil {
			return err
		}
		if err := server.enableWS(allAPIs, wsConfig{
			Modules:           DefaultAuthModules,
			Origins:           DefaultAuthOrigins,
			prefix:            DefaultAuthPrefix,
			rpcEndpointConfig: sharedConfig,
		}); err != nil {
			return err
		}
		servers = append(servers, server)
		return nil
	}

	// Set up HTTP.
	if n.config.HTTPHost != "" {
		// Configure legacy unauthenticated HTTP.
		if err := initHttp(n.http, n.config.HTTPPort); err != nil {
			return err
		}
	}
	// Configure WebSocket.
	if n.config.WSHost != "" {
		// legacy unauthenticated
		if err := initWS(n.config.WSPort); err != nil {
			return err
		}
	}
	// Configure authenticated API
	if len(openAPIs) != len(allAPIs) {
		jwtSecret, err := n.obtainJWTSecret(n.config.JWTSecret)
		if err != nil {
			return err
		}
		if err := initAuth(n.config.AuthPort, jwtSecret); err != nil {
			return err
		}
	}
	// Start the servers
	for _, server := range servers {
		if err := server.start(); err != nil {
			return err
		}
	}
	return nil
}

func (n *Node) wsServerForPort(port int, authenticated bool) *httpServer {
	httpServer, wsServer := n.http, n.ws
	if authenticated {
		httpServer, wsServer = n.httpAuth, n.wsAuth
	}
	if n.config.HTTPHost == "" || httpServer.port == port {
		return httpServer
	}
	return wsServer
}

func (n *Node) stopRPC() {
	n.http.stop()
	n.ws.stop()
	n.httpAuth.stop()
	n.wsAuth.stop()
	n.ipc.stop()
	n.stopInProc()
}

// startInProc registers all RPC APIs on the inproc server.
func (n *Node) startInProc(apis []rpc.API) error {
	for _, api := range apis {
		if err := n.inprocHandler.RegisterName(api.Namespace, api.Service); err != nil {
			return err
		}
	}
	return nil
}

// stopInProc terminates the in-process RPC endpoint.
func (n *Node) stopInProc() {
	n.inprocHandler.Stop()
}

// Wait blocks until the node is closed.
func (n *Node) Wait() {
	<-n.stop
}

// RegisterLifecycle registers the given Lifecycle on the node.
func (n *Node) RegisterLifecycle(lifecycle Lifecycle) {
	n.lock.Lock()
	defer n.lock.Unlock()

	if n.state != initializingState {
		panic("can't register lifecycle on running/stopped node")
	}
	if containsLifecycle(n.lifecycles, lifecycle) {
		panic(fmt.Sprintf("attempt to register lifecycle %T more than once", lifecycle))
	}
	n.lifecycles = append(n.lifecycles, lifecycle)
}
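
// Illustrative only (editor's sketch): a minimal Lifecycle implementation.
// The noopService type is hypothetical, not part of this package.
//
//	type noopService struct{}
//
//	func (s *noopService) Start() error { return nil }
//	func (s *noopService) Stop() error  { return nil }
//
//	// Must be called before stack.Start():
//	stack.RegisterLifecycle(new(noopService))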

// RegisterProtocols adds backend's protocols to the node's p2p server.
func (n *Node) RegisterProtocols(protocols []p2p.Protocol) {
	n.lock.Lock()
	defer n.lock.Unlock()

	if n.state != initializingState {
		panic("can't register protocols on running/stopped node")
	}
	n.server.Protocols = append(n.server.Protocols, protocols...)
}

// RegisterAPIs registers the APIs a service provides on the node.
func (n *Node) RegisterAPIs(apis []rpc.API) {
	n.lock.Lock()
	defer n.lock.Unlock()

	if n.state != initializingState {
		panic("can't register APIs on running/stopped node")
	}
	n.rpcAPIs = append(n.rpcAPIs, apis...)
}

// getAPIs returns two sets of APIs: the ones that do not require
// authentication, and the complete set.
func (n *Node) getAPIs() (unauthenticated, all []rpc.API) {
	for _, api := range n.rpcAPIs {
		if !api.Authenticated {
			unauthenticated = append(unauthenticated, api)
		}
	}
	return unauthenticated, n.rpcAPIs
}
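
// Illustrative only (editor's sketch): an API registered with Authenticated
// set to true is excluded from the unauthenticated set above and is served
// only on the JWT-protected endpoints. The service value is hypothetical.
//
//	stack.RegisterAPIs([]rpc.API{{
//		Namespace:     "engine",
//		Service:       new(myEngineAPI),
//		Authenticated: true,
//	}})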

// RegisterHandler mounts a handler on the given path on the canonical HTTP server.
//
// The name of the handler is shown in a log message when the HTTP server starts
// and should be a descriptive term for the service provided by the handler.
func (n *Node) RegisterHandler(name, path string, handler http.Handler) {
	n.lock.Lock()
	defer n.lock.Unlock()

	if n.state != initializingState {
		panic("can't register HTTP handler on running/stopped node")
	}

	n.http.mux.Handle(path, handler)
	n.http.handlerNames[path] = name
}
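
// Illustrative only (editor's sketch): mounting a custom handler before
// Start. The name, path and handler are hypothetical.
//
//	stack.RegisterHandler("health check", "/health", http.HandlerFunc(
//		func(w http.ResponseWriter, r *http.Request) {
//			w.WriteHeader(http.StatusOK)
//		}))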

// Attach creates an RPC client attached to an in-process API handler.
func (n *Node) Attach() *rpc.Client {
	return rpc.DialInProc(n.inprocHandler)
}
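
// Illustrative only (editor's sketch): issuing a call over the in-process
// client returned by Attach; web3_clientVersion is one of the built-in APIs
// registered via apis().
//
//	client := stack.Attach()
//	var version string
//	if err := client.Call(&version, "web3_clientVersion"); err != nil {
//		log.Error("call failed", "err", err)
//	}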

// RPCHandler returns the in-process RPC request handler.
func (n *Node) RPCHandler() (*rpc.Server, error) {
	n.lock.Lock()
	defer n.lock.Unlock()

	if n.state == closedState {
		return nil, ErrNodeStopped
	}
	return n.inprocHandler, nil
}

// Config returns the configuration of the node.
func (n *Node) Config() *Config {
	return n.config
}

// Server retrieves the currently running P2P network layer. This method is meant
// only to inspect fields of the currently running server. Callers should not
// start or stop the returned server.
func (n *Node) Server() *p2p.Server {
	n.lock.Lock()
	defer n.lock.Unlock()

	return n.server
}

// DataDir retrieves the current datadir used by the protocol stack.
// Deprecated: No files should be stored in this directory, use InstanceDir instead.
func (n *Node) DataDir() string {
	return n.config.DataDir
}

// InstanceDir retrieves the instance directory used by the protocol stack.
func (n *Node) InstanceDir() string {
	return n.config.instanceDir()
}

// KeyStoreDir retrieves the key store directory.
func (n *Node) KeyStoreDir() string {
	return n.keyDir
}

// AccountManager retrieves the account manager used by the protocol stack.
func (n *Node) AccountManager() *accounts.Manager {
	return n.accman
}

// IPCEndpoint retrieves the current IPC endpoint used by the protocol stack.
func (n *Node) IPCEndpoint() string {
	return n.ipc.endpoint
}

// HTTPEndpoint returns the URL of the HTTP server. Note that this URL does not
// contain the JSON-RPC path prefix set by HTTPPathPrefix.
func (n *Node) HTTPEndpoint() string {
	return "http://" + n.http.listenAddr()
}

// WSEndpoint returns the current JSON-RPC over WebSocket endpoint.
func (n *Node) WSEndpoint() string {
	if n.http.wsAllowed() {
		return "ws://" + n.http.listenAddr() + n.http.wsConfig.prefix
	}
	return "ws://" + n.ws.listenAddr() + n.ws.wsConfig.prefix
}

// HTTPAuthEndpoint returns the URL of the authenticated HTTP server.
func (n *Node) HTTPAuthEndpoint() string {
	return "http://" + n.httpAuth.listenAddr()
}

// WSAuthEndpoint returns the current authenticated JSON-RPC over WebSocket endpoint.
func (n *Node) WSAuthEndpoint() string {
	if n.httpAuth.wsAllowed() {
		return "ws://" + n.httpAuth.listenAddr() + n.httpAuth.wsConfig.prefix
	}
	return "ws://" + n.wsAuth.listenAddr() + n.wsAuth.wsConfig.prefix
}

// EventMux retrieves the event multiplexer used by all the network services in
// the current protocol stack.
func (n *Node) EventMux() *event.TypeMux {
	return n.eventmux
}

// OpenDatabase opens an existing database with the given name (or creates one if no
// previous can be found) from within the node's instance directory. If the node is
// ephemeral, a memory database is returned.
func (n *Node) OpenDatabase(name string, cache, handles int, namespace string, readonly bool) (ethdb.Database, error) {
	n.lock.Lock()
	defer n.lock.Unlock()
	if n.state == closedState {
		return nil, ErrNodeStopped
	}

	var db ethdb.Database
	var err error
	if n.config.DataDir == "" {
		db = rawdb.NewMemoryDatabase()
	} else {
		db, err = rawdb.Open(rawdb.OpenOptions{
			Type:      n.config.DBEngine,
			Directory: n.ResolvePath(name),
			Namespace: namespace,
			Cache:     cache,
			Handles:   handles,
			ReadOnly:  readonly,
		})
	}

	if err == nil {
		db = n.wrapDatabase(db)
	}
	return db, err
}
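
// Illustrative only (editor's sketch): opening a service database inside the
// instance directory. The name, cache size, handle count and namespace are
// hypothetical.
//
//	db, err := stack.OpenDatabase("servicedata", 16, 16, "eth/db/servicedata/", false)
//	if err != nil {
//		log.Crit("failed to open database", "err", err)
//	}
//	defer db.Close() // closing also removes it from the node's auto-close set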

// OpenAndMergeDatabase opens the chain database and, depending on the flags,
// attaches a separated state database and/or a diff store to it.
func (n *Node) OpenAndMergeDatabase(name string, cache, handles int, freezer, diff, namespace string, readonly, persistDiff, pruneAncientData bool) (ethdb.Database, error) {
	chainDataHandles := handles
	if persistDiff {
		chainDataHandles = handles * chainDataHandlesPercentage / 100
	}
	var statediskdb ethdb.Database
	var err error
	// Open the separated state database if the state directory exists
	if n.IsSeparatedDB() {
		// Allocate half of the handles and cache to the separate state database
		statediskdb, err = n.OpenDatabaseWithFreezer(name+"/state", cache/2, chainDataHandles/2, "", "eth/db/statedata/", readonly, false, false, pruneAncientData)
		if err != nil {
			return nil, err
		}

		// Reduce the handles and cache for the chain database: with the trie
		// data stored separately, it no longer needs the full allocation.
		cache = int(float64(cache) * 0.6)
		chainDataHandles = int(float64(chainDataHandles) * 0.6)
	}

	chainDB, err := n.OpenDatabaseWithFreezer(name, cache, chainDataHandles, freezer, namespace, readonly, false, false, pruneAncientData)
	if err != nil {
		return nil, err
	}

	if statediskdb != nil {
		chainDB.SetStateStore(statediskdb)
	}

	if persistDiff {
		diffStore, err := n.OpenDiffDatabase(name, handles-chainDataHandles, diff, namespace, readonly)
		if err != nil {
			chainDB.Close()
			return nil, err
		}
		chainDB.SetDiffStore(diffStore)
	}
	return chainDB, nil
}

// OpenDatabaseWithFreezer opens an existing database with the given name (or
// creates one if no previous can be found) from within the node's data directory,
// also attaching a chain freezer to it that moves ancient chain data from the
// database to immutable append-only files. If the node is an ephemeral one, a
// memory database is returned.
func (n *Node) OpenDatabaseWithFreezer(name string, cache, handles int, ancient, namespace string, readonly, disableFreeze, isLastOffset, pruneAncientData bool) (ethdb.Database, error) {
	n.lock.Lock()
	defer n.lock.Unlock()
	if n.state == closedState {
		return nil, ErrNodeStopped
	}
	var db ethdb.Database
	var err error
	if n.config.DataDir == "" {
		db = rawdb.NewMemoryDatabase()
	} else {
		db, err = rawdb.Open(rawdb.OpenOptions{
			Type:              n.config.DBEngine,
			Directory:         n.ResolvePath(name),
			AncientsDirectory: n.ResolveAncient(name, ancient),
			Namespace:         namespace,
			Cache:             cache,
			Handles:           handles,
			ReadOnly:          readonly,
			DisableFreeze:     disableFreeze,
			IsLastOffset:      isLastOffset,
			PruneAncientData:  pruneAncientData,
		})
	}

	if err == nil {
		db = n.wrapDatabase(db)
	}
	return db, err
}

// IsSeparatedDB reports whether the "state" subdirectory exists under the
// chaindata directory, i.e. whether the node uses a separated state database.
func (n *Node) IsSeparatedDB() bool {
	separateDir := filepath.Join(n.ResolvePath("chaindata"), "state")
	fileInfo, err := os.Stat(separateDir)
	if err != nil {
		// Treat any stat failure (not just "does not exist") as "not separated"
		// to avoid dereferencing a nil FileInfo below.
		return false
	}
	return fileInfo.IsDir()
}

func (n *Node) OpenDiffDatabase(name string, handles int, diff, namespace string, readonly bool) (*leveldb.Database, error) {
	n.lock.Lock()
	defer n.lock.Unlock()
	if n.state == closedState {
		return nil, ErrNodeStopped
	}

	var db *leveldb.Database
	var err error
	if n.config.DataDir == "" {
		panic("datadir is missing")
	}
	root := n.ResolvePath(name)
	switch {
	case diff == "":
		diff = filepath.Join(root, "diff")
	case !filepath.IsAbs(diff):
		diff = n.ResolvePath(diff)
	}
	db, err = leveldb.New(diff, 0, handles, namespace, readonly)

	return db, err
}

// ResolvePath returns the absolute path of a resource in the instance directory.
func (n *Node) ResolvePath(x string) string {
	return n.config.ResolvePath(x)
}

// ResolveAncient returns the absolute path of the root ancient directory.
func (n *Node) ResolveAncient(name string, ancient string) string {
	switch {
	case ancient == "":
		ancient = filepath.Join(n.ResolvePath(name), "ancient")
	case !filepath.IsAbs(ancient):
		ancient = n.ResolvePath(ancient)
	}
	return ancient
}
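
// Illustrative only (editor's note): assuming DataDir "/data" and instance
// name "geth", ResolvePath("chaindata") yields "/data/geth/chaindata" and
// ResolveAncient("chaindata", "") yields "/data/geth/chaindata/ancient".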

// closeTrackingDB wraps the Close method of a database. When the database is closed by the
// service, the wrapper removes it from the node's database map. This ensures that Node
// won't auto-close the database if it is closed by the service that opened it.
type closeTrackingDB struct {
	ethdb.Database
	n *Node
}

func (db *closeTrackingDB) Close() error {
	db.n.lock.Lock()
	delete(db.n.databases, db)
	db.n.lock.Unlock()
	return db.Database.Close()
}

// wrapDatabase ensures the database will be auto-closed when Node is closed.
func (n *Node) wrapDatabase(db ethdb.Database) ethdb.Database {
	wrapper := &closeTrackingDB{db, n}
	n.databases[wrapper] = struct{}{}
	return wrapper
}

// closeDatabases closes all open databases.
func (n *Node) closeDatabases() (errors []error) {
	for db := range n.databases {
		delete(n.databases, db)
		if err := db.Database.Close(); err != nil {
			errors = append(errors, err)
		}
	}
	return errors
}