// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package node

import (
	"compress/gzip"
	"context"
	"errors"
	"fmt"
	"io"
	"net"
	"net/http"
	"sort"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/rs/cors"
)

// httpConfig is the JSON-RPC/HTTP configuration.
type httpConfig struct {
	Modules            []string
	CorsAllowedOrigins []string
	Vhosts             []string
	prefix             string // path prefix on which to mount http handler
	rpcEndpointConfig
}

// wsConfig is the JSON-RPC/WebSocket configuration.
type wsConfig struct {
	Origins []string
	Modules []string
	prefix  string // path prefix on which to mount ws handler
	rpcEndpointConfig
}

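// rpcEndpointConfig holds the options shared by the HTTP and WebSocket RPC endpoints:
// an optional JWT secret for authenticated endpoints and the batch/body size limits
// passed on to the rpc.Server.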
type rpcEndpointConfig struct {
	jwtSecret              []byte // optional JWT secret
	batchItemLimit         int
	batchResponseSizeLimit int
	httpBodyLimit          int
}

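// rpcHandler bundles an HTTP handler with the rpc.Server behind it, so that the
// server can be shut down when the handler is removed.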
type rpcHandler struct {
	http.Handler
	server *rpc.Server
}

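// httpServer serves JSON-RPC over HTTP and WebSocket on a single listener, together
// with any additional handlers registered on the node's ServeMux.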
type httpServer struct {
	log      log.Logger
	timeouts rpc.HTTPTimeouts
	mux      http.ServeMux // registered handlers go here

	mu       sync.Mutex
	server   *http.Server
	listener net.Listener // non-nil when server is running

	// HTTP RPC handler things.

	httpConfig  httpConfig
	httpHandler atomic.Value // *rpcHandler

	// WebSocket handler things.
	wsConfig  wsConfig
	wsHandler atomic.Value // *rpcHandler

	// These are set by setListenAddr.
	endpoint string
	host     string
	port     int

	handlerNames map[string]string
}

const (
	shutdownTimeout = 5 * time.Second
)

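// newHTTPServer creates an httpServer with the given logger and timeouts. The
// returned server has no listen address configured and no handlers enabled.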
func newHTTPServer(log log.Logger, timeouts rpc.HTTPTimeouts) *httpServer {
	h := &httpServer{log: log, timeouts: timeouts, handlerNames: make(map[string]string)}

	h.httpHandler.Store((*rpcHandler)(nil))
	h.wsHandler.Store((*rpcHandler)(nil))
	return h
}

// setListenAddr configures the listening address of the server.
// The address can only be set while the server isn't running.
func (h *httpServer) setListenAddr(host string, port int) error {
	h.mu.Lock()
	defer h.mu.Unlock()

	if h.listener != nil && (host != h.host || port != h.port) {
		return fmt.Errorf("HTTP server already running on %s", h.endpoint)
	}

	h.host, h.port = host, port
	h.endpoint = net.JoinHostPort(host, fmt.Sprintf("%d", port))
	return nil
}

// listenAddr returns the listening address of the server.
func (h *httpServer) listenAddr() string {
	h.mu.Lock()
	defer h.mu.Unlock()

	if h.listener != nil {
		return h.listener.Addr().String()
	}
	return h.endpoint
}

// start starts the HTTP server if it is enabled and not already running.
func (h *httpServer) start() error {
	h.mu.Lock()
	defer h.mu.Unlock()

	if h.endpoint == "" || h.listener != nil {
		return nil // already running or not configured
	}

	// Initialize the server.
	h.server = &http.Server{Handler: h}
	if h.timeouts != (rpc.HTTPTimeouts{}) {
		CheckTimeouts(&h.timeouts)
		h.server.ReadTimeout = h.timeouts.ReadTimeout
		h.server.ReadHeaderTimeout = h.timeouts.ReadHeaderTimeout
		h.server.WriteTimeout = h.timeouts.WriteTimeout
		h.server.IdleTimeout = h.timeouts.IdleTimeout
	}

	// Start the server.
	listener, err := net.Listen("tcp", h.endpoint)
	if err != nil {
		// If the server fails to start, we need to clear out the RPC and WS
		// configuration so they can be configured another time.
		h.disableRPC()
		h.disableWS()
		return err
	}
	h.listener = listener
	go h.server.Serve(listener)

	if h.wsAllowed() {
		url := fmt.Sprintf("ws://%v", listener.Addr())
		if h.wsConfig.prefix != "" {
			url += h.wsConfig.prefix
		}
		h.log.Info("WebSocket enabled", "url", url)
	}
	// if server is websocket only, return after logging
	if !h.rpcAllowed() {
		return nil
	}
	// Log http endpoint.
	h.log.Info("HTTP server started",
		"endpoint", listener.Addr(), "auth", (h.httpConfig.jwtSecret != nil),
		"prefix", h.httpConfig.prefix,
		"cors", strings.Join(h.httpConfig.CorsAllowedOrigins, ","),
		"vhosts", strings.Join(h.httpConfig.Vhosts, ","),
	)

	// Log all handlers mounted on server.
	var paths []string
	for path := range h.handlerNames {
		paths = append(paths, path)
	}
	sort.Strings(paths)
	logged := make(map[string]bool, len(paths))
	for _, path := range paths {
		name := h.handlerNames[path]
		if !logged[name] {
			log.Info(name+" enabled", "url", "http://"+listener.Addr().String()+path)
			logged[name] = true
		}
	}
	return nil
}

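// ServeHTTP dispatches incoming requests: WebSocket upgrade requests go to the
// WebSocket handler, paths registered via Node.RegisterHandler are served by the mux,
// and requests matching the configured prefix are handed to the JSON-RPC handler.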
func (h *httpServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// check if ws request and serve if ws enabled
	ws := h.wsHandler.Load().(*rpcHandler)
	if ws != nil && isWebsocket(r) {
		if checkPath(r, h.wsConfig.prefix) {
			ws.ServeHTTP(w, r)
		}
		return
	}

	// if http-rpc is enabled, try to serve request
	rpc := h.httpHandler.Load().(*rpcHandler)
	if rpc != nil {
		// First try to route in the mux.
		// Requests to a path below root are handled by the mux,
		// which has all the handlers registered via Node.RegisterHandler.
		// These are made available when RPC is enabled.
		muxHandler, pattern := h.mux.Handler(r)
		if pattern != "" {
			muxHandler.ServeHTTP(w, r)
			return
		}

		if checkPath(r, h.httpConfig.prefix) {
			rpc.ServeHTTP(w, r)
			return
		}
	}
	w.WriteHeader(http.StatusNotFound)
}

// checkPath checks whether a given request URL matches a given path prefix.
func checkPath(r *http.Request, path string) bool {
	// if no prefix has been specified, request URL must be on root
	if path == "" {
		return r.URL.Path == "/"
	}
	// otherwise, check to make sure prefix matches
	return len(r.URL.Path) >= len(path) && r.URL.Path[:len(path)] == path
}

// validatePrefix checks if 'path' is a valid configuration value for the RPC prefix option.
func validatePrefix(what, path string) error {
	if path == "" {
		return nil
	}
	if path[0] != '/' {
		return fmt.Errorf(`%s RPC path prefix %q does not contain leading "/"`, what, path)
	}
	if strings.ContainsAny(path, "?#") {
		// This is just to avoid confusion. While these would match correctly (i.e. they'd
		// match if URL-escaped into path), it's not easy to understand for users when
		// setting that on the command line.
		return fmt.Errorf("%s RPC path prefix %q contains URL meta-characters", what, path)
	}
	return nil
}

// stop shuts down the HTTP server.
func (h *httpServer) stop() {
	h.mu.Lock()
	defer h.mu.Unlock()
	h.doStop()
}

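// doStop stops the RPC handlers and shuts down the underlying HTTP server.
// The caller must hold h.mu.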
func (h *httpServer) doStop() {
	if h.listener == nil {
		return // not running
	}

	// Shut down the server.
	httpHandler := h.httpHandler.Load().(*rpcHandler)
	wsHandler := h.wsHandler.Load().(*rpcHandler)
	if httpHandler != nil {
		h.httpHandler.Store((*rpcHandler)(nil))
		httpHandler.server.Stop()
	}
	if wsHandler != nil {
		h.wsHandler.Store((*rpcHandler)(nil))
		wsHandler.server.Stop()
	}

	ctx, cancel := context.WithTimeout(context.Background(), shutdownTimeout)
	defer cancel()
	err := h.server.Shutdown(ctx)
	if err != nil && err == ctx.Err() {
		h.log.Warn("HTTP server graceful shutdown timed out")
		h.server.Close()
	}

	h.listener.Close()
	h.log.Info("HTTP server stopped", "endpoint", h.listener.Addr())

	// Clear out everything to allow re-configuring it later.
	h.host, h.port, h.endpoint = "", 0, ""
	h.server, h.listener = nil, nil
}

// enableRPC turns on JSON-RPC over HTTP on the server.
func (h *httpServer) enableRPC(apis []rpc.API, config httpConfig) error {
	h.mu.Lock()
	defer h.mu.Unlock()

	if h.rpcAllowed() {
		return errors.New("JSON-RPC over HTTP is already enabled")
	}

	// Create RPC server and handler.
	srv := rpc.NewServer()
	srv.SetBatchLimits(config.batchItemLimit, config.batchResponseSizeLimit)
	if config.httpBodyLimit > 0 {
		srv.SetHTTPBodyLimit(config.httpBodyLimit)
	}
	if err := RegisterApis(apis, config.Modules, srv); err != nil {
		return err
	}
	h.httpConfig = config
	h.httpHandler.Store(&rpcHandler{
		Handler: NewHTTPHandlerStack(srv, config.CorsAllowedOrigins, config.Vhosts, config.jwtSecret),
		server:  srv,
	})
	return nil
}

// disableRPC stops the HTTP RPC handler. This is internal, the caller must hold h.mu.
func (h *httpServer) disableRPC() bool {
	handler := h.httpHandler.Load().(*rpcHandler)
	if handler != nil {
		h.httpHandler.Store((*rpcHandler)(nil))
		handler.server.Stop()
	}
	return handler != nil
}

// enableWS turns on JSON-RPC over WebSocket on the server.
func (h *httpServer) enableWS(apis []rpc.API, config wsConfig) error {
	h.mu.Lock()
	defer h.mu.Unlock()

	if h.wsAllowed() {
		return errors.New("JSON-RPC over WebSocket is already enabled")
	}
	// Create RPC server and handler.
	srv := rpc.NewServer()
	srv.SetBatchLimits(config.batchItemLimit, config.batchResponseSizeLimit)
	if config.httpBodyLimit > 0 {
		srv.SetHTTPBodyLimit(config.httpBodyLimit)
	}
	if err := RegisterApis(apis, config.Modules, srv); err != nil {
		return err
	}
	h.wsConfig = config
	h.wsHandler.Store(&rpcHandler{
		Handler: NewWSHandlerStack(srv.WebsocketHandler(config.Origins), config.jwtSecret),
		server:  srv,
	})
	return nil
}

// stopWS disables JSON-RPC over WebSocket and also stops the server if it only serves WebSocket.
func (h *httpServer) stopWS() {
	h.mu.Lock()
	defer h.mu.Unlock()

	if h.disableWS() {
		if !h.rpcAllowed() {
			h.doStop()
		}
	}
}

// disableWS disables the WebSocket handler. This is internal, the caller must hold h.mu.
func (h *httpServer) disableWS() bool {
	ws := h.wsHandler.Load().(*rpcHandler)
	if ws != nil {
		h.wsHandler.Store((*rpcHandler)(nil))
		ws.server.Stop()
	}
	return ws != nil
}

// rpcAllowed returns true when JSON-RPC over HTTP is enabled.
func (h *httpServer) rpcAllowed() bool {
	return h.httpHandler.Load().(*rpcHandler) != nil
}

// wsAllowed returns true when JSON-RPC over WebSocket is enabled.
func (h *httpServer) wsAllowed() bool {
	return h.wsHandler.Load().(*rpcHandler) != nil
}

// isWebsocket checks the header of an http request for a websocket upgrade request.
func isWebsocket(r *http.Request) bool {
	return strings.EqualFold(r.Header.Get("Upgrade"), "websocket") &&
		strings.Contains(strings.ToLower(r.Header.Get("Connection")), "upgrade")
}

// NewHTTPHandlerStack returns wrapped http-related handlers
func NewHTTPHandlerStack(srv http.Handler, cors []string, vhosts []string, jwtSecret []byte) http.Handler {
	// Wrap the CORS-handler within a host-handler
	handler := newCorsHandler(srv, cors)
	handler = newVHostHandler(vhosts, handler)
	if len(jwtSecret) != 0 {
		handler = newJWTHandler(jwtSecret, handler)
	}
	return newGzipHandler(handler)
}

// NewWSHandlerStack returns a wrapped ws-related handler.
func NewWSHandlerStack(srv http.Handler, jwtSecret []byte) http.Handler {
	if len(jwtSecret) != 0 {
		return newJWTHandler(jwtSecret, srv)
	}
	return srv
}

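// newCorsHandler wraps srv with a CORS middleware for the given allowed origins.
// If no origins are configured, srv is returned unchanged and CORS stays disabled.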
func newCorsHandler(srv http.Handler, allowedOrigins []string) http.Handler {
	// disable CORS support if user has not specified a custom CORS configuration
	if len(allowedOrigins) == 0 {
		return srv
	}
	c := cors.New(cors.Options{
		AllowedOrigins: allowedOrigins,
		AllowedMethods: []string{http.MethodPost, http.MethodGet},
		AllowedHeaders: []string{"*"},
		MaxAge:         600,
	})
	return c.Handler(srv)
}

// virtualHostHandler is a handler which validates the Host-header of incoming requests.
// Using virtual hosts can help prevent DNS rebinding attacks, where a 'random' domain name points to
// the service ip address (but without CORS headers). By verifying the targeted virtual host, we can
// ensure that it's a destination that the node operator has defined.
type virtualHostHandler struct {
	vhosts map[string]struct{}
	next   http.Handler
}

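// newVHostHandler creates a virtualHostHandler that checks the Host header of each
// request against the given allowed host names before invoking next.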
func newVHostHandler(vhosts []string, next http.Handler) http.Handler {
	vhostMap := make(map[string]struct{})
	for _, allowedHost := range vhosts {
		vhostMap[strings.ToLower(allowedHost)] = struct{}{}
	}
	return &virtualHostHandler{vhostMap, next}
}

// ServeHTTP implements http.Handler, checking the request's Host header against the
// allowed virtual hosts before forwarding the request to the wrapped handler.
func (h *virtualHostHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// if r.Host is not set, we can continue serving since a browser would set the Host header
	if r.Host == "" {
		h.next.ServeHTTP(w, r)
		return
	}
	host, _, err := net.SplitHostPort(r.Host)
	if err != nil {
		// Either invalid (too many colons) or no port specified
		host = r.Host
	}
	if ipAddr := net.ParseIP(host); ipAddr != nil {
		// It's an IP address, we can serve that
		h.next.ServeHTTP(w, r)
		return
	}
	// Not an IP address, but a hostname. Need to validate
	if _, exist := h.vhosts["*"]; exist {
		h.next.ServeHTTP(w, r)
		return
	}
	if _, exist := h.vhosts[host]; exist {
		h.next.ServeHTTP(w, r)
		return
	}
	http.Error(w, "invalid host specified", http.StatusForbidden)
}

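// gzPool recycles gzip writers across responses to avoid allocating a new writer
// for every compressed request.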
var gzPool = sync.Pool{
	New: func() interface{} {
		w := gzip.NewWriter(io.Discard)
		return w
	},
}

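// gzipResponseWriter wraps an http.ResponseWriter and gzip-compresses the response
// body, unless compression has been disabled by setting Transfer-Encoding to "identity".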
type gzipResponseWriter struct {
	resp http.ResponseWriter

	gz            *gzip.Writer
	contentLength uint64 // total length of the uncompressed response
	written       uint64 // amount of written bytes from the uncompressed response
	hasLength     bool   // true if uncompressed response had Content-Length
	inited        bool   // true after init was called for the first time
}

// init runs just before response headers are written. Among other things, this function
// also decides whether compression will be applied at all.
func (w *gzipResponseWriter) init() {
	if w.inited {
		return
	}
	w.inited = true

	hdr := w.resp.Header()
	length := hdr.Get("content-length")
	if len(length) > 0 {
		if n, err := strconv.ParseUint(length, 10, 64); err == nil {
			w.hasLength = true
			w.contentLength = n
		}
	}

	// Setting Transfer-Encoding to "identity" explicitly disables compression. net/http
	// also recognizes this header value and uses it to disable "chunked" transfer
	// encoding, trimming the header from the response. This means downstream handlers can
	// set this without harm, even if they aren't wrapped by newGzipHandler.
	//
	// In go-ethereum, we use this signal to disable compression for certain error
	// responses which are flushed out close to the write deadline of the response. For
	// these cases, we want to avoid chunked transfer encoding and compression because
	// they require additional output that may not get written in time.
	passthrough := hdr.Get("transfer-encoding") == "identity"
	if !passthrough {
		w.gz = gzPool.Get().(*gzip.Writer)
		w.gz.Reset(w.resp)
		hdr.Del("content-length")
		hdr.Set("content-encoding", "gzip")
	}
}

func (w *gzipResponseWriter) Header() http.Header {
	return w.resp.Header()
}

func (w *gzipResponseWriter) WriteHeader(status int) {
	w.init()
	w.resp.WriteHeader(status)
}

func (w *gzipResponseWriter) Write(b []byte) (int, error) {
	w.init()

	if w.gz == nil {
		// Compression is disabled.
		return w.resp.Write(b)
	}

	n, err := w.gz.Write(b)
	w.written += uint64(n)
	if w.hasLength && w.written >= w.contentLength {
		// The HTTP handler has finished writing the entire uncompressed response. Close
		// the gzip stream to ensure the footer will be seen by the client in case the
		// response is flushed after this call to write.
		err = w.gz.Close()
	}
	return n, err
}

func (w *gzipResponseWriter) Flush() {
	if w.gz != nil {
		w.gz.Flush()
	}
	if f, ok := w.resp.(http.Flusher); ok {
		f.Flush()
	}
}

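// close finishes the gzip stream, if it is active, and returns the writer to the pool.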
func (w *gzipResponseWriter) close() {
	if w.gz == nil {
		return
	}
	w.gz.Close()
	gzPool.Put(w.gz)
	w.gz = nil
}

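// newGzipHandler returns a handler that gzip-compresses responses for clients that
// advertise gzip support in their Accept-Encoding header.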
func newGzipHandler(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
			next.ServeHTTP(w, r)
			return
		}

		wrapper := &gzipResponseWriter{resp: w}
		defer wrapper.close()

		next.ServeHTTP(wrapper, r)
	})
}

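// ipcServer serves JSON-RPC over a local IPC endpoint.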
type ipcServer struct {
	log      log.Logger
	endpoint string

	mu       sync.Mutex
	listener net.Listener
	srv      *rpc.Server
}

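// newIPCServer creates an ipcServer for the given endpoint. The server is not started
// until start is called.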
func newIPCServer(log log.Logger, endpoint string) *ipcServer {
	return &ipcServer{log: log, endpoint: endpoint}
}

// start starts the IPC RPC endpoint if it is not already running.
func (is *ipcServer) start(apis []rpc.API) error {
	is.mu.Lock()
	defer is.mu.Unlock()

	if is.listener != nil {
		return nil // already running
	}
	listener, srv, err := rpc.StartIPCEndpoint(is.endpoint, apis)
	if err != nil {
		is.log.Warn("IPC opening failed", "url", is.endpoint, "error", err)
		return err
	}
	is.log.Info("IPC endpoint opened", "url", is.endpoint)
	is.listener, is.srv = listener, srv
	return nil
}

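// stop closes the IPC listener and shuts down the RPC server.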
func (is *ipcServer) stop() error {
	is.mu.Lock()
	defer is.mu.Unlock()

	if is.listener == nil {
		return nil // not running
	}
	err := is.listener.Close()
	is.srv.Stop()
	is.listener, is.srv = nil, nil
	is.log.Info("IPC endpoint closed", "url", is.endpoint)
	return err
}

// RegisterApis checks the given modules' availability, generates an allowlist based on the allowed modules,
// and then registers all of the APIs exposed by the services.
func RegisterApis(apis []rpc.API, modules []string, srv *rpc.Server) error {
	if bad, available := checkModuleAvailability(modules, apis); len(bad) > 0 {
		log.Error("Unavailable modules in HTTP API list", "unavailable", bad, "available", available)
	}
	// Generate the allow list based on the allowed modules
	allowList := make(map[string]bool)
	for _, module := range modules {
		allowList[module] = true
	}
	// Register all the APIs exposed by the services
	for _, api := range apis {
		if allowList[api.Namespace] || len(allowList) == 0 {
			if err := srv.RegisterName(api.Namespace, api.Service); err != nil {
				return err
			}
		}
	}
	return nil
}