2022-05-05 00:51:24 +03:00
|
|
|
package proxyd
|
|
|
|
|
|
|
|
import (
	"context"
	"crypto/rand"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"math"
	"math/big"
	"net"
	"net/http"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/txpool"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/log"
	"github.com/gorilla/mux"
	"github.com/gorilla/websocket"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/redis/go-redis/v9"
	"github.com/rs/cors"
	"github.com/syndtr/goleveldb/leveldb/opt"
)
|
|
|
|
|
|
|
|
const (
	// Context keys used to stash per-request metadata on the request context.
	ContextKeyAuth          = "authorization"
	ContextKeyReqID         = "req_id"
	ContextKeyXForwardedFor = "x_forwarded_for"
	// Batch-call limits: the default applies when config leaves the limit
	// unset; the hard limit caps whatever the config asks for.
	DefaultMaxBatchRPCCallsLimit = 100
	MaxBatchRPCCallsHardLimit    = 1000
	// Response header reporting cache HIT/MISS for the request.
	cacheStatusHdr = "X-Proxyd-Cache-Status"
	// Defaults applied in NewServer when the corresponding config is zero.
	defaultRPCTimeout         = 10 * time.Second
	defaultBodySizeLimit      = 256 * opt.KiB
	defaultWSHandshakeTimeout = 10 * time.Second
	defaultWSReadTimeout      = 2 * time.Minute
	defaultWSWriteTimeout     = 10 * time.Second
	defaultCacheTtl           = 1 * time.Hour
	// Cap on how much of a request body is reproduced in the request log.
	maxRequestBodyLogLen = 2000
	// Max number of calls forwarded upstream in a single JSON-RPC batch.
	defaultMaxUpstreamBatchSize = 10
	// Header consulted for the client IP used in rate limiting.
	defaultRateLimitHeader = "X-Forwarded-For"
)
|
|
|
|
|
2022-06-16 22:02:39 +03:00
|
|
|
// emptyArrayResponse is the canned JSON result returned for eth_accounts.
var emptyArrayResponse = json.RawMessage("[]")
|
|
|
|
|
2022-05-05 00:51:24 +03:00
|
|
|
// Server is the proxyd frontend: it terminates HTTP and WebSocket JSON-RPC
// traffic, applies authentication and rate limits, consults the RPC cache,
// and forwards calls to the configured backend groups.
type Server struct {
	// BackendGroups maps group name -> backend group for HTTP RPC forwarding.
	BackendGroups map[string]*BackendGroup
	// wsBackendGroup serves proxied WebSocket connections.
	wsBackendGroup *BackendGroup
	// wsMethodWhitelist restricts which methods may traverse the WS proxy.
	wsMethodWhitelist *StringSet
	// rpcMethodMappings maps RPC method name -> backend group name.
	rpcMethodMappings map[string]string
	// maxBodySize caps request body bytes (also the WS read limit).
	maxBodySize int64
	// enableRequestLog / maxRequestBodyLogLen control raw-request logging.
	enableRequestLog     bool
	maxRequestBodyLogLen int
	// authenticatedPaths maps URL auth token -> alias; empty disables auth.
	authenticatedPaths map[string]string
	// timeout bounds each RPC request's total processing time.
	timeout time.Duration
	// maxUpstreamBatchSize caps the size of batches forwarded upstream.
	maxUpstreamBatchSize int
	// maxBatchSize caps the size of client-submitted batches.
	maxBatchSize int
	// enableServedByHeader adds an x-served-by response header when set.
	enableServedByHeader bool
	upgrader             *websocket.Upgrader
	// mainLim is the base per-IP rate limiter; overrideLims are per-method;
	// senderLim limits eth_sendRawTransaction by recovered sender.
	mainLim      FrontendRateLimiter
	overrideLims map[string]FrontendRateLimiter
	senderLim    FrontendRateLimiter
	// allowedChainIds restricts raw txs to these chains (empty = all).
	allowedChainIds []*big.Int
	// Origin/User-Agent patterns exempt from non-global rate limits.
	limExemptOrigins    []*regexp.Regexp
	limExemptUserAgents []*regexp.Regexp
	// globallyLimitedMethods are limited even for exempt origins/agents.
	globallyLimitedMethods map[string]bool
	rpcServer              *http.Server
	wsServer               *http.Server
	cache                  RPCCache
	// srvMu guards rpcServer/wsServer during start and shutdown.
	srvMu sync.Mutex
	// rateLimitHeader names the header holding the client IP for limiting.
	rateLimitHeader string
}
|
|
|
|
|
2022-09-15 11:46:11 +03:00
|
|
|
// limiterFunc reports whether a request for the given RPC method should be
// rejected by rate limiting. The empty string checks the base (main) limit.
type limiterFunc func(method string) bool
|
|
|
|
|
2022-05-05 00:51:24 +03:00
|
|
|
// NewServer builds a Server from configuration, filling in defaults for any
// zero-valued limits and compiling the rate-limit exemption patterns.
// It returns an error only when an exemption regexp fails to compile.
func NewServer(
	backendGroups map[string]*BackendGroup,
	wsBackendGroup *BackendGroup,
	wsMethodWhitelist *StringSet,
	rpcMethodMappings map[string]string,
	maxBodySize int64,
	authenticatedPaths map[string]string,
	timeout time.Duration,
	maxUpstreamBatchSize int,
	enableServedByHeader bool,
	cache RPCCache,
	rateLimitConfig RateLimitConfig,
	senderRateLimitConfig SenderRateLimitConfig,
	enableRequestLog bool,
	maxRequestBodyLogLen int,
	maxBatchSize int,
	redisClient *redis.Client,
) (*Server, error) {
	// A nil cache degrades to a no-op so callers never have to nil-check.
	if cache == nil {
		cache = &NoopRPCCache{}
	}

	if maxBodySize == 0 {
		maxBodySize = defaultBodySizeLimit
	}

	if timeout == 0 {
		timeout = defaultRPCTimeout
	}

	if maxUpstreamBatchSize == 0 {
		maxUpstreamBatchSize = defaultMaxUpstreamBatchSize
	}

	if maxBatchSize == 0 {
		maxBatchSize = DefaultMaxBatchRPCCallsLimit
	}

	// Clamp the configured batch size to the hard limit.
	if maxBatchSize > MaxBatchRPCCallsHardLimit {
		maxBatchSize = MaxBatchRPCCallsHardLimit
	}

	// limiterFactory chooses Redis-backed (shared across instances) or
	// in-memory limiters based on config.
	limiterFactory := func(dur time.Duration, max int, prefix string) FrontendRateLimiter {
		if rateLimitConfig.UseRedis {
			return NewRedisFrontendRateLimiter(redisClient, dur, max, prefix)
		}

		return NewMemoryFrontendRateLimit(dur, max)
	}

	var mainLim FrontendRateLimiter
	limExemptOrigins := make([]*regexp.Regexp, 0)
	limExemptUserAgents := make([]*regexp.Regexp, 0)
	// BaseRate <= 0 disables the base limiter entirely (Noop).
	if rateLimitConfig.BaseRate > 0 {
		mainLim = limiterFactory(time.Duration(rateLimitConfig.BaseInterval), rateLimitConfig.BaseRate, "main")
		for _, origin := range rateLimitConfig.ExemptOrigins {
			pattern, err := regexp.Compile(origin)
			if err != nil {
				return nil, err
			}
			limExemptOrigins = append(limExemptOrigins, pattern)
		}
		for _, agent := range rateLimitConfig.ExemptUserAgents {
			pattern, err := regexp.Compile(agent)
			if err != nil {
				return nil, err
			}
			limExemptUserAgents = append(limExemptUserAgents, pattern)
		}
	} else {
		mainLim = NoopFrontendRateLimiter
	}

	// Per-method limiter overrides; Global overrides apply even to exempt
	// origins/user agents.
	overrideLims := make(map[string]FrontendRateLimiter)
	globalMethodLims := make(map[string]bool)
	for method, override := range rateLimitConfig.MethodOverrides {
		overrideLims[method] = limiterFactory(time.Duration(override.Interval), override.Limit, method)

		if override.Global {
			globalMethodLims[method] = true
		}
	}
	var senderLim FrontendRateLimiter
	if senderRateLimitConfig.Enabled {
		senderLim = limiterFactory(time.Duration(senderRateLimitConfig.Interval), senderRateLimitConfig.Limit, "senders")
	}

	rateLimitHeader := defaultRateLimitHeader
	if rateLimitConfig.IPHeaderOverride != "" {
		rateLimitHeader = rateLimitConfig.IPHeaderOverride
	}

	return &Server{
		BackendGroups:        backendGroups,
		wsBackendGroup:       wsBackendGroup,
		wsMethodWhitelist:    wsMethodWhitelist,
		rpcMethodMappings:    rpcMethodMappings,
		maxBodySize:          maxBodySize,
		authenticatedPaths:   authenticatedPaths,
		timeout:              timeout,
		maxUpstreamBatchSize: maxUpstreamBatchSize,
		enableServedByHeader: enableServedByHeader,
		cache:                cache,
		enableRequestLog:     enableRequestLog,
		maxRequestBodyLogLen: maxRequestBodyLogLen,
		maxBatchSize:         maxBatchSize,
		upgrader: &websocket.Upgrader{
			HandshakeTimeout: defaultWSHandshakeTimeout,
		},
		mainLim:                mainLim,
		overrideLims:           overrideLims,
		globallyLimitedMethods: globalMethodLims,
		senderLim:              senderLim,
		allowedChainIds:        senderRateLimitConfig.AllowedChainIds,
		limExemptOrigins:       limExemptOrigins,
		limExemptUserAgents:    limExemptUserAgents,
		rateLimitHeader:        rateLimitHeader,
	}, nil
}
|
|
|
|
|
|
|
|
func (s *Server) RPCListenAndServe(host string, port int) error {
|
2022-06-08 18:09:32 +03:00
|
|
|
s.srvMu.Lock()
|
2022-05-05 00:51:24 +03:00
|
|
|
hdlr := mux.NewRouter()
|
|
|
|
hdlr.HandleFunc("/healthz", s.HandleHealthz).Methods("GET")
|
|
|
|
hdlr.HandleFunc("/", s.HandleRPC).Methods("POST")
|
|
|
|
hdlr.HandleFunc("/{authorization}", s.HandleRPC).Methods("POST")
|
|
|
|
c := cors.New(cors.Options{
|
|
|
|
AllowedOrigins: []string{"*"},
|
|
|
|
})
|
|
|
|
addr := fmt.Sprintf("%s:%d", host, port)
|
|
|
|
s.rpcServer = &http.Server{
|
|
|
|
Handler: instrumentedHdlr(c.Handler(hdlr)),
|
|
|
|
Addr: addr,
|
|
|
|
}
|
|
|
|
log.Info("starting HTTP server", "addr", addr)
|
2022-06-08 18:09:32 +03:00
|
|
|
s.srvMu.Unlock()
|
2022-05-05 00:51:24 +03:00
|
|
|
return s.rpcServer.ListenAndServe()
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *Server) WSListenAndServe(host string, port int) error {
|
2022-06-08 18:09:32 +03:00
|
|
|
s.srvMu.Lock()
|
2022-05-05 00:51:24 +03:00
|
|
|
hdlr := mux.NewRouter()
|
|
|
|
hdlr.HandleFunc("/", s.HandleWS)
|
|
|
|
hdlr.HandleFunc("/{authorization}", s.HandleWS)
|
|
|
|
c := cors.New(cors.Options{
|
|
|
|
AllowedOrigins: []string{"*"},
|
|
|
|
})
|
|
|
|
addr := fmt.Sprintf("%s:%d", host, port)
|
|
|
|
s.wsServer = &http.Server{
|
|
|
|
Handler: instrumentedHdlr(c.Handler(hdlr)),
|
|
|
|
Addr: addr,
|
|
|
|
}
|
|
|
|
log.Info("starting WS server", "addr", addr)
|
2022-06-08 18:09:32 +03:00
|
|
|
s.srvMu.Unlock()
|
2022-05-05 00:51:24 +03:00
|
|
|
return s.wsServer.ListenAndServe()
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *Server) Shutdown() {
|
2022-06-08 18:09:32 +03:00
|
|
|
s.srvMu.Lock()
|
|
|
|
defer s.srvMu.Unlock()
|
2022-05-05 00:51:24 +03:00
|
|
|
if s.rpcServer != nil {
|
|
|
|
_ = s.rpcServer.Shutdown(context.Background())
|
|
|
|
}
|
|
|
|
if s.wsServer != nil {
|
|
|
|
_ = s.wsServer.Shutdown(context.Background())
|
|
|
|
}
|
2023-05-14 08:19:32 +03:00
|
|
|
for _, bg := range s.BackendGroups {
|
2023-05-18 07:56:55 +03:00
|
|
|
bg.Shutdown()
|
2023-05-14 08:19:32 +03:00
|
|
|
}
|
2022-05-05 00:51:24 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
func (s *Server) HandleHealthz(w http.ResponseWriter, r *http.Request) {
|
|
|
|
_, _ = w.Write([]byte("OK"))
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *Server) HandleRPC(w http.ResponseWriter, r *http.Request) {
|
|
|
|
ctx := s.populateContext(w, r)
|
|
|
|
if ctx == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
var cancel context.CancelFunc
|
|
|
|
ctx, cancel = context.WithTimeout(ctx, s.timeout)
|
|
|
|
defer cancel()
|
|
|
|
|
2022-08-04 21:44:46 +03:00
|
|
|
origin := r.Header.Get("Origin")
|
|
|
|
userAgent := r.Header.Get("User-Agent")
|
|
|
|
// Use XFF in context since it will automatically be replaced by the remote IP
|
|
|
|
xff := stripXFF(GetXForwardedFor(ctx))
|
2022-09-15 11:46:11 +03:00
|
|
|
isUnlimitedOrigin := s.isUnlimitedOrigin(origin)
|
|
|
|
isUnlimitedUserAgent := s.isUnlimitedUserAgent(userAgent)
|
|
|
|
|
|
|
|
if xff == "" {
|
|
|
|
writeRPCError(ctx, w, nil, ErrInvalidRequest("request does not include a remote IP"))
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
isLimited := func(method string) bool {
|
2023-02-15 10:42:44 +03:00
|
|
|
isGloballyLimitedMethod := s.isGlobalLimit(method)
|
|
|
|
if !isGloballyLimitedMethod && (isUnlimitedOrigin || isUnlimitedUserAgent) {
|
2022-09-15 11:46:11 +03:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2022-10-09 23:26:27 +03:00
|
|
|
var lim FrontendRateLimiter
|
2022-09-15 11:46:11 +03:00
|
|
|
if method == "" {
|
|
|
|
lim = s.mainLim
|
2022-08-04 20:34:43 +03:00
|
|
|
} else {
|
2022-09-15 11:46:11 +03:00
|
|
|
lim = s.overrideLims[method]
|
2022-08-04 20:34:43 +03:00
|
|
|
}
|
2022-09-15 11:46:11 +03:00
|
|
|
|
|
|
|
if lim == nil {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2022-10-09 23:26:27 +03:00
|
|
|
ok, err := lim.Take(ctx, xff)
|
|
|
|
if err != nil {
|
|
|
|
log.Warn("error taking rate limit", "err", err)
|
|
|
|
return true
|
|
|
|
}
|
2022-09-15 11:46:11 +03:00
|
|
|
return !ok
|
2022-08-04 20:34:43 +03:00
|
|
|
}
|
2022-09-15 11:46:11 +03:00
|
|
|
|
2024-08-06 18:23:25 +03:00
|
|
|
log.Debug(
|
2022-05-05 00:51:24 +03:00
|
|
|
"received RPC request",
|
|
|
|
"req_id", GetReqID(ctx),
|
|
|
|
"auth", GetAuthCtx(ctx),
|
2022-08-04 21:44:46 +03:00
|
|
|
"user_agent", userAgent,
|
2022-09-16 20:28:56 +03:00
|
|
|
"origin", origin,
|
|
|
|
"remote_ip", xff,
|
2022-05-05 00:51:24 +03:00
|
|
|
)
|
|
|
|
|
2023-10-21 22:40:51 +03:00
|
|
|
body, err := io.ReadAll(LimitReader(r.Body, s.maxBodySize))
|
|
|
|
if errors.Is(err, ErrLimitReaderOverLimit) {
|
|
|
|
log.Error("request body too large", "req_id", GetReqID(ctx))
|
|
|
|
RecordRPCError(ctx, BackendProxyd, MethodUnknown, ErrRequestBodyTooLarge)
|
|
|
|
writeRPCError(ctx, w, nil, ErrRequestBodyTooLarge)
|
|
|
|
return
|
|
|
|
}
|
2022-05-05 00:51:24 +03:00
|
|
|
if err != nil {
|
|
|
|
log.Error("error reading request body", "err", err)
|
|
|
|
writeRPCError(ctx, w, nil, ErrInternal)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
RecordRequestPayloadSize(ctx, len(body))
|
|
|
|
|
2022-07-27 20:12:47 +03:00
|
|
|
if s.enableRequestLog {
|
|
|
|
log.Info("Raw RPC request",
|
|
|
|
"body", truncate(string(body), s.maxRequestBodyLogLen),
|
|
|
|
"req_id", GetReqID(ctx),
|
|
|
|
"auth", GetAuthCtx(ctx),
|
|
|
|
)
|
|
|
|
}
|
2022-05-05 00:51:24 +03:00
|
|
|
|
|
|
|
if IsBatch(body) {
|
|
|
|
reqs, err := ParseBatchRPCReq(body)
|
|
|
|
if err != nil {
|
|
|
|
log.Error("error parsing batch RPC request", "err", err)
|
|
|
|
RecordRPCError(ctx, BackendProxyd, MethodUnknown, err)
|
|
|
|
writeRPCError(ctx, w, nil, ErrParseErr)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2022-09-24 00:06:02 +03:00
|
|
|
RecordBatchSize(len(reqs))
|
|
|
|
|
|
|
|
if len(reqs) > s.maxBatchSize {
|
2022-05-05 00:51:24 +03:00
|
|
|
RecordRPCError(ctx, BackendProxyd, MethodUnknown, ErrTooManyBatchRequests)
|
|
|
|
writeRPCError(ctx, w, nil, ErrTooManyBatchRequests)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(reqs) == 0 {
|
|
|
|
writeRPCError(ctx, w, nil, ErrInvalidRequest("must specify at least one batch call"))
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2023-10-19 22:48:03 +03:00
|
|
|
batchRes, batchContainsCached, servedBy, err := s.handleBatchRPC(ctx, reqs, isLimited, true)
|
2022-05-05 00:51:24 +03:00
|
|
|
if err == context.DeadlineExceeded {
|
|
|
|
writeRPCError(ctx, w, nil, ErrGatewayTimeout)
|
|
|
|
return
|
|
|
|
}
|
2023-06-02 23:07:18 +03:00
|
|
|
if errors.Is(err, ErrConsensusGetReceiptsCantBeBatched) ||
|
|
|
|
errors.Is(err, ErrConsensusGetReceiptsInvalidTarget) {
|
2023-06-01 23:16:40 +03:00
|
|
|
writeRPCError(ctx, w, nil, ErrInvalidRequest(err.Error()))
|
|
|
|
return
|
|
|
|
}
|
2022-05-05 00:51:24 +03:00
|
|
|
if err != nil {
|
|
|
|
writeRPCError(ctx, w, nil, ErrInternal)
|
|
|
|
return
|
|
|
|
}
|
2023-10-19 22:48:03 +03:00
|
|
|
if s.enableServedByHeader {
|
|
|
|
w.Header().Set("x-served-by", servedBy)
|
|
|
|
}
|
2022-05-05 00:51:24 +03:00
|
|
|
setCacheHeader(w, batchContainsCached)
|
|
|
|
writeBatchRPCRes(ctx, w, batchRes)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
rawBody := json.RawMessage(body)
|
2023-10-19 22:48:03 +03:00
|
|
|
backendRes, cached, servedBy, err := s.handleBatchRPC(ctx, []json.RawMessage{rawBody}, isLimited, false)
|
2022-05-05 00:51:24 +03:00
|
|
|
if err != nil {
|
2023-06-02 23:07:18 +03:00
|
|
|
if errors.Is(err, ErrConsensusGetReceiptsCantBeBatched) ||
|
|
|
|
errors.Is(err, ErrConsensusGetReceiptsInvalidTarget) {
|
|
|
|
writeRPCError(ctx, w, nil, ErrInvalidRequest(err.Error()))
|
|
|
|
return
|
|
|
|
}
|
2022-05-05 00:51:24 +03:00
|
|
|
writeRPCError(ctx, w, nil, ErrInternal)
|
|
|
|
return
|
|
|
|
}
|
2023-10-19 22:48:03 +03:00
|
|
|
if s.enableServedByHeader {
|
|
|
|
w.Header().Set("x-served-by", servedBy)
|
|
|
|
}
|
2022-05-05 00:51:24 +03:00
|
|
|
setCacheHeader(w, cached)
|
|
|
|
writeRPCRes(ctx, w, backendRes[0])
|
|
|
|
}
|
|
|
|
|
2023-10-19 22:48:03 +03:00
|
|
|
func (s *Server) handleBatchRPC(ctx context.Context, reqs []json.RawMessage, isLimited limiterFunc, isBatch bool) ([]*RPCRes, bool, string, error) {
|
2022-05-05 00:51:24 +03:00
|
|
|
// A request set is transformed into groups of batches.
|
|
|
|
// Each batch group maps to a forwarded JSON-RPC batch request (subject to maxUpstreamBatchSize constraints)
|
|
|
|
// A groupID is used to decouple Requests that have duplicate ID so they're not part of the same batch that's
|
|
|
|
// forwarded to the backend. This is done to ensure that the order of JSON-RPC Responses match the Request order
|
|
|
|
// as the backend MAY return Responses out of order.
|
|
|
|
// NOTE: Duplicate request ids induces 1-sized JSON-RPC batches
|
|
|
|
type batchGroup struct {
|
|
|
|
groupID int
|
|
|
|
backendGroup string
|
|
|
|
}
|
|
|
|
|
|
|
|
responses := make([]*RPCRes, len(reqs))
|
|
|
|
batches := make(map[batchGroup][]batchElem)
|
|
|
|
ids := make(map[string]int, len(reqs))
|
|
|
|
|
|
|
|
for i := range reqs {
|
|
|
|
parsedReq, err := ParseRPCReq(reqs[i])
|
|
|
|
if err != nil {
|
|
|
|
log.Info("error parsing RPC call", "source", "rpc", "err", err)
|
|
|
|
responses[i] = NewRPCErrorRes(nil, err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2023-12-18 21:58:18 +03:00
|
|
|
// Simple health check
|
|
|
|
if len(reqs) == 1 && parsedReq.Method == proxydHealthzMethod {
|
|
|
|
res := &RPCRes{
|
|
|
|
ID: parsedReq.ID,
|
|
|
|
JSONRPC: JSONRPCVersion,
|
|
|
|
Result: "OK",
|
|
|
|
}
|
|
|
|
return []*RPCRes{res}, false, "", nil
|
|
|
|
}
|
|
|
|
|
2022-05-05 00:51:24 +03:00
|
|
|
if err := ValidateRPCReq(parsedReq); err != nil {
|
|
|
|
RecordRPCError(ctx, BackendProxyd, MethodUnknown, err)
|
|
|
|
responses[i] = NewRPCErrorRes(nil, err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2022-06-16 22:02:39 +03:00
|
|
|
if parsedReq.Method == "eth_accounts" {
|
|
|
|
RecordRPCForward(ctx, BackendProxyd, "eth_accounts", RPCRequestSourceHTTP)
|
|
|
|
responses[i] = NewRPCRes(parsedReq.ID, emptyArrayResponse)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2022-05-05 00:51:24 +03:00
|
|
|
group := s.rpcMethodMappings[parsedReq.Method]
|
|
|
|
if group == "" {
|
|
|
|
// use unknown below to prevent DOS vector that fills up memory
|
|
|
|
// with arbitrary method names.
|
|
|
|
log.Info(
|
|
|
|
"blocked request for non-whitelisted method",
|
|
|
|
"source", "rpc",
|
|
|
|
"req_id", GetReqID(ctx),
|
|
|
|
"method", parsedReq.Method,
|
|
|
|
)
|
|
|
|
RecordRPCError(ctx, BackendProxyd, MethodUnknown, ErrMethodNotWhitelisted)
|
|
|
|
responses[i] = NewRPCErrorRes(parsedReq.ID, ErrMethodNotWhitelisted)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2024-07-31 23:25:53 +03:00
|
|
|
// Take base rate limit first
|
|
|
|
if isLimited("") {
|
2024-08-06 18:23:25 +03:00
|
|
|
log.Debug(
|
2024-07-31 23:25:53 +03:00
|
|
|
"rate limited individual RPC in a batch request",
|
|
|
|
"source", "rpc",
|
|
|
|
"req_id", parsedReq.ID,
|
|
|
|
"method", parsedReq.Method,
|
|
|
|
)
|
|
|
|
RecordRPCError(ctx, BackendProxyd, parsedReq.Method, ErrOverRateLimit)
|
|
|
|
responses[i] = NewRPCErrorRes(parsedReq.ID, ErrOverRateLimit)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2022-09-15 11:46:11 +03:00
|
|
|
// Take rate limit for specific methods.
|
|
|
|
if _, ok := s.overrideLims[parsedReq.Method]; ok && isLimited(parsedReq.Method) {
|
2024-08-06 18:23:25 +03:00
|
|
|
log.Debug(
|
2022-09-15 11:46:11 +03:00
|
|
|
"rate limited specific RPC",
|
|
|
|
"source", "rpc",
|
|
|
|
"req_id", GetReqID(ctx),
|
|
|
|
"method", parsedReq.Method,
|
|
|
|
)
|
2022-09-23 23:21:12 +03:00
|
|
|
RecordRPCError(ctx, BackendProxyd, parsedReq.Method, ErrOverRateLimit)
|
|
|
|
responses[i] = NewRPCErrorRes(parsedReq.ID, ErrOverRateLimit)
|
2022-09-15 11:46:11 +03:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2023-01-23 02:40:26 +03:00
|
|
|
// Apply a sender-based rate limit if it is enabled. Note that sender-based rate
|
|
|
|
// limits apply regardless of origin or user-agent. As such, they don't use the
|
|
|
|
// isLimited method.
|
|
|
|
if parsedReq.Method == "eth_sendRawTransaction" && s.senderLim != nil {
|
|
|
|
if err := s.rateLimitSender(ctx, parsedReq); err != nil {
|
|
|
|
RecordRPCError(ctx, BackendProxyd, parsedReq.Method, err)
|
|
|
|
responses[i] = NewRPCErrorRes(parsedReq.ID, err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-05-05 00:51:24 +03:00
|
|
|
id := string(parsedReq.ID)
|
|
|
|
// If this is a duplicate Request ID, move the Request to a new batchGroup
|
|
|
|
ids[id]++
|
|
|
|
batchGroupID := ids[id]
|
|
|
|
batchGroup := batchGroup{groupID: batchGroupID, backendGroup: group}
|
|
|
|
batches[batchGroup] = append(batches[batchGroup], batchElem{parsedReq, i})
|
|
|
|
}
|
|
|
|
|
2023-10-19 22:48:03 +03:00
|
|
|
servedBy := make(map[string]bool, 0)
|
2022-05-05 00:51:24 +03:00
|
|
|
var cached bool
|
|
|
|
for group, batch := range batches {
|
|
|
|
var cacheMisses []batchElem
|
|
|
|
|
|
|
|
for _, req := range batch {
|
|
|
|
backendRes, _ := s.cache.GetRPC(ctx, req.Req)
|
|
|
|
if backendRes != nil {
|
|
|
|
responses[req.Index] = backendRes
|
|
|
|
cached = true
|
|
|
|
} else {
|
|
|
|
cacheMisses = append(cacheMisses, req)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create minibatches - each minibatch must be no larger than the maxUpstreamBatchSize
|
|
|
|
numBatches := int(math.Ceil(float64(len(cacheMisses)) / float64(s.maxUpstreamBatchSize)))
|
|
|
|
for i := 0; i < numBatches; i++ {
|
|
|
|
if ctx.Err() == context.DeadlineExceeded {
|
|
|
|
log.Info("short-circuiting batch RPC",
|
|
|
|
"req_id", GetReqID(ctx),
|
|
|
|
"auth", GetAuthCtx(ctx),
|
|
|
|
"batch_index", i,
|
|
|
|
)
|
|
|
|
batchRPCShortCircuitsTotal.Inc()
|
2023-10-19 22:48:03 +03:00
|
|
|
return nil, false, "", context.DeadlineExceeded
|
2022-05-05 00:51:24 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
start := i * s.maxUpstreamBatchSize
|
|
|
|
end := int(math.Min(float64(start+s.maxUpstreamBatchSize), float64(len(cacheMisses))))
|
|
|
|
elems := cacheMisses[start:end]
|
2023-10-19 22:48:03 +03:00
|
|
|
res, sb, err := s.BackendGroups[group.backendGroup].Forward(ctx, createBatchRequest(elems), isBatch)
|
|
|
|
servedBy[sb] = true
|
2022-05-05 00:51:24 +03:00
|
|
|
if err != nil {
|
2023-06-02 23:07:18 +03:00
|
|
|
if errors.Is(err, ErrConsensusGetReceiptsCantBeBatched) ||
|
|
|
|
errors.Is(err, ErrConsensusGetReceiptsInvalidTarget) {
|
2023-10-19 22:48:03 +03:00
|
|
|
return nil, false, "", err
|
2023-06-01 23:16:40 +03:00
|
|
|
}
|
2022-05-05 00:51:24 +03:00
|
|
|
log.Error(
|
|
|
|
"error forwarding RPC batch",
|
|
|
|
"batch_size", len(elems),
|
|
|
|
"backend_group", group,
|
2023-01-27 21:23:20 +03:00
|
|
|
"req_id", GetReqID(ctx),
|
2022-05-05 00:51:24 +03:00
|
|
|
"err", err,
|
|
|
|
)
|
|
|
|
res = nil
|
|
|
|
for _, elem := range elems {
|
|
|
|
res = append(res, NewRPCErrorRes(elem.Req.ID, err))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for i := range elems {
|
|
|
|
responses[elems[i].Index] = res[i]
|
|
|
|
|
|
|
|
// TODO(inphi): batch put these
|
|
|
|
if res[i].Error == nil && res[i].Result != nil {
|
|
|
|
if err := s.cache.PutRPC(ctx, elems[i].Req, res[i]); err != nil {
|
|
|
|
log.Warn(
|
|
|
|
"cache put error",
|
|
|
|
"req_id", GetReqID(ctx),
|
|
|
|
"err", err,
|
|
|
|
)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-10-19 22:48:03 +03:00
|
|
|
servedByString := ""
|
2024-06-11 22:34:44 +03:00
|
|
|
for sb := range servedBy {
|
2023-10-19 22:48:03 +03:00
|
|
|
if servedByString != "" {
|
|
|
|
servedByString += ", "
|
|
|
|
}
|
|
|
|
servedByString += sb
|
|
|
|
}
|
|
|
|
|
|
|
|
return responses, cached, servedByString, nil
|
2022-05-05 00:51:24 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// HandleWS upgrades an incoming connection to a WebSocket and proxies it to
// a backend from the configured WS backend group. The proxy loop runs in its
// own goroutine; this handler returns as soon as the session is established.
func (s *Server) HandleWS(w http.ResponseWriter, r *http.Request) {
	ctx := s.populateContext(w, r)
	if ctx == nil {
		// populateContext already wrote a 401 response.
		return
	}

	log.Info("received WS connection", "req_id", GetReqID(ctx))

	clientConn, err := s.upgrader.Upgrade(w, r, nil)
	if err != nil {
		log.Error("error upgrading client conn", "auth", GetAuthCtx(ctx), "req_id", GetReqID(ctx), "err", err)
		return
	}
	// Apply the same size cap to WS frames as to HTTP bodies.
	clientConn.SetReadLimit(s.maxBodySize)

	proxier, err := s.wsBackendGroup.ProxyWS(ctx, clientConn, s.wsMethodWhitelist)
	if err != nil {
		if errors.Is(err, ErrNoBackends) {
			RecordUnserviceableRequest(ctx, RPCRequestSourceWS)
		}
		log.Error("error dialing ws backend", "auth", GetAuthCtx(ctx), "req_id", GetReqID(ctx), "err", err)
		clientConn.Close()
		return
	}

	activeClientWsConnsGauge.WithLabelValues(GetAuthCtx(ctx)).Inc()
	go func() {
		// Below call blocks so run it in a goroutine.
		if err := proxier.Proxy(ctx); err != nil {
			log.Error("error proxying websocket", "auth", GetAuthCtx(ctx), "req_id", GetReqID(ctx), "err", err)
		}
		activeClientWsConnsGauge.WithLabelValues(GetAuthCtx(ctx)).Dec()
	}()

	log.Info("accepted WS connection", "auth", GetAuthCtx(ctx), "req_id", GetReqID(ctx))
}
|
|
|
|
|
|
|
|
func (s *Server) populateContext(w http.ResponseWriter, r *http.Request) context.Context {
|
|
|
|
vars := mux.Vars(r)
|
|
|
|
authorization := vars["authorization"]
|
2023-11-02 22:38:04 +03:00
|
|
|
xff := r.Header.Get(s.rateLimitHeader)
|
2022-08-04 20:34:43 +03:00
|
|
|
if xff == "" {
|
|
|
|
ipPort := strings.Split(r.RemoteAddr, ":")
|
|
|
|
if len(ipPort) == 2 {
|
|
|
|
xff = ipPort[0]
|
|
|
|
}
|
|
|
|
}
|
|
|
|
ctx := context.WithValue(r.Context(), ContextKeyXForwardedFor, xff) // nolint:staticcheck
|
2022-05-05 00:51:24 +03:00
|
|
|
|
2023-06-21 21:06:49 +03:00
|
|
|
if len(s.authenticatedPaths) > 0 {
|
2022-08-04 20:34:43 +03:00
|
|
|
if authorization == "" || s.authenticatedPaths[authorization] == "" {
|
|
|
|
log.Info("blocked unauthorized request", "authorization", authorization)
|
|
|
|
httpResponseCodesTotal.WithLabelValues("401").Inc()
|
|
|
|
w.WriteHeader(401)
|
|
|
|
return nil
|
2022-05-05 00:51:24 +03:00
|
|
|
}
|
2022-08-04 20:34:43 +03:00
|
|
|
|
2023-05-09 05:19:46 +03:00
|
|
|
ctx = context.WithValue(ctx, ContextKeyAuth, s.authenticatedPaths[authorization]) // nolint:staticcheck
|
2022-05-05 00:51:24 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
return context.WithValue(
|
|
|
|
ctx,
|
|
|
|
ContextKeyReqID, // nolint:staticcheck
|
|
|
|
randStr(10),
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
2023-05-14 06:54:27 +03:00
|
|
|
// randStr returns a hex-encoded string built from l cryptographically random
// bytes (so the result is 2*l characters long).
func randStr(l int) string {
	entropy := make([]byte, l)
	// A crypto/rand failure means the environment is unusable; give up loudly.
	if _, err := rand.Read(entropy); err != nil {
		panic(err)
	}
	return hex.EncodeToString(entropy)
}
|
|
|
|
|
2022-09-15 11:46:11 +03:00
|
|
|
func (s *Server) isUnlimitedOrigin(origin string) bool {
|
2022-10-17 16:08:33 +03:00
|
|
|
for _, pat := range s.limExemptOrigins {
|
|
|
|
if pat.MatchString(origin) {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false
|
2022-09-15 11:46:11 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
func (s *Server) isUnlimitedUserAgent(origin string) bool {
|
2022-10-17 16:08:33 +03:00
|
|
|
for _, pat := range s.limExemptUserAgents {
|
|
|
|
if pat.MatchString(origin) {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false
|
2022-09-15 11:46:11 +03:00
|
|
|
}
|
|
|
|
|
2023-02-15 10:42:44 +03:00
|
|
|
// isGlobalLimit reports whether method's rate limit applies even to exempt
// origins and user agents (MethodOverrides entries marked Global).
func (s *Server) isGlobalLimit(method string) bool {
	return s.globallyLimitedMethods[method]
}
|
|
|
|
|
2023-01-23 02:40:26 +03:00
|
|
|
// rateLimitSender enforces the sender-based rate limit for
// eth_sendRawTransaction: it decodes the raw transaction from req.Params,
// validates the chain id against the allowlist, recovers the sender address
// (an ecrecover, which is expensive), and takes one token from the sender
// limiter keyed by "sender:nonce". Returns an RPC error when the request is
// malformed or the limit is exceeded; nil when the request may proceed.
func (s *Server) rateLimitSender(ctx context.Context, req *RPCReq) error {
	var params []string
	if err := json.Unmarshal(req.Params, &params); err != nil {
		log.Debug("error unmarshalling raw transaction params", "err", err, "req_Id", GetReqID(ctx))
		return ErrParseErr
	}

	if len(params) != 1 {
		log.Debug("raw transaction request has invalid number of params", "req_id", GetReqID(ctx))
		// The error below is identical to the one Geth responds with.
		return ErrInvalidParams("missing value for required argument 0")
	}

	var data hexutil.Bytes
	if err := data.UnmarshalText([]byte(params[0])); err != nil {
		log.Debug("error decoding raw tx data", "err", err, "req_id", GetReqID(ctx))
		// Geth returns the raw error from UnmarshalText.
		return ErrInvalidParams(err.Error())
	}

	// Inflates a types.Transaction object from the transaction's raw bytes.
	tx := new(types.Transaction)
	if err := tx.UnmarshalBinary(data); err != nil {
		log.Debug("could not unmarshal transaction", "err", err, "req_id", GetReqID(ctx))
		return ErrInvalidParams(err.Error())
	}

	// Check if the transaction is for the expected chain,
	// otherwise reject before rate limiting to avoid replay attacks.
	if !s.isAllowedChainId(tx.ChainId()) {
		log.Debug("chain id is not allowed", "req_id", GetReqID(ctx))
		return txpool.ErrInvalidSender
	}

	// Convert the transaction into a Message object so that we can get the
	// sender. This method performs an ecrecover, which can be expensive.
	msg, err := core.TransactionToMessage(tx, types.LatestSignerForChainID(tx.ChainId()), nil)
	if err != nil {
		log.Debug("could not get message from transaction", "err", err, "req_id", GetReqID(ctx))
		return ErrInvalidParams(err.Error())
	}
	// Keying by sender:nonce lets resubmissions of the same nonce be limited
	// while distinct nonces from one sender each consume a fresh token.
	ok, err := s.senderLim.Take(ctx, fmt.Sprintf("%s:%d", msg.From.Hex(), tx.Nonce()))
	if err != nil {
		log.Error("error taking from sender limiter", "err", err, "req_id", GetReqID(ctx))
		return ErrInternal
	}
	if !ok {
		log.Debug("sender rate limit exceeded", "sender", msg.From.Hex(), "req_id", GetReqID(ctx))
		return ErrOverSenderRateLimit
	}

	return nil
}
|
|
|
|
|
2023-07-19 23:21:00 +03:00
|
|
|
func (s *Server) isAllowedChainId(chainId *big.Int) bool {
|
|
|
|
if s.allowedChainIds == nil || len(s.allowedChainIds) == 0 {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
for _, id := range s.allowedChainIds {
|
|
|
|
if chainId.Cmp(id) == 0 {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2022-05-05 00:51:24 +03:00
|
|
|
func setCacheHeader(w http.ResponseWriter, cached bool) {
|
|
|
|
if cached {
|
|
|
|
w.Header().Set(cacheStatusHdr, "HIT")
|
|
|
|
} else {
|
|
|
|
w.Header().Set(cacheStatusHdr, "MISS")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func writeRPCError(ctx context.Context, w http.ResponseWriter, id json.RawMessage, err error) {
|
|
|
|
var res *RPCRes
|
|
|
|
if r, ok := err.(*RPCErr); ok {
|
|
|
|
res = NewRPCErrorRes(id, r)
|
|
|
|
} else {
|
|
|
|
res = NewRPCErrorRes(id, ErrInternal)
|
|
|
|
}
|
|
|
|
writeRPCRes(ctx, w, res)
|
|
|
|
}
|
|
|
|
|
|
|
|
// writeRPCRes serializes a single JSON-RPC response. The HTTP status is 200
// unless the response carries an error with an explicit HTTP code. Payload
// size and status-code metrics are recorded only on successful encoding.
func writeRPCRes(ctx context.Context, w http.ResponseWriter, res *RPCRes) {
	statusCode := 200
	if res.IsError() && res.Error.HTTPErrorCode != 0 {
		statusCode = res.Error.HTTPErrorCode
	}

	w.Header().Set("content-type", "application/json")
	w.WriteHeader(statusCode)
	// Wrap the writer to measure the encoded payload size.
	ww := &recordLenWriter{Writer: w}
	enc := json.NewEncoder(ww)
	if err := enc.Encode(res); err != nil {
		log.Error("error writing rpc response", "err", err)
		RecordRPCError(ctx, BackendProxyd, MethodUnknown, err)
		return
	}
	httpResponseCodesTotal.WithLabelValues(strconv.Itoa(statusCode)).Inc()
	RecordResponsePayloadSize(ctx, ww.Len)
}
|
|
|
|
|
|
|
|
// writeBatchRPCRes serializes a batch of JSON-RPC responses as a JSON array.
// Batch responses always use HTTP 200; per-call errors live inside the array.
func writeBatchRPCRes(ctx context.Context, w http.ResponseWriter, res []*RPCRes) {
	w.Header().Set("content-type", "application/json")
	w.WriteHeader(200)
	// Wrap the writer to measure the encoded payload size.
	ww := &recordLenWriter{Writer: w}
	enc := json.NewEncoder(ww)
	if err := enc.Encode(res); err != nil {
		log.Error("error writing batch rpc response", "err", err)
		RecordRPCError(ctx, BackendProxyd, MethodUnknown, err)
		return
	}
	RecordResponsePayloadSize(ctx, ww.Len)
}
|
|
|
|
|
|
|
|
func instrumentedHdlr(h http.Handler) http.HandlerFunc {
|
|
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
respTimer := prometheus.NewTimer(httpRequestDurationSumm)
|
|
|
|
h.ServeHTTP(w, r)
|
|
|
|
respTimer.ObserveDuration()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func GetAuthCtx(ctx context.Context) string {
|
|
|
|
authUser, ok := ctx.Value(ContextKeyAuth).(string)
|
|
|
|
if !ok {
|
|
|
|
return "none"
|
|
|
|
}
|
|
|
|
|
|
|
|
return authUser
|
|
|
|
}
|
|
|
|
|
|
|
|
func GetReqID(ctx context.Context) string {
|
|
|
|
reqId, ok := ctx.Value(ContextKeyReqID).(string)
|
|
|
|
if !ok {
|
|
|
|
return ""
|
|
|
|
}
|
|
|
|
return reqId
|
|
|
|
}
|
|
|
|
|
|
|
|
func GetXForwardedFor(ctx context.Context) string {
|
|
|
|
xff, ok := ctx.Value(ContextKeyXForwardedFor).(string)
|
|
|
|
if !ok {
|
|
|
|
return ""
|
|
|
|
}
|
|
|
|
return xff
|
|
|
|
}
|
|
|
|
|
|
|
|
type recordLenWriter struct {
|
|
|
|
io.Writer
|
|
|
|
Len int
|
|
|
|
}
|
|
|
|
|
|
|
|
func (w *recordLenWriter) Write(p []byte) (n int, err error) {
|
|
|
|
n, err = w.Writer.Write(p)
|
|
|
|
w.Len += n
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// NoopRPCCache is an RPCCache that stores nothing; it is substituted in
// NewServer when caching is disabled so callers never nil-check the cache.
type NoopRPCCache struct{}

// GetRPC always reports a cache miss.
func (n *NoopRPCCache) GetRPC(context.Context, *RPCReq) (*RPCRes, error) {
	return nil, nil
}

// PutRPC discards the entry and never fails.
func (n *NoopRPCCache) PutRPC(context.Context, *RPCReq, *RPCRes) error {
	return nil
}
|
|
|
|
|
2022-07-27 20:12:47 +03:00
|
|
|
func truncate(str string, maxLen int) string {
|
|
|
|
if maxLen == 0 {
|
|
|
|
maxLen = maxRequestBodyLogLen
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(str) > maxLen {
|
|
|
|
return str[:maxLen] + "..."
|
2022-05-05 00:51:24 +03:00
|
|
|
} else {
|
|
|
|
return str
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// batchElem pairs a parsed RPC request with its index in the client's
// original batch so responses can be written back in request order.
type batchElem struct {
	Req   *RPCReq
	Index int
}
|
|
|
|
|
|
|
|
func createBatchRequest(elems []batchElem) []*RPCReq {
|
|
|
|
batch := make([]*RPCReq, len(elems))
|
|
|
|
for i := range elems {
|
|
|
|
batch[i] = elems[i].Req
|
|
|
|
}
|
|
|
|
return batch
|
|
|
|
}
|