maint: Move Go packages into root of repo, adopt go.work (#2524)
- Adopts Go workspaces for future compatibility with the Bedrock move into the monorepo
- Moves Go packages to the root of the repo in order to fix import paths
- Rewrites existing Go import paths
- Removes Stackman, since it's not needed anymore

Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
Commit: 67a4016fdf
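The commit message says the repo adopts Go workspaces, but the go.work file itself is not part of this excerpt. The sketch below only illustrates the shape such a file takes; the listed module directories are assumptions.

```go
// go.work (illustrative sketch only — the actual file added by this commit is not shown here)
go 1.18

use (
	// each Go module in the monorepo gets a `use` entry; proxyd is one of them
	./proxyd
)
```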
3
proxyd/proxyd/.gitignore
vendored
Normal file
@@ -0,0 +1,3 @@
bin

config.toml
|
151
proxyd/proxyd/CHANGELOG.md
Normal file
@@ -0,0 +1,151 @@
# @eth-optimism/proxyd

## 3.8.5

### Patch Changes

- 2a062b11: proxyd: Log sanitized RPC requests
- d9f058ce: proxyd: Reduced RPC request logging
- a4bfd9e7: proxyd: Limit the number of concurrent RPCs to backends

## 3.8.4

### Patch Changes

- 08329ba2: proxyd: Record redis cache operation latency
- ae112021: proxyd: Request-scoped context for fast batch RPC short-circuiting

## 3.8.3

### Patch Changes

- 160f4c3d: Update docker image to use golang 1.18.0

## 3.8.2

### Patch Changes

- ae18cea1: Don't hit Redis when the out of service interval is zero

## 3.8.1

### Patch Changes

- acf7dbd5: Update to go-ethereum v1.10.16

## 3.8.0

### Minor Changes

- 527448bb: Handle nil responses better

## 3.7.0

### Minor Changes

- 3c2926b1: Add debug cache status header to proxyd responses

## 3.6.0

### Minor Changes

- 096c5f20: proxyd: Allow cached RPCs to be evicted by redis
- 71d64834: Add caching for block-dependent RPCs
- fd2e1523: proxyd: Cache block-dependent RPCs
- 1760613c: Add integration tests and batching

## 3.5.0

### Minor Changes

- 025a3c0d: Add request/response payload size metrics to proxyd
- daf8db0b: cache immutable RPC responses in proxyd
- 8aa89bf3: Add X-Forwarded-For header when proxying RPCs on proxyd

## 3.4.1

### Patch Changes

- 415164e1: Force proxyd build

## 3.4.0

### Minor Changes

- 4b56ed84: Various proxyd fixes

## 3.3.0

### Minor Changes

- 7b7ffd2e: Allows string RPC ids on proxyd

## 3.2.0

### Minor Changes

- 73484138: Adds ability to specify env vars in config

## 3.1.2

### Patch Changes

- 1b79aa62: Release proxyd

## 3.1.1

### Patch Changes

- b8802054: Trigger release of proxyd
- 34fcb277: Bump proxyd to test release build workflow

## 3.1.0

### Minor Changes

- da6138fd: Updated metrics, support local rate limiter

### Patch Changes

- 6c7f483b: Add support for additional SSL certificates in Docker container

## 3.0.0

### Major Changes

- abe231bf: Make endpoints match Geth, better logging

## 2.0.0

### Major Changes

- 6c50098b: Update metrics, support WS
- f827dbda: Brings back the ability to selectively route RPC methods to backend groups

### Minor Changes

- 8cc824e5: Updates proxyd to include additional error metrics.
- 9ba4c5e0: Update metrics, support authenticated endpoints
- 78d0f3f0: Put special errors in a dedicated metric, pass along the content-type header

### Patch Changes

- 6e6a55b1: Canary release

## 1.0.2

### Patch Changes

- b9d2fbee: Trigger releases

## 1.0.1

### Patch Changes

- 893623c9: Trigger patch releases for dockerhub

## 1.0.0

### Major Changes

- 28aabc41: Initial release of RPC proxy daemon
|
30
proxyd/proxyd/Dockerfile
Normal file
@@ -0,0 +1,30 @@
FROM golang:1.18.0-alpine3.15 as builder

ARG GITCOMMIT=docker
ARG GITDATE=docker
ARG GITVERSION=docker

RUN apk add make jq git gcc musl-dev linux-headers

COPY ./proxyd /app

WORKDIR /app

RUN make proxyd

FROM alpine:3.15

COPY ./proxyd/entrypoint.sh /bin/entrypoint.sh

RUN apk update && \
    apk add ca-certificates && \
    chmod +x /bin/entrypoint.sh

EXPOSE 8080

VOLUME /etc/proxyd

COPY --from=builder /app/bin/proxyd /bin/proxyd

ENTRYPOINT ["/bin/entrypoint.sh"]
CMD ["/bin/proxyd", "/etc/proxyd/proxyd.toml"]
|
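One possible way to build and run this image, assuming the build context is the directory that contains the `proxyd` source folder (the Dockerfile `COPY`s `./proxyd` into the builder stage); the image tag and host paths below are placeholders.

```sh
# Build (GITCOMMIT/GITDATE/GITVERSION are optional build args; they default to "docker").
docker build -f proxyd/Dockerfile -t proxyd:local \
  --build-arg GITCOMMIT=$(git rev-parse HEAD) .

# Run with a config mounted where the default CMD expects it, and (optionally)
# an extra CA certificate that entrypoint.sh installs at startup.
docker run -p 8080:8080 \
  -v $(pwd)/proxyd.toml:/etc/proxyd/proxyd.toml \
  -v $(pwd)/backend-ca.crt:/usr/local/share/ca-certificates/backend-ca.crt \
  proxyd:local
```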
21
proxyd/proxyd/Makefile
Normal file
@@ -0,0 +1,21 @@
LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT)
LDFLAGSSTRING +=-X main.GitDate=$(GITDATE)
LDFLAGSSTRING +=-X main.GitVersion=$(GITVERSION)
LDFLAGS := -ldflags "$(LDFLAGSSTRING)"

proxyd:
	go build -v $(LDFLAGS) -o ./bin/proxyd ./cmd/proxyd
.PHONY: proxyd

fmt:
	go mod tidy
	gofmt -w .
.PHONY: fmt

test:
	go test -race -v ./...
.PHONY: test

lint:
	go vet ./...
.PHONY: lint
|
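The GITCOMMIT/GITDATE/GITVERSION variables referenced by LDFLAGSSTRING are not defined in the lines shown above, so a build that wants version metadata baked in would supply them on the command line. A hedged example, not a documented workflow:

```sh
make proxyd \
  GITCOMMIT=$(git rev-parse HEAD) \
  GITDATE=$(git show -s --format='%ct' HEAD) \
  GITVERSION=local

./bin/proxyd ./config.toml   # config.toml is gitignored, per the .gitignore above
```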
26
proxyd/proxyd/README.md
Normal file
@@ -0,0 +1,26 @@
# rpc-proxy

This tool implements `proxyd`, an RPC request router and proxy. It does the following things:

1. Whitelists RPC methods.
2. Routes RPC methods to groups of backend services.
3. Automatically retries failed backend requests.
4. Provides metrics that measure request latency, error rates, and the like.

## Usage

Run `make proxyd` to build the binary. No additional dependencies are necessary.

To configure `proxyd` for use, you'll need to create a configuration file to define your proxy backends and routing rules. Check out [example.config.toml](./example.config.toml) for how to do this, alongside a full list of all options with commentary.

Once you have a config file, start the daemon via `proxyd <path-to-config>.toml`.

## Metrics

See `metrics.go` for a list of all available metrics.

The metrics port is configurable via the `metrics.port` and `metrics.host` keys in the config.

## Adding Backend SSL Certificates in Docker

The Docker image runs on Alpine Linux. If you get SSL errors when connecting to a backend within Docker, you may need to add additional certificates to Alpine's certificate store. To do this, bind mount the certificate bundle into a file in `/usr/local/share/ca-certificates`. The `entrypoint.sh` script will then update the store with whatever is in the `ca-certificates` directory prior to starting `proxyd`.
|
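To make the README's configuration step concrete, here is a minimal sketch assembled from keys that appear in example.config.toml and config.go later in this diff; the backend URL placeholder and the method-to-group mappings are illustrative assumptions, not a tested config.

```toml
[server]
rpc_host = "0.0.0.0"
rpc_port = 8080

[backends]
  [backends.main]
  # $-prefixed values are resolved from the environment (see ReadFromEnvOrConfig in config.go).
  rpc_url = "$MAIN_BACKEND_RPC_URL"
  max_rps = 3

[backend_groups]
  [backend_groups.main]
  backends = ["main"]

# Whitelist methods by mapping each one to a backend group
# (Config.RPCMethodMappings in config.go).
[rpc_method_mappings]
eth_chainId = "main"
eth_blockNumber = "main"
eth_call = "main"
```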
785
proxyd/proxyd/backend.go
Normal file
@@ -0,0 +1,785 @@
|
||||
package proxyd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/gorilla/websocket"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"golang.org/x/sync/semaphore"
|
||||
)
|
||||
|
||||
const (
|
||||
JSONRPCVersion = "2.0"
|
||||
JSONRPCErrorInternal = -32000
|
||||
)
|
||||
|
||||
var (
|
||||
ErrParseErr = &RPCErr{
|
||||
Code: -32700,
|
||||
Message: "parse error",
|
||||
HTTPErrorCode: 400,
|
||||
}
|
||||
ErrInternal = &RPCErr{
|
||||
Code: JSONRPCErrorInternal,
|
||||
Message: "internal error",
|
||||
HTTPErrorCode: 500,
|
||||
}
|
||||
ErrMethodNotWhitelisted = &RPCErr{
|
||||
Code: JSONRPCErrorInternal - 1,
|
||||
Message: "rpc method is not whitelisted",
|
||||
HTTPErrorCode: 403,
|
||||
}
|
||||
ErrBackendOffline = &RPCErr{
|
||||
Code: JSONRPCErrorInternal - 10,
|
||||
Message: "backend offline",
|
||||
HTTPErrorCode: 503,
|
||||
}
|
||||
ErrNoBackends = &RPCErr{
|
||||
Code: JSONRPCErrorInternal - 11,
|
||||
Message: "no backends available for method",
|
||||
HTTPErrorCode: 503,
|
||||
}
|
||||
ErrBackendOverCapacity = &RPCErr{
|
||||
Code: JSONRPCErrorInternal - 12,
|
||||
Message: "backend is over capacity",
|
||||
HTTPErrorCode: 429,
|
||||
}
|
||||
ErrBackendBadResponse = &RPCErr{
|
||||
Code: JSONRPCErrorInternal - 13,
|
||||
Message: "backend returned an invalid response",
|
||||
HTTPErrorCode: 500,
|
||||
}
|
||||
ErrTooManyBatchRequests = &RPCErr{
|
||||
Code: JSONRPCErrorInternal - 14,
|
||||
Message: "too many RPC calls in batch request",
|
||||
}
|
||||
ErrGatewayTimeout = &RPCErr{
|
||||
Code: JSONRPCErrorInternal - 15,
|
||||
Message: "gateway timeout",
|
||||
HTTPErrorCode: 504,
|
||||
}
|
||||
)
|
||||
|
||||
func ErrInvalidRequest(msg string) *RPCErr {
|
||||
return &RPCErr{
|
||||
Code: -32601,
|
||||
Message: msg,
|
||||
HTTPErrorCode: 400,
|
||||
}
|
||||
}
|
||||
|
||||
type Backend struct {
|
||||
Name string
|
||||
rpcURL string
|
||||
wsURL string
|
||||
authUsername string
|
||||
authPassword string
|
||||
rateLimiter RateLimiter
|
||||
client *LimitedHTTPClient
|
||||
dialer *websocket.Dialer
|
||||
maxRetries int
|
||||
maxResponseSize int64
|
||||
maxRPS int
|
||||
maxWSConns int
|
||||
outOfServiceInterval time.Duration
|
||||
stripTrailingXFF bool
|
||||
proxydIP string
|
||||
}
|
||||
|
||||
type BackendOpt func(b *Backend)
|
||||
|
||||
func WithBasicAuth(username, password string) BackendOpt {
|
||||
return func(b *Backend) {
|
||||
b.authUsername = username
|
||||
b.authPassword = password
|
||||
}
|
||||
}
|
||||
|
||||
func WithTimeout(timeout time.Duration) BackendOpt {
|
||||
return func(b *Backend) {
|
||||
b.client.Timeout = timeout
|
||||
}
|
||||
}
|
||||
|
||||
func WithMaxRetries(retries int) BackendOpt {
|
||||
return func(b *Backend) {
|
||||
b.maxRetries = retries
|
||||
}
|
||||
}
|
||||
|
||||
func WithMaxResponseSize(size int64) BackendOpt {
|
||||
return func(b *Backend) {
|
||||
b.maxResponseSize = size
|
||||
}
|
||||
}
|
||||
|
||||
func WithOutOfServiceDuration(interval time.Duration) BackendOpt {
|
||||
return func(b *Backend) {
|
||||
b.outOfServiceInterval = interval
|
||||
}
|
||||
}
|
||||
|
||||
func WithMaxRPS(maxRPS int) BackendOpt {
|
||||
return func(b *Backend) {
|
||||
b.maxRPS = maxRPS
|
||||
}
|
||||
}
|
||||
|
||||
func WithMaxWSConns(maxConns int) BackendOpt {
|
||||
return func(b *Backend) {
|
||||
b.maxWSConns = maxConns
|
||||
}
|
||||
}
|
||||
|
||||
func WithTLSConfig(tlsConfig *tls.Config) BackendOpt {
|
||||
return func(b *Backend) {
|
||||
if b.client.Transport == nil {
|
||||
b.client.Transport = &http.Transport{}
|
||||
}
|
||||
b.client.Transport.(*http.Transport).TLSClientConfig = tlsConfig
|
||||
}
|
||||
}
|
||||
|
||||
func WithStrippedTrailingXFF() BackendOpt {
|
||||
return func(b *Backend) {
|
||||
b.stripTrailingXFF = true
|
||||
}
|
||||
}
|
||||
|
||||
func WithProxydIP(ip string) BackendOpt {
|
||||
return func(b *Backend) {
|
||||
b.proxydIP = ip
|
||||
}
|
||||
}
|
||||
|
||||
func NewBackend(
|
||||
name string,
|
||||
rpcURL string,
|
||||
wsURL string,
|
||||
rateLimiter RateLimiter,
|
||||
rpcSemaphore *semaphore.Weighted,
|
||||
opts ...BackendOpt,
|
||||
) *Backend {
|
||||
backend := &Backend{
|
||||
Name: name,
|
||||
rpcURL: rpcURL,
|
||||
wsURL: wsURL,
|
||||
rateLimiter: rateLimiter,
|
||||
maxResponseSize: math.MaxInt64,
|
||||
client: &LimitedHTTPClient{
|
||||
Client: http.Client{Timeout: 5 * time.Second},
|
||||
sem: rpcSemaphore,
|
||||
backendName: name,
|
||||
},
|
||||
dialer: &websocket.Dialer{},
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(backend)
|
||||
}
|
||||
|
||||
if !backend.stripTrailingXFF && backend.proxydIP == "" {
|
||||
log.Warn("proxied requests' XFF header will not contain the proxyd ip address")
|
||||
}
|
||||
|
||||
return backend
|
||||
}
|
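NewBackend uses the functional-options pattern: required collaborators are positional, everything else is a `WithX` option. A package-internal sketch of a caller (the rate limiter implementation lives in a file not shown in this excerpt, so it is passed in rather than constructed):

```go
// Sketch only: how a Backend might be assembled inside this package.
func exampleNewBackend(limiter RateLimiter) *Backend {
	// Bounds the number of concurrent in-flight RPCs to this backend.
	sem := semaphore.NewWeighted(100)

	return NewBackend(
		"example",
		"https://rpc.example.com",
		"wss://rpc.example.com/ws",
		limiter,
		sem,
		WithTimeout(10*time.Second),
		WithMaxRetries(3),
		WithMaxRPS(50),
		WithBasicAuth("user", "secret"),
	)
}
```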
||||
|
||||
func (b *Backend) Forward(ctx context.Context, reqs []*RPCReq, isBatch bool) ([]*RPCRes, error) {
|
||||
if !b.Online() {
|
||||
RecordBatchRPCError(ctx, b.Name, reqs, ErrBackendOffline)
|
||||
return nil, ErrBackendOffline
|
||||
}
|
||||
if b.IsRateLimited() {
|
||||
RecordBatchRPCError(ctx, b.Name, reqs, ErrBackendOverCapacity)
|
||||
return nil, ErrBackendOverCapacity
|
||||
}
|
||||
|
||||
var lastError error
|
||||
// <= to account for the first attempt not technically being
|
||||
// a retry
|
||||
for i := 0; i <= b.maxRetries; i++ {
|
||||
RecordBatchRPCForward(ctx, b.Name, reqs, RPCRequestSourceHTTP)
|
||||
metricLabelMethod := reqs[0].Method
|
||||
if isBatch {
|
||||
metricLabelMethod = "<batch>"
|
||||
}
|
||||
timer := prometheus.NewTimer(
|
||||
rpcBackendRequestDurationSumm.WithLabelValues(
|
||||
b.Name,
|
||||
metricLabelMethod,
|
||||
strconv.FormatBool(isBatch),
|
||||
),
|
||||
)
|
||||
|
||||
res, err := b.doForward(ctx, reqs, isBatch)
|
||||
if err != nil {
|
||||
lastError = err
|
||||
log.Warn(
|
||||
"backend request failed, trying again",
|
||||
"name", b.Name,
|
||||
"req_id", GetReqID(ctx),
|
||||
"err", err,
|
||||
)
|
||||
timer.ObserveDuration()
|
||||
RecordBatchRPCError(ctx, b.Name, reqs, err)
|
||||
sleepContext(ctx, calcBackoff(i))
|
||||
continue
|
||||
}
|
||||
timer.ObserveDuration()
|
||||
|
||||
MaybeRecordErrorsInRPCRes(ctx, b.Name, reqs, res)
|
||||
return res, nil
|
||||
}
|
||||
|
||||
b.setOffline()
|
||||
return nil, wrapErr(lastError, "permanent error forwarding request")
|
||||
}
|
||||
|
||||
func (b *Backend) ProxyWS(clientConn *websocket.Conn, methodWhitelist *StringSet) (*WSProxier, error) {
|
||||
if !b.Online() {
|
||||
return nil, ErrBackendOffline
|
||||
}
|
||||
if b.IsWSSaturated() {
|
||||
return nil, ErrBackendOverCapacity
|
||||
}
|
||||
|
||||
backendConn, _, err := b.dialer.Dial(b.wsURL, nil) // nolint:bodyclose
|
||||
if err != nil {
|
||||
b.setOffline()
|
||||
if err := b.rateLimiter.DecBackendWSConns(b.Name); err != nil {
|
||||
log.Error("error decrementing backend ws conns", "name", b.Name, "err", err)
|
||||
}
|
||||
return nil, wrapErr(err, "error dialing backend")
|
||||
}
|
||||
|
||||
activeBackendWsConnsGauge.WithLabelValues(b.Name).Inc()
|
||||
return NewWSProxier(b, clientConn, backendConn, methodWhitelist), nil
|
||||
}
|
||||
|
||||
func (b *Backend) Online() bool {
|
||||
online, err := b.rateLimiter.IsBackendOnline(b.Name)
|
||||
if err != nil {
|
||||
log.Warn(
|
||||
"error getting backend availability, assuming it is offline",
|
||||
"name", b.Name,
|
||||
"err", err,
|
||||
)
|
||||
return false
|
||||
}
|
||||
return online
|
||||
}
|
||||
|
||||
func (b *Backend) IsRateLimited() bool {
|
||||
if b.maxRPS == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
usedLimit, err := b.rateLimiter.IncBackendRPS(b.Name)
|
||||
if err != nil {
|
||||
log.Error(
|
||||
"error getting backend used rate limit, assuming limit is exhausted",
|
||||
"name", b.Name,
|
||||
"err", err,
|
||||
)
|
||||
return true
|
||||
}
|
||||
|
||||
return b.maxRPS < usedLimit
|
||||
}
|
||||
|
||||
func (b *Backend) IsWSSaturated() bool {
|
||||
if b.maxWSConns == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
incremented, err := b.rateLimiter.IncBackendWSConns(b.Name, b.maxWSConns)
|
||||
if err != nil {
|
||||
log.Error(
|
||||
"error getting backend used ws conns, assuming limit is exhausted",
|
||||
"name", b.Name,
|
||||
"err", err,
|
||||
)
|
||||
return true
|
||||
}
|
||||
|
||||
return !incremented
|
||||
}
|
||||
|
||||
func (b *Backend) setOffline() {
|
||||
err := b.rateLimiter.SetBackendOffline(b.Name, b.outOfServiceInterval)
|
||||
if err != nil {
|
||||
log.Warn(
|
||||
"error setting backend offline",
|
||||
"name", b.Name,
|
||||
"err", err,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Backend) doForward(ctx context.Context, rpcReqs []*RPCReq, isBatch bool) ([]*RPCRes, error) {
|
||||
body := mustMarshalJSON(rpcReqs)
|
||||
|
||||
httpReq, err := http.NewRequestWithContext(ctx, "POST", b.rpcURL, bytes.NewReader(body))
|
||||
if err != nil {
|
||||
return nil, wrapErr(err, "error creating backend request")
|
||||
}
|
||||
|
||||
if b.authPassword != "" {
|
||||
httpReq.SetBasicAuth(b.authUsername, b.authPassword)
|
||||
}
|
||||
|
||||
xForwardedFor := GetXForwardedFor(ctx)
|
||||
if b.stripTrailingXFF {
|
||||
ipList := strings.Split(xForwardedFor, ", ")
|
||||
if len(ipList) > 0 {
|
||||
xForwardedFor = ipList[0]
|
||||
}
|
||||
} else if b.proxydIP != "" {
|
||||
xForwardedFor = fmt.Sprintf("%s, %s", xForwardedFor, b.proxydIP)
|
||||
}
|
||||
|
||||
httpReq.Header.Set("content-type", "application/json")
|
||||
httpReq.Header.Set("X-Forwarded-For", xForwardedFor)
|
||||
|
||||
httpRes, err := b.client.DoLimited(httpReq)
|
||||
if err != nil {
|
||||
return nil, wrapErr(err, "error in backend request")
|
||||
}
|
||||
|
||||
metricLabelMethod := rpcReqs[0].Method
|
||||
if isBatch {
|
||||
metricLabelMethod = "<batch>"
|
||||
}
|
||||
rpcBackendHTTPResponseCodesTotal.WithLabelValues(
|
||||
GetAuthCtx(ctx),
|
||||
b.Name,
|
||||
metricLabelMethod,
|
||||
strconv.Itoa(httpRes.StatusCode),
|
||||
strconv.FormatBool(isBatch),
|
||||
).Inc()
|
||||
|
||||
// Alchemy returns a 400 on bad JSONs, so handle that case
|
||||
if httpRes.StatusCode != 200 && httpRes.StatusCode != 400 {
|
||||
return nil, fmt.Errorf("response code %d", httpRes.StatusCode)
|
||||
}
|
||||
|
||||
defer httpRes.Body.Close()
|
||||
resB, err := ioutil.ReadAll(io.LimitReader(httpRes.Body, b.maxResponseSize))
|
||||
if err != nil {
|
||||
return nil, wrapErr(err, "error reading response body")
|
||||
}
|
||||
|
||||
var res []*RPCRes
|
||||
if err := json.Unmarshal(resB, &res); err != nil {
|
||||
return nil, ErrBackendBadResponse
|
||||
}
|
||||
|
||||
// Alas! Certain node providers (Infura) always return a single JSON object for some types of errors
|
||||
if len(rpcReqs) != len(res) {
|
||||
return nil, ErrBackendBadResponse
|
||||
}
|
||||
|
||||
// capture the HTTP status code in the response. this will only
|
||||
// ever be 400 given the status check above.
|
||||
if httpRes.StatusCode != 200 {
|
||||
for _, res := range res {
|
||||
res.Error.HTTPErrorCode = httpRes.StatusCode
|
||||
}
|
||||
}
|
||||
|
||||
sortBatchRPCResponse(rpcReqs, res)
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// sortBatchRPCResponse sorts the RPCRes slice according to the position of its corresponding ID in the RPCReq slice
|
||||
func sortBatchRPCResponse(req []*RPCReq, res []*RPCRes) {
|
||||
pos := make(map[string]int, len(req))
|
||||
for i, r := range req {
|
||||
key := string(r.ID)
|
||||
if _, ok := pos[key]; ok {
|
||||
panic("bug! detected requests with duplicate IDs")
|
||||
}
|
||||
pos[key] = i
|
||||
}
|
||||
|
||||
sort.Slice(res, func(i, j int) bool {
|
||||
l := res[i].ID
|
||||
r := res[j].ID
|
||||
return pos[string(l)] < pos[string(r)]
|
||||
})
|
||||
}
|
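sortBatchRPCResponse restores the client's ordering after a backend returns batch results in arbitrary order. A package-internal sketch of the effect (field names per the RPCReq/RPCRes usage elsewhere in this diff):

```go
// Sketch only: out-of-order batch responses are realigned with the requests.
func exampleSortBatch() {
	reqs := []*RPCReq{
		{JSONRPC: "2.0", Method: "eth_chainId", ID: []byte(`1`)},
		{JSONRPC: "2.0", Method: "net_version", ID: []byte(`2`)},
	}
	res := []*RPCRes{
		{JSONRPC: "2.0", Result: "9999", ID: []byte(`2`)}, // arrived first
		{JSONRPC: "2.0", Result: "0xff", ID: []byte(`1`)},
	}
	sortBatchRPCResponse(reqs, res)
	// res is now ordered [ID 1, ID 2], matching reqs.
}
```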
||||
|
||||
type BackendGroup struct {
|
||||
Name string
|
||||
Backends []*Backend
|
||||
}
|
||||
|
||||
func (b *BackendGroup) Forward(ctx context.Context, rpcReqs []*RPCReq, isBatch bool) ([]*RPCRes, error) {
|
||||
if len(rpcReqs) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
rpcRequestsTotal.Inc()
|
||||
|
||||
for _, back := range b.Backends {
|
||||
res, err := back.Forward(ctx, rpcReqs, isBatch)
|
||||
if errors.Is(err, ErrMethodNotWhitelisted) {
|
||||
return nil, err
|
||||
}
|
||||
if errors.Is(err, ErrBackendOffline) {
|
||||
log.Warn(
|
||||
"skipping offline backend",
|
||||
"name", back.Name,
|
||||
"auth", GetAuthCtx(ctx),
|
||||
"req_id", GetReqID(ctx),
|
||||
)
|
||||
continue
|
||||
}
|
||||
if errors.Is(err, ErrBackendOverCapacity) {
|
||||
log.Warn(
|
||||
"skipping over-capacity backend",
|
||||
"name", back.Name,
|
||||
"auth", GetAuthCtx(ctx),
|
||||
"req_id", GetReqID(ctx),
|
||||
)
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
log.Error(
|
||||
"error forwarding request to backend",
|
||||
"name", back.Name,
|
||||
"req_id", GetReqID(ctx),
|
||||
"auth", GetAuthCtx(ctx),
|
||||
"err", err,
|
||||
)
|
||||
continue
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
RecordUnserviceableRequest(ctx, RPCRequestSourceHTTP)
|
||||
return nil, ErrNoBackends
|
||||
}
|
||||
|
||||
func (b *BackendGroup) ProxyWS(ctx context.Context, clientConn *websocket.Conn, methodWhitelist *StringSet) (*WSProxier, error) {
|
||||
for _, back := range b.Backends {
|
||||
proxier, err := back.ProxyWS(clientConn, methodWhitelist)
|
||||
if errors.Is(err, ErrBackendOffline) {
|
||||
log.Warn(
|
||||
"skipping offline backend",
|
||||
"name", back.Name,
|
||||
"req_id", GetReqID(ctx),
|
||||
"auth", GetAuthCtx(ctx),
|
||||
)
|
||||
continue
|
||||
}
|
||||
if errors.Is(err, ErrBackendOverCapacity) {
|
||||
log.Warn(
|
||||
"skipping over-capacity backend",
|
||||
"name", back.Name,
|
||||
"req_id", GetReqID(ctx),
|
||||
"auth", GetAuthCtx(ctx),
|
||||
)
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
log.Warn(
|
||||
"error dialing ws backend",
|
||||
"name", back.Name,
|
||||
"req_id", GetReqID(ctx),
|
||||
"auth", GetAuthCtx(ctx),
|
||||
"err", err,
|
||||
)
|
||||
continue
|
||||
}
|
||||
return proxier, nil
|
||||
}
|
||||
|
||||
return nil, ErrNoBackends
|
||||
}
|
||||
|
||||
func calcBackoff(i int) time.Duration {
|
||||
jitter := float64(rand.Int63n(250))
|
||||
ms := math.Min(math.Pow(2, float64(i))*1000+jitter, 3000)
|
||||
return time.Duration(ms) * time.Millisecond
|
||||
}
|
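calcBackoff gives roughly 1s, then 2s, then a hard 3s ceiling; the cap is applied after the jitter, so from the third attempt onward the 0–250ms of jitter is absorbed by the clamp. A standalone copy of the formula to see the numbers:

```go
package main

import (
	"fmt"
	"math"
	"math/rand"
	"time"
)

// Same formula as calcBackoff above: exponential base, up to 250ms of jitter,
// clamped to 3 seconds.
func backoff(i int) time.Duration {
	jitter := float64(rand.Int63n(250))
	ms := math.Min(math.Pow(2, float64(i))*1000+jitter, 3000)
	return time.Duration(ms) * time.Millisecond
}

func main() {
	for i := 0; i <= 4; i++ {
		fmt.Printf("attempt %d: %v\n", i, backoff(i)) // ~1s, ~2s, 3s, 3s, 3s
	}
}
```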
||||
|
||||
type WSProxier struct {
|
||||
backend *Backend
|
||||
clientConn *websocket.Conn
|
||||
backendConn *websocket.Conn
|
||||
methodWhitelist *StringSet
|
||||
}
|
||||
|
||||
func NewWSProxier(backend *Backend, clientConn, backendConn *websocket.Conn, methodWhitelist *StringSet) *WSProxier {
|
||||
return &WSProxier{
|
||||
backend: backend,
|
||||
clientConn: clientConn,
|
||||
backendConn: backendConn,
|
||||
methodWhitelist: methodWhitelist,
|
||||
}
|
||||
}
|
||||
|
||||
func (w *WSProxier) Proxy(ctx context.Context) error {
|
||||
errC := make(chan error, 2)
|
||||
go w.clientPump(ctx, errC)
|
||||
go w.backendPump(ctx, errC)
|
||||
err := <-errC
|
||||
w.close()
|
||||
return err
|
||||
}
|
||||
|
||||
func (w *WSProxier) clientPump(ctx context.Context, errC chan error) {
|
||||
for {
|
||||
outConn := w.backendConn
|
||||
// Block until we get a message.
|
||||
msgType, msg, err := w.clientConn.ReadMessage()
|
||||
if err != nil {
|
||||
errC <- err
|
||||
if err := outConn.WriteMessage(websocket.CloseMessage, formatWSError(err)); err != nil {
|
||||
log.Error("error writing backendConn message", "err", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
RecordWSMessage(ctx, w.backend.Name, SourceClient)
|
||||
|
||||
// Route control messages to the backend. These don't
|
||||
// count towards the total RPC requests count.
|
||||
if msgType != websocket.TextMessage && msgType != websocket.BinaryMessage {
|
||||
err := outConn.WriteMessage(msgType, msg)
|
||||
if err != nil {
|
||||
errC <- err
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
rpcRequestsTotal.Inc()
|
||||
|
||||
// Don't bother sending invalid requests to the backend,
|
||||
// just handle them here.
|
||||
req, err := w.prepareClientMsg(msg)
|
||||
if err != nil {
|
||||
var id json.RawMessage
|
||||
method := MethodUnknown
|
||||
if req != nil {
|
||||
id = req.ID
|
||||
method = req.Method
|
||||
}
|
||||
log.Info(
|
||||
"error preparing client message",
|
||||
"auth", GetAuthCtx(ctx),
|
||||
"req_id", GetReqID(ctx),
|
||||
"err", err,
|
||||
)
|
||||
outConn = w.clientConn
|
||||
msg = mustMarshalJSON(NewRPCErrorRes(id, err))
|
||||
RecordRPCError(ctx, BackendProxyd, method, err)
|
||||
} else {
|
||||
RecordRPCForward(ctx, w.backend.Name, req.Method, RPCRequestSourceWS)
|
||||
log.Info(
|
||||
"forwarded WS message to backend",
|
||||
"method", req.Method,
|
||||
"auth", GetAuthCtx(ctx),
|
||||
"req_id", GetReqID(ctx),
|
||||
)
|
||||
}
|
||||
|
||||
err = outConn.WriteMessage(msgType, msg)
|
||||
if err != nil {
|
||||
errC <- err
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (w *WSProxier) backendPump(ctx context.Context, errC chan error) {
|
||||
for {
|
||||
// Block until we get a message.
|
||||
msgType, msg, err := w.backendConn.ReadMessage()
|
||||
if err != nil {
|
||||
errC <- err
|
||||
if err := w.clientConn.WriteMessage(websocket.CloseMessage, formatWSError(err)); err != nil {
|
||||
log.Error("error writing clientConn message", "err", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
RecordWSMessage(ctx, w.backend.Name, SourceBackend)
|
||||
|
||||
// Route control messages directly to the client.
|
||||
if msgType != websocket.TextMessage && msgType != websocket.BinaryMessage {
|
||||
err := w.clientConn.WriteMessage(msgType, msg)
|
||||
if err != nil {
|
||||
errC <- err
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
res, err := w.parseBackendMsg(msg)
|
||||
if err != nil {
|
||||
var id json.RawMessage
|
||||
if res != nil {
|
||||
id = res.ID
|
||||
}
|
||||
msg = mustMarshalJSON(NewRPCErrorRes(id, err))
|
||||
}
|
||||
if res != nil && res.IsError() {
|
||||
log.Info(
|
||||
"backend responded with RPC error",
|
||||
"code", res.Error.Code,
|
||||
"msg", res.Error.Message,
|
||||
"source", "ws",
|
||||
"auth", GetAuthCtx(ctx),
|
||||
"req_id", GetReqID(ctx),
|
||||
)
|
||||
RecordRPCError(ctx, w.backend.Name, MethodUnknown, res.Error)
|
||||
} else {
|
||||
log.Info(
|
||||
"forwarded WS message to client",
|
||||
"auth", GetAuthCtx(ctx),
|
||||
"req_id", GetReqID(ctx),
|
||||
)
|
||||
}
|
||||
|
||||
err = w.clientConn.WriteMessage(msgType, msg)
|
||||
if err != nil {
|
||||
errC <- err
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (w *WSProxier) close() {
|
||||
w.clientConn.Close()
|
||||
w.backendConn.Close()
|
||||
if err := w.backend.rateLimiter.DecBackendWSConns(w.backend.Name); err != nil {
|
||||
log.Error("error decrementing backend ws conns", "name", w.backend.Name, "err", err)
|
||||
}
|
||||
activeBackendWsConnsGauge.WithLabelValues(w.backend.Name).Dec()
|
||||
}
|
||||
|
||||
func (w *WSProxier) prepareClientMsg(msg []byte) (*RPCReq, error) {
|
||||
req, err := ParseRPCReq(msg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !w.methodWhitelist.Has(req.Method) {
|
||||
return req, ErrMethodNotWhitelisted
|
||||
}
|
||||
|
||||
if w.backend.IsRateLimited() {
|
||||
return req, ErrBackendOverCapacity
|
||||
}
|
||||
|
||||
return req, nil
|
||||
}
|
||||
|
||||
func (w *WSProxier) parseBackendMsg(msg []byte) (*RPCRes, error) {
|
||||
res, err := ParseRPCRes(bytes.NewReader(msg))
|
||||
if err != nil {
|
||||
log.Warn("error parsing RPC response", "source", "ws", "err", err)
|
||||
return res, ErrBackendBadResponse
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func mustMarshalJSON(in interface{}) []byte {
|
||||
out, err := json.Marshal(in)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func formatWSError(err error) []byte {
|
||||
m := websocket.FormatCloseMessage(websocket.CloseNormalClosure, fmt.Sprintf("%v", err))
|
||||
if e, ok := err.(*websocket.CloseError); ok {
|
||||
if e.Code != websocket.CloseNoStatusReceived {
|
||||
m = websocket.FormatCloseMessage(e.Code, e.Text)
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func sleepContext(ctx context.Context, duration time.Duration) {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case <-time.After(duration):
|
||||
}
|
||||
}
|
||||
|
||||
type LimitedHTTPClient struct {
|
||||
http.Client
|
||||
sem *semaphore.Weighted
|
||||
backendName string
|
||||
}
|
||||
|
||||
func (c *LimitedHTTPClient) DoLimited(req *http.Request) (*http.Response, error) {
|
||||
if err := c.sem.Acquire(req.Context(), 1); err != nil {
|
||||
tooManyRequestErrorsTotal.WithLabelValues(c.backendName).Inc()
|
||||
return nil, wrapErr(err, "too many requests")
|
||||
}
|
||||
defer c.sem.Release(1)
|
||||
return c.Do(req)
|
||||
}
|
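DoLimited is what enforces the per-backend concurrency cap mentioned in the 3.8.5 changelog entry: the weighted semaphore is acquired with the request's own context, so a caller whose deadline expires while waiting gets an error instead of queueing forever. A standalone sketch of the same pattern (the names here are not part of proxyd):

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"golang.org/x/sync/semaphore"
)

// boundedClient mirrors LimitedHTTPClient: at most N requests in flight.
type boundedClient struct {
	http.Client
	sem *semaphore.Weighted
}

func (c *boundedClient) DoLimited(req *http.Request) (*http.Response, error) {
	// Acquire respects the request context, so waiting is bounded by the caller's deadline.
	if err := c.sem.Acquire(req.Context(), 1); err != nil {
		return nil, fmt.Errorf("too many requests: %w", err)
	}
	defer c.sem.Release(1)
	return c.Do(req)
}

func main() {
	c := &boundedClient{
		Client: http.Client{Timeout: 5 * time.Second},
		sem:    semaphore.NewWeighted(10), // cap of 10 concurrent requests
	}
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "https://example.com", nil)
	if res, err := c.DoLimited(req); err == nil {
		res.Body.Close()
	}
}
```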
||||
|
||||
func RecordBatchRPCError(ctx context.Context, backendName string, reqs []*RPCReq, err error) {
|
||||
for _, req := range reqs {
|
||||
RecordRPCError(ctx, backendName, req.Method, err)
|
||||
}
|
||||
}
|
||||
|
||||
func MaybeRecordErrorsInRPCRes(ctx context.Context, backendName string, reqs []*RPCReq, resBatch []*RPCRes) {
|
||||
log.Info("forwarded RPC request",
|
||||
"backend", backendName,
|
||||
"auth", GetAuthCtx(ctx),
|
||||
"req_id", GetReqID(ctx),
|
||||
"batch_size", len(reqs),
|
||||
)
|
||||
|
||||
var lastError *RPCErr
|
||||
for i, res := range resBatch {
|
||||
if res.IsError() {
|
||||
lastError = res.Error
|
||||
RecordRPCError(ctx, backendName, reqs[i].Method, res.Error)
|
||||
}
|
||||
}
|
||||
|
||||
if lastError != nil {
|
||||
log.Info(
|
||||
"backend responded with RPC error",
|
||||
"backend", backendName,
|
||||
"last_error_code", lastError.Code,
|
||||
"last_error_msg", lastError.Message,
|
||||
"req_id", GetReqID(ctx),
|
||||
"source", "rpc",
|
||||
"auth", GetAuthCtx(ctx),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func RecordBatchRPCForward(ctx context.Context, backendName string, reqs []*RPCReq, source string) {
|
||||
for _, req := range reqs {
|
||||
RecordRPCForward(ctx, backendName, req.Method, source)
|
||||
}
|
||||
}
|
165
proxyd/proxyd/cache.go
Normal file
@@ -0,0 +1,165 @@
|
||||
package proxyd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/go-redis/redis/v8"
|
||||
"github.com/golang/snappy"
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
)
|
||||
|
||||
type Cache interface {
|
||||
Get(ctx context.Context, key string) (string, error)
|
||||
Put(ctx context.Context, key string, value string) error
|
||||
}
|
||||
|
||||
const (
|
||||
// assuming an average RPCRes size of 3 KB
|
||||
memoryCacheLimit = 4096
|
||||
// Set a large ttl to avoid expirations. However, a ttl must be set for volatile-lru to take effect.
|
||||
redisTTL = 30 * 7 * 24 * time.Hour
|
||||
)
|
||||
|
||||
type cache struct {
|
||||
lru *lru.Cache
|
||||
}
|
||||
|
||||
func newMemoryCache() *cache {
|
||||
rep, _ := lru.New(memoryCacheLimit)
|
||||
return &cache{rep}
|
||||
}
|
||||
|
||||
func (c *cache) Get(ctx context.Context, key string) (string, error) {
|
||||
if val, ok := c.lru.Get(key); ok {
|
||||
return val.(string), nil
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (c *cache) Put(ctx context.Context, key string, value string) error {
|
||||
c.lru.Add(key, value)
|
||||
return nil
|
||||
}
|
||||
|
||||
type redisCache struct {
|
||||
rdb *redis.Client
|
||||
}
|
||||
|
||||
func newRedisCache(url string) (*redisCache, error) {
|
||||
opts, err := redis.ParseURL(url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rdb := redis.NewClient(opts)
|
||||
if err := rdb.Ping(context.Background()).Err(); err != nil {
|
||||
return nil, wrapErr(err, "error connecting to redis")
|
||||
}
|
||||
return &redisCache{rdb}, nil
|
||||
}
|
||||
|
||||
func (c *redisCache) Get(ctx context.Context, key string) (string, error) {
|
||||
start := time.Now()
|
||||
val, err := c.rdb.Get(ctx, key).Result()
|
||||
redisCacheDurationSumm.WithLabelValues("GET").Observe(float64(time.Since(start).Milliseconds()))
|
||||
|
||||
if err == redis.Nil {
|
||||
return "", nil
|
||||
} else if err != nil {
|
||||
RecordRedisError("CacheGet")
|
||||
return "", err
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
func (c *redisCache) Put(ctx context.Context, key string, value string) error {
|
||||
start := time.Now()
|
||||
err := c.rdb.SetEX(ctx, key, value, redisTTL).Err()
|
||||
redisCacheDurationSumm.WithLabelValues("SETEX").Observe(float64(time.Since(start).Milliseconds()))
|
||||
|
||||
if err != nil {
|
||||
RecordRedisError("CacheSet")
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
type cacheWithCompression struct {
|
||||
cache Cache
|
||||
}
|
||||
|
||||
func newCacheWithCompression(cache Cache) *cacheWithCompression {
|
||||
return &cacheWithCompression{cache}
|
||||
}
|
||||
|
||||
func (c *cacheWithCompression) Get(ctx context.Context, key string) (string, error) {
|
||||
encodedVal, err := c.cache.Get(ctx, key)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if encodedVal == "" {
|
||||
return "", nil
|
||||
}
|
||||
val, err := snappy.Decode(nil, []byte(encodedVal))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(val), nil
|
||||
}
|
||||
|
||||
func (c *cacheWithCompression) Put(ctx context.Context, key string, value string) error {
|
||||
encodedVal := snappy.Encode(nil, []byte(value))
|
||||
return c.cache.Put(ctx, key, string(encodedVal))
|
||||
}
|
||||
|
||||
type GetLatestBlockNumFn func(ctx context.Context) (uint64, error)
|
||||
type GetLatestGasPriceFn func(ctx context.Context) (uint64, error)
|
||||
|
||||
type RPCCache interface {
|
||||
GetRPC(ctx context.Context, req *RPCReq) (*RPCRes, error)
|
||||
PutRPC(ctx context.Context, req *RPCReq, res *RPCRes) error
|
||||
}
|
||||
|
||||
type rpcCache struct {
|
||||
cache Cache
|
||||
handlers map[string]RPCMethodHandler
|
||||
}
|
||||
|
||||
func newRPCCache(cache Cache, getLatestBlockNumFn GetLatestBlockNumFn, getLatestGasPriceFn GetLatestGasPriceFn, numBlockConfirmations int) RPCCache {
|
||||
handlers := map[string]RPCMethodHandler{
|
||||
"eth_chainId": &StaticMethodHandler{},
|
||||
"net_version": &StaticMethodHandler{},
|
||||
"eth_getBlockByNumber": &EthGetBlockByNumberMethodHandler{cache, getLatestBlockNumFn, numBlockConfirmations},
|
||||
"eth_getBlockRange": &EthGetBlockRangeMethodHandler{cache, getLatestBlockNumFn, numBlockConfirmations},
|
||||
"eth_blockNumber": &EthBlockNumberMethodHandler{getLatestBlockNumFn},
|
||||
"eth_gasPrice": &EthGasPriceMethodHandler{getLatestGasPriceFn},
|
||||
"eth_call": &EthCallMethodHandler{cache, getLatestBlockNumFn, numBlockConfirmations},
|
||||
}
|
||||
return &rpcCache{
|
||||
cache: cache,
|
||||
handlers: handlers,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *rpcCache) GetRPC(ctx context.Context, req *RPCReq) (*RPCRes, error) {
|
||||
handler := c.handlers[req.Method]
|
||||
if handler == nil {
|
||||
return nil, nil
|
||||
}
|
||||
res, err := handler.GetRPCMethod(ctx, req)
|
||||
if err == nil {
|
||||
if res == nil {
|
||||
RecordCacheMiss(req.Method)
|
||||
} else {
|
||||
RecordCacheHit(req.Method)
|
||||
}
|
||||
}
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (c *rpcCache) PutRPC(ctx context.Context, req *RPCReq, res *RPCRes) error {
|
||||
handler := c.handlers[req.Method]
|
||||
if handler == nil {
|
||||
return nil
|
||||
}
|
||||
return handler.PutRPCMethod(ctx, req, res)
|
||||
}
|
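How the pieces in cache.go compose, as a package-internal sketch: the LRU (or Redis) store sits at the bottom, snappy compression wraps it, and rpcCache decides per-method whether a response is safe to serve from cache. The confirmation depth of 10 here is just an example value.

```go
// Sketch only: wiring a compressed in-memory cache into the RPC cache.
func exampleCacheWiring(latestBlock GetLatestBlockNumFn, latestGasPrice GetLatestGasPriceFn) RPCCache {
	backing := newCacheWithCompression(newMemoryCache())
	// For a shared cache, newRedisCache(redisURL) would be used as the backing store instead.
	return newRPCCache(backing, latestBlock, latestGasPrice, 10)
}
```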
622
proxyd/proxyd/cache_test.go
Normal file
@@ -0,0 +1,622 @@
|
||||
package proxyd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
const numBlockConfirmations = 10
|
||||
|
||||
func TestRPCCacheImmutableRPCs(t *testing.T) {
|
||||
const blockHead = math.MaxUint64
|
||||
ctx := context.Background()
|
||||
|
||||
getBlockNum := func(ctx context.Context) (uint64, error) {
|
||||
return blockHead, nil
|
||||
}
|
||||
cache := newRPCCache(newMemoryCache(), getBlockNum, nil, numBlockConfirmations)
|
||||
ID := []byte(strconv.Itoa(1))
|
||||
|
||||
rpcs := []struct {
|
||||
req *RPCReq
|
||||
res *RPCRes
|
||||
name string
|
||||
}{
|
||||
{
|
||||
req: &RPCReq{
|
||||
JSONRPC: "2.0",
|
||||
Method: "eth_chainId",
|
||||
ID: ID,
|
||||
},
|
||||
res: &RPCRes{
|
||||
JSONRPC: "2.0",
|
||||
Result: "0xff",
|
||||
ID: ID,
|
||||
},
|
||||
name: "eth_chainId",
|
||||
},
|
||||
{
|
||||
req: &RPCReq{
|
||||
JSONRPC: "2.0",
|
||||
Method: "net_version",
|
||||
ID: ID,
|
||||
},
|
||||
res: &RPCRes{
|
||||
JSONRPC: "2.0",
|
||||
Result: "9999",
|
||||
ID: ID,
|
||||
},
|
||||
name: "net_version",
|
||||
},
|
||||
{
|
||||
req: &RPCReq{
|
||||
JSONRPC: "2.0",
|
||||
Method: "eth_getBlockByNumber",
|
||||
Params: []byte(`["0x1", false]`),
|
||||
ID: ID,
|
||||
},
|
||||
res: &RPCRes{
|
||||
JSONRPC: "2.0",
|
||||
Result: `{"difficulty": "0x1", "number": "0x1"}`,
|
||||
ID: ID,
|
||||
},
|
||||
name: "eth_getBlockByNumber",
|
||||
},
|
||||
{
|
||||
req: &RPCReq{
|
||||
JSONRPC: "2.0",
|
||||
Method: "eth_getBlockByNumber",
|
||||
Params: []byte(`["earliest", false]`),
|
||||
ID: ID,
|
||||
},
|
||||
res: &RPCRes{
|
||||
JSONRPC: "2.0",
|
||||
Result: `{"difficulty": "0x1", "number": "0x1"}`,
|
||||
ID: ID,
|
||||
},
|
||||
name: "eth_getBlockByNumber earliest",
|
||||
},
|
||||
{
|
||||
req: &RPCReq{
|
||||
JSONRPC: "2.0",
|
||||
Method: "eth_getBlockRange",
|
||||
Params: []byte(`["0x1", "0x2", false]`),
|
||||
ID: ID,
|
||||
},
|
||||
res: &RPCRes{
|
||||
JSONRPC: "2.0",
|
||||
Result: `[{"number": "0x1"}, {"number": "0x2"}]`,
|
||||
ID: ID,
|
||||
},
|
||||
name: "eth_getBlockRange",
|
||||
},
|
||||
{
|
||||
req: &RPCReq{
|
||||
JSONRPC: "2.0",
|
||||
Method: "eth_getBlockRange",
|
||||
Params: []byte(`["earliest", "0x2", false]`),
|
||||
ID: ID,
|
||||
},
|
||||
res: &RPCRes{
|
||||
JSONRPC: "2.0",
|
||||
Result: `[{"number": "0x1"}, {"number": "0x2"}]`,
|
||||
ID: ID,
|
||||
},
|
||||
name: "eth_getBlockRange earliest",
|
||||
},
|
||||
}
|
||||
|
||||
for _, rpc := range rpcs {
|
||||
t.Run(rpc.name, func(t *testing.T) {
|
||||
err := cache.PutRPC(ctx, rpc.req, rpc.res)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedRes, err := cache.GetRPC(ctx, rpc.req)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, rpc.res, cachedRes)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRPCCacheBlockNumber(t *testing.T) {
|
||||
var blockHead uint64 = 0x1000
|
||||
var gasPrice uint64 = 0x100
|
||||
ctx := context.Background()
|
||||
ID := []byte(strconv.Itoa(1))
|
||||
|
||||
getGasPrice := func(ctx context.Context) (uint64, error) {
|
||||
return gasPrice, nil
|
||||
}
|
||||
getBlockNum := func(ctx context.Context) (uint64, error) {
|
||||
return blockHead, nil
|
||||
}
|
||||
cache := newRPCCache(newMemoryCache(), getBlockNum, getGasPrice, numBlockConfirmations)
|
||||
|
||||
req := &RPCReq{
|
||||
JSONRPC: "2.0",
|
||||
Method: "eth_blockNumber",
|
||||
ID: ID,
|
||||
}
|
||||
res := &RPCRes{
|
||||
JSONRPC: "2.0",
|
||||
Result: `0x1000`,
|
||||
ID: ID,
|
||||
}
|
||||
|
||||
err := cache.PutRPC(ctx, req, res)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedRes, err := cache.GetRPC(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, res, cachedRes)
|
||||
|
||||
blockHead = 0x1001
|
||||
cachedRes, err = cache.GetRPC(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, &RPCRes{JSONRPC: "2.0", Result: `0x1001`, ID: ID}, cachedRes)
|
||||
}
|
||||
|
||||
func TestRPCCacheGasPrice(t *testing.T) {
|
||||
var blockHead uint64 = 0x1000
|
||||
var gasPrice uint64 = 0x100
|
||||
ctx := context.Background()
|
||||
ID := []byte(strconv.Itoa(1))
|
||||
|
||||
getGasPrice := func(ctx context.Context) (uint64, error) {
|
||||
return gasPrice, nil
|
||||
}
|
||||
getBlockNum := func(ctx context.Context) (uint64, error) {
|
||||
return blockHead, nil
|
||||
}
|
||||
cache := newRPCCache(newMemoryCache(), getBlockNum, getGasPrice, numBlockConfirmations)
|
||||
|
||||
req := &RPCReq{
|
||||
JSONRPC: "2.0",
|
||||
Method: "eth_gasPrice",
|
||||
ID: ID,
|
||||
}
|
||||
res := &RPCRes{
|
||||
JSONRPC: "2.0",
|
||||
Result: `0x100`,
|
||||
ID: ID,
|
||||
}
|
||||
|
||||
err := cache.PutRPC(ctx, req, res)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedRes, err := cache.GetRPC(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, res, cachedRes)
|
||||
|
||||
gasPrice = 0x101
|
||||
cachedRes, err = cache.GetRPC(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, &RPCRes{JSONRPC: "2.0", Result: `0x101`, ID: ID}, cachedRes)
|
||||
}
|
||||
|
||||
func TestRPCCacheUnsupportedMethod(t *testing.T) {
|
||||
const blockHead = math.MaxUint64
|
||||
ctx := context.Background()
|
||||
|
||||
fn := func(ctx context.Context) (uint64, error) {
|
||||
return blockHead, nil
|
||||
}
|
||||
cache := newRPCCache(newMemoryCache(), fn, nil, numBlockConfirmations)
|
||||
ID := []byte(strconv.Itoa(1))
|
||||
|
||||
req := &RPCReq{
|
||||
JSONRPC: "2.0",
|
||||
Method: "eth_syncing",
|
||||
ID: ID,
|
||||
}
|
||||
res := &RPCRes{
|
||||
JSONRPC: "2.0",
|
||||
Result: false,
|
||||
ID: ID,
|
||||
}
|
||||
|
||||
err := cache.PutRPC(ctx, req, res)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedRes, err := cache.GetRPC(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, cachedRes)
|
||||
}
|
||||
|
||||
func TestRPCCacheEthGetBlockByNumber(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
var blockHead uint64
|
||||
fn := func(ctx context.Context) (uint64, error) {
|
||||
return blockHead, nil
|
||||
}
|
||||
makeCache := func() RPCCache { return newRPCCache(newMemoryCache(), fn, nil, numBlockConfirmations) }
|
||||
ID := []byte(strconv.Itoa(1))
|
||||
|
||||
req := &RPCReq{
|
||||
JSONRPC: "2.0",
|
||||
Method: "eth_getBlockByNumber",
|
||||
Params: []byte(`["0xa", false]`),
|
||||
ID: ID,
|
||||
}
|
||||
res := &RPCRes{
|
||||
JSONRPC: "2.0",
|
||||
Result: `{"difficulty": "0x1", "number": "0x1"}`,
|
||||
ID: ID,
|
||||
}
|
||||
req2 := &RPCReq{
|
||||
JSONRPC: "2.0",
|
||||
Method: "eth_getBlockByNumber",
|
||||
Params: []byte(`["0xb", false]`),
|
||||
ID: ID,
|
||||
}
|
||||
res2 := &RPCRes{
|
||||
JSONRPC: "2.0",
|
||||
Result: `{"difficulty": "0x2", "number": "0x2"}`,
|
||||
ID: ID,
|
||||
}
|
||||
|
||||
t.Run("set multiple finalized blocks", func(t *testing.T) {
|
||||
blockHead = 100
|
||||
cache := makeCache()
|
||||
require.NoError(t, cache.PutRPC(ctx, req, res))
|
||||
require.NoError(t, cache.PutRPC(ctx, req2, res2))
|
||||
cachedRes, err := cache.GetRPC(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, res, cachedRes)
|
||||
cachedRes, err = cache.GetRPC(ctx, req2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, res2, cachedRes)
|
||||
})
|
||||
|
||||
t.Run("unconfirmed block", func(t *testing.T) {
|
||||
blockHead = 0xc
|
||||
cache := makeCache()
|
||||
require.NoError(t, cache.PutRPC(ctx, req, res))
|
||||
cachedRes, err := cache.GetRPC(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, cachedRes)
|
||||
})
|
||||
}
|
||||
|
||||
func TestRPCCacheEthGetBlockByNumberForRecentBlocks(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
var blockHead uint64 = 2
|
||||
fn := func(ctx context.Context) (uint64, error) {
|
||||
return blockHead, nil
|
||||
}
|
||||
cache := newRPCCache(newMemoryCache(), fn, nil, numBlockConfirmations)
|
||||
ID := []byte(strconv.Itoa(1))
|
||||
|
||||
rpcs := []struct {
|
||||
req *RPCReq
|
||||
res *RPCRes
|
||||
name string
|
||||
}{
|
||||
{
|
||||
req: &RPCReq{
|
||||
JSONRPC: "2.0",
|
||||
Method: "eth_getBlockByNumber",
|
||||
Params: []byte(`["latest", false]`),
|
||||
ID: ID,
|
||||
},
|
||||
res: &RPCRes{
|
||||
JSONRPC: "2.0",
|
||||
Result: `{"difficulty": "0x1", "number": "0x1"}`,
|
||||
ID: ID,
|
||||
},
|
||||
name: "latest block",
|
||||
},
|
||||
{
|
||||
req: &RPCReq{
|
||||
JSONRPC: "2.0",
|
||||
Method: "eth_getBlockByNumber",
|
||||
Params: []byte(`["pending", false]`),
|
||||
ID: ID,
|
||||
},
|
||||
res: &RPCRes{
|
||||
JSONRPC: "2.0",
|
||||
Result: `{"difficulty": "0x1", "number": "0x1"}`,
|
||||
ID: ID,
|
||||
},
|
||||
name: "pending block",
|
||||
},
|
||||
}
|
||||
|
||||
for _, rpc := range rpcs {
|
||||
t.Run(rpc.name, func(t *testing.T) {
|
||||
err := cache.PutRPC(ctx, rpc.req, rpc.res)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedRes, err := cache.GetRPC(ctx, rpc.req)
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, cachedRes)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRPCCacheEthGetBlockByNumberInvalidRequest(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
const blockHead = math.MaxUint64
|
||||
fn := func(ctx context.Context) (uint64, error) {
|
||||
return blockHead, nil
|
||||
}
|
||||
cache := newRPCCache(newMemoryCache(), fn, nil, numBlockConfirmations)
|
||||
ID := []byte(strconv.Itoa(1))
|
||||
|
||||
req := &RPCReq{
|
||||
JSONRPC: "2.0",
|
||||
Method: "eth_getBlockByNumber",
|
||||
Params: []byte(`["0x1"]`), // missing required boolean param
|
||||
ID: ID,
|
||||
}
|
||||
res := &RPCRes{
|
||||
JSONRPC: "2.0",
|
||||
Result: `{"difficulty": "0x1", "number": "0x1"}`,
|
||||
ID: ID,
|
||||
}
|
||||
|
||||
err := cache.PutRPC(ctx, req, res)
|
||||
require.Error(t, err)
|
||||
|
||||
cachedRes, err := cache.GetRPC(ctx, req)
|
||||
require.Error(t, err)
|
||||
require.Nil(t, cachedRes)
|
||||
}
|
||||
|
||||
func TestRPCCacheEthGetBlockRange(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
var blockHead uint64
|
||||
fn := func(ctx context.Context) (uint64, error) {
|
||||
return blockHead, nil
|
||||
}
|
||||
makeCache := func() RPCCache { return newRPCCache(newMemoryCache(), fn, nil, numBlockConfirmations) }
|
||||
ID := []byte(strconv.Itoa(1))
|
||||
|
||||
t.Run("finalized block", func(t *testing.T) {
|
||||
req := &RPCReq{
|
||||
JSONRPC: "2.0",
|
||||
Method: "eth_getBlockRange",
|
||||
Params: []byte(`["0x1", "0x10", false]`),
|
||||
ID: ID,
|
||||
}
|
||||
res := &RPCRes{
|
||||
JSONRPC: "2.0",
|
||||
Result: `[{"number": "0x1"}, {"number": "0x10"}]`,
|
||||
ID: ID,
|
||||
}
|
||||
blockHead = 0x1000
|
||||
cache := makeCache()
|
||||
require.NoError(t, cache.PutRPC(ctx, req, res))
|
||||
cachedRes, err := cache.GetRPC(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, res, cachedRes)
|
||||
})
|
||||
|
||||
t.Run("unconfirmed block", func(t *testing.T) {
|
||||
cache := makeCache()
|
||||
req := &RPCReq{
|
||||
JSONRPC: "2.0",
|
||||
Method: "eth_getBlockRange",
|
||||
Params: []byte(`["0x1", "0x1000", false]`),
|
||||
ID: ID,
|
||||
}
|
||||
res := &RPCRes{
|
||||
JSONRPC: "2.0",
|
||||
Result: `[{"number": "0x1"}, {"number": "0x2"}]`,
|
||||
ID: ID,
|
||||
}
|
||||
require.NoError(t, cache.PutRPC(ctx, req, res))
|
||||
cachedRes, err := cache.GetRPC(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, cachedRes)
|
||||
})
|
||||
}
|
||||
|
||||
func TestRPCCacheEthGetBlockRangeForRecentBlocks(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
var blockHead uint64 = 0x1000
|
||||
fn := func(ctx context.Context) (uint64, error) {
|
||||
return blockHead, nil
|
||||
}
|
||||
cache := newRPCCache(newMemoryCache(), fn, nil, numBlockConfirmations)
|
||||
ID := []byte(strconv.Itoa(1))
|
||||
|
||||
rpcs := []struct {
|
||||
req *RPCReq
|
||||
res *RPCRes
|
||||
name string
|
||||
}{
|
||||
{
|
||||
req: &RPCReq{
|
||||
JSONRPC: "2.0",
|
||||
Method: "eth_getBlockRange",
|
||||
Params: []byte(`["0x1", "latest", false]`),
|
||||
ID: ID,
|
||||
},
|
||||
res: &RPCRes{
|
||||
JSONRPC: "2.0",
|
||||
Result: `[{"number": "0x1"}, {"number": "0x2"}]`,
|
||||
ID: ID,
|
||||
},
|
||||
name: "latest block",
|
||||
},
|
||||
{
|
||||
req: &RPCReq{
|
||||
JSONRPC: "2.0",
|
||||
Method: "eth_getBlockRange",
|
||||
Params: []byte(`["0x1", "pending", false]`),
|
||||
ID: ID,
|
||||
},
|
||||
res: &RPCRes{
|
||||
JSONRPC: "2.0",
|
||||
Result: `[{"number": "0x1"}, {"number": "0x2"}]`,
|
||||
ID: ID,
|
||||
},
|
||||
name: "pending block",
|
||||
},
|
||||
{
|
||||
req: &RPCReq{
|
||||
JSONRPC: "2.0",
|
||||
Method: "eth_getBlockRange",
|
||||
Params: []byte(`["latest", "0x1000", false]`),
|
||||
ID: ID,
|
||||
},
|
||||
res: &RPCRes{
|
||||
JSONRPC: "2.0",
|
||||
Result: `[{"number": "0x1"}, {"number": "0x2"}]`,
|
||||
ID: ID,
|
||||
},
|
||||
name: "latest block 2",
|
||||
},
|
||||
}
|
||||
|
||||
for _, rpc := range rpcs {
|
||||
t.Run(rpc.name, func(t *testing.T) {
|
||||
err := cache.PutRPC(ctx, rpc.req, rpc.res)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedRes, err := cache.GetRPC(ctx, rpc.req)
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, cachedRes)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRPCCacheEthGetBlockRangeInvalidRequest(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
const blockHead = math.MaxUint64
|
||||
fn := func(ctx context.Context) (uint64, error) {
|
||||
return blockHead, nil
|
||||
}
|
||||
cache := newRPCCache(newMemoryCache(), fn, nil, numBlockConfirmations)
|
||||
ID := []byte(strconv.Itoa(1))
|
||||
|
||||
rpcs := []struct {
|
||||
req *RPCReq
|
||||
res *RPCRes
|
||||
name string
|
||||
}{
|
||||
{
|
||||
req: &RPCReq{
|
||||
JSONRPC: "2.0",
|
||||
Method: "eth_getBlockRange",
|
||||
Params: []byte(`["0x1", "0x2"]`), // missing required boolean param
|
||||
ID: ID,
|
||||
},
|
||||
res: &RPCRes{
|
||||
JSONRPC: "2.0",
|
||||
Result: `[{"number": "0x1"}, {"number": "0x2"}]`,
|
||||
ID: ID,
|
||||
},
|
||||
name: "missing boolean param",
|
||||
},
|
||||
{
|
||||
req: &RPCReq{
|
||||
JSONRPC: "2.0",
|
||||
Method: "eth_getBlockRange",
|
||||
Params: []byte(`["abc", "0x2", true]`), // invalid block hex
|
||||
ID: ID,
|
||||
},
|
||||
res: &RPCRes{
|
||||
JSONRPC: "2.0",
|
||||
Result: `[{"number": "0x1"}, {"number": "0x2"}]`,
|
||||
ID: ID,
|
||||
},
|
||||
name: "invalid block hex",
|
||||
},
|
||||
}
|
||||
|
||||
for _, rpc := range rpcs {
|
||||
t.Run(rpc.name, func(t *testing.T) {
|
||||
err := cache.PutRPC(ctx, rpc.req, rpc.res)
|
||||
require.Error(t, err)
|
||||
|
||||
cachedRes, err := cache.GetRPC(ctx, rpc.req)
|
||||
require.Error(t, err)
|
||||
require.Nil(t, cachedRes)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRPCCacheEthCall(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
var blockHead uint64
|
||||
fn := func(ctx context.Context) (uint64, error) {
|
||||
return blockHead, nil
|
||||
}
|
||||
|
||||
makeCache := func() RPCCache { return newRPCCache(newMemoryCache(), fn, nil, numBlockConfirmations) }
|
||||
ID := []byte(strconv.Itoa(1))
|
||||
|
||||
req := &RPCReq{
|
||||
JSONRPC: "2.0",
|
||||
Method: "eth_call",
|
||||
Params: []byte(`[{"to": "0xDEADBEEF", "data": "0x1"}, "0x10"]`),
|
||||
ID: ID,
|
||||
}
|
||||
res := &RPCRes{
|
||||
JSONRPC: "2.0",
|
||||
Result: `0x0`,
|
||||
ID: ID,
|
||||
}
|
||||
|
||||
t.Run("finalized block", func(t *testing.T) {
|
||||
blockHead = 0x100
|
||||
cache := makeCache()
|
||||
err := cache.PutRPC(ctx, req, res)
|
||||
require.NoError(t, err)
|
||||
cachedRes, err := cache.GetRPC(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, res, cachedRes)
|
||||
})
|
||||
|
||||
t.Run("unconfirmed block", func(t *testing.T) {
|
||||
blockHead = 0x10
|
||||
cache := makeCache()
|
||||
require.NoError(t, cache.PutRPC(ctx, req, res))
|
||||
cachedRes, err := cache.GetRPC(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, cachedRes)
|
||||
})
|
||||
|
||||
t.Run("latest block", func(t *testing.T) {
|
||||
blockHead = 0x100
|
||||
req := &RPCReq{
|
||||
JSONRPC: "2.0",
|
||||
Method: "eth_call",
|
||||
Params: []byte(`[{"to": "0xDEADBEEF", "data": "0x1"}, "latest"]`),
|
||||
ID: ID,
|
||||
}
|
||||
cache := makeCache()
|
||||
require.NoError(t, cache.PutRPC(ctx, req, res))
|
||||
cachedRes, err := cache.GetRPC(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, cachedRes)
|
||||
})
|
||||
|
||||
t.Run("pending block", func(t *testing.T) {
|
||||
blockHead = 0x100
|
||||
req := &RPCReq{
|
||||
JSONRPC: "2.0",
|
||||
Method: "eth_call",
|
||||
Params: []byte(`[{"to": "0xDEADBEEF", "data": "0x1"}, "pending"]`),
|
||||
ID: ID,
|
||||
}
|
||||
cache := makeCache()
|
||||
require.NoError(t, cache.PutRPC(ctx, req, res))
|
||||
cachedRes, err := cache.GetRPC(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, cachedRes)
|
||||
})
|
||||
}
|
50
proxyd/proxyd/cmd/proxyd/main.go
Normal file
@@ -0,0 +1,50 @@
package main

import (
	"os"
	"os/signal"
	"syscall"

	"github.com/BurntSushi/toml"
	"github.com/ethereum-optimism/optimism/proxyd"
	"github.com/ethereum/go-ethereum/log"
)

var (
	GitVersion = ""
	GitCommit  = ""
	GitDate    = ""
)

func main() {
	// Set up logger with a default INFO level in case we fail to parse flags.
	// Otherwise the final critical log won't show what the parsing error was.
	log.Root().SetHandler(
		log.LvlFilterHandler(
			log.LvlInfo,
			log.StreamHandler(os.Stdout, log.JSONFormat()),
		),
	)

	log.Info("starting proxyd", "version", GitVersion, "commit", GitCommit, "date", GitDate)

	if len(os.Args) < 2 {
		log.Crit("must specify a config file on the command line")
	}

	config := new(proxyd.Config)
	if _, err := toml.DecodeFile(os.Args[1], config); err != nil {
		log.Crit("error reading config file", "err", err)
	}

	shutdown, err := proxyd.Start(config)
	if err != nil {
		log.Crit("error starting proxyd", "err", err)
	}

	sig := make(chan os.Signal, 1)
	signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)
	recvSig := <-sig
	log.Info("caught signal, shutting down", "signal", recvSig)
	shutdown()
}
|
97
proxyd/proxyd/config.go
Normal file
@@ -0,0 +1,97 @@
package proxyd

import (
	"fmt"
	"os"
	"strings"
)

type ServerConfig struct {
	RPCHost           string `toml:"rpc_host"`
	RPCPort           int    `toml:"rpc_port"`
	WSHost            string `toml:"ws_host"`
	WSPort            int    `toml:"ws_port"`
	MaxBodySizeBytes  int64  `toml:"max_body_size_bytes"`
	MaxConcurrentRPCs int64  `toml:"max_concurrent_rpcs"`

	// TimeoutSeconds specifies the maximum time spent serving an HTTP request. Note that this isn't used for websocket connections.
	TimeoutSeconds int `toml:"timeout_seconds"`

	MaxUpstreamBatchSize int `toml:"max_upstream_batch_size"`
}

type CacheConfig struct {
	Enabled               bool   `toml:"enabled"`
	BlockSyncRPCURL       string `toml:"block_sync_rpc_url"`
	NumBlockConfirmations int    `toml:"num_block_confirmations"`
}

type RedisConfig struct {
	URL string `toml:"url"`
}

type MetricsConfig struct {
	Enabled bool   `toml:"enabled"`
	Host    string `toml:"host"`
	Port    int    `toml:"port"`
}

type BackendOptions struct {
	ResponseTimeoutSeconds int   `toml:"response_timeout_seconds"`
	MaxResponseSizeBytes   int64 `toml:"max_response_size_bytes"`
	MaxRetries             int   `toml:"max_retries"`
	OutOfServiceSeconds    int   `toml:"out_of_service_seconds"`
}

type BackendConfig struct {
	Username         string `toml:"username"`
	Password         string `toml:"password"`
	RPCURL           string `toml:"rpc_url"`
	WSURL            string `toml:"ws_url"`
	MaxRPS           int    `toml:"max_rps"`
	MaxWSConns       int    `toml:"max_ws_conns"`
	CAFile           string `toml:"ca_file"`
	ClientCertFile   string `toml:"client_cert_file"`
	ClientKeyFile    string `toml:"client_key_file"`
	StripTrailingXFF bool   `toml:"strip_trailing_xff"`
}

type BackendsConfig map[string]*BackendConfig

type BackendGroupConfig struct {
	Backends []string `toml:"backends"`
}

type BackendGroupsConfig map[string]*BackendGroupConfig

type MethodMappingsConfig map[string]string

type Config struct {
	WSBackendGroup    string              `toml:"ws_backend_group"`
	Server            ServerConfig        `toml:"server"`
	Cache             CacheConfig         `toml:"cache"`
	Redis             RedisConfig         `toml:"redis"`
	Metrics           MetricsConfig       `toml:"metrics"`
	BackendOptions    BackendOptions      `toml:"backend"`
	Backends          BackendsConfig      `toml:"backends"`
	Authentication    map[string]string   `toml:"authentication"`
	BackendGroups     BackendGroupsConfig `toml:"backend_groups"`
	RPCMethodMappings map[string]string   `toml:"rpc_method_mappings"`
	WSMethodWhitelist []string            `toml:"ws_method_whitelist"`
}

func ReadFromEnvOrConfig(value string) (string, error) {
	if strings.HasPrefix(value, "$") {
		envValue := os.Getenv(strings.TrimPrefix(value, "$"))
		if envValue == "" {
			return "", fmt.Errorf("config env var %s not found", value)
		}
		return envValue, nil
	}

	if strings.HasPrefix(value, "\\") {
		return strings.TrimPrefix(value, "\\"), nil
	}

	return value, nil
}
|
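For reference, a minimal sketch (not part of the commit) of how ReadFromEnvOrConfig resolves the three kinds of config values: a "$"-prefixed name is read from the environment, a leading backslash escapes a literal value, and anything else is returned unchanged. The variable name FOO_AUTH_KEY is only illustrative; the import path matches the module declared in go.mod below.

package main

import (
	"fmt"
	"os"

	"github.com/ethereum-optimism/optimism/proxyd"
)

func main() {
	// "$FOO_AUTH_KEY" resolves to the value of the FOO_AUTH_KEY env var.
	os.Setenv("FOO_AUTH_KEY", "s3cret")
	v, _ := proxyd.ReadFromEnvOrConfig("$FOO_AUTH_KEY")
	fmt.Println(v) // s3cret

	// A leading backslash escapes the dollar sign, yielding a literal value.
	v, _ = proxyd.ReadFromEnvOrConfig(`\$not-an-env-var`)
	fmt.Println(v) // $not-an-env-var

	// Plain values pass through untouched.
	v, _ = proxyd.ReadFromEnvOrConfig("main")
	fmt.Println(v) // main
}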
6
proxyd/proxyd/entrypoint.sh
Normal file
@ -0,0 +1,6 @@
#!/bin/sh

echo "Updating CA certificates."
update-ca-certificates
echo "Running CMD."
exec "$@"
7
proxyd/proxyd/errors.go
Normal file
@ -0,0 +1,7 @@
package proxyd

import "fmt"

func wrapErr(err error, msg string) error {
	return fmt.Errorf("%s %v", msg, err)
}
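One note on the helper above: it formats the inner error with %v rather than %w, so the cause cannot be recovered later with errors.Is or errors.As; it only produces a log-friendly message. A small standalone sketch (not part of the commit, with wrapErr copied verbatim) showing that behavior:

package main

import (
	"errors"
	"fmt"
)

// wrapErr is copied from errors.go above for illustration.
func wrapErr(err error, msg string) error {
	return fmt.Errorf("%s %v", msg, err)
}

func main() {
	base := errors.New("connection refused")
	err := wrapErr(base, "error dialing backend:")
	fmt.Println(err)                  // error dialing backend: connection refused
	fmt.Println(errors.Is(err, base)) // false: %v does not wrap, so the cause is not inspectable
}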
96
proxyd/proxyd/example.config.toml
Normal file
@ -0,0 +1,96 @@
# List of WS methods to whitelist.
ws_method_whitelist = [
  "eth_subscribe",
  "eth_call",
  "eth_chainId"
]
# Enable WS on this backend group. There can only be one WS-enabled backend group.
ws_backend_group = "main"

[server]
# Host for the proxyd RPC server to listen on.
rpc_host = "0.0.0.0"
# Port for the above.
rpc_port = 8080
# Host for the proxyd WS server to listen on.
ws_host = "0.0.0.0"
# Port for the above.
ws_port = 8085
# Maximum client body size, in bytes, that the server will accept.
max_body_size_bytes = 10485760
max_concurrent_rpcs = 1000

[redis]
# URL to a Redis instance.
url = "redis://localhost:6379"

[metrics]
# Whether or not to enable Prometheus metrics.
enabled = true
# Host for the Prometheus metrics endpoint to listen on.
host = "0.0.0.0"
# Port for the above.
port = 9761

[backend]
# How long proxyd should wait for a backend response before timing out.
response_timeout_seconds = 5
# Maximum response size, in bytes, that proxyd will accept from a backend.
max_response_size_bytes = 5242880
# Maximum number of times proxyd will try a backend before giving up.
max_retries = 3
# Number of seconds to wait before trying an unhealthy backend again.
out_of_service_seconds = 600

[backends]
# A map of backends by name.
[backends.infura]
# The URL to contact the backend at. Will be read from the environment
# if an environment variable prefixed with $ is provided.
rpc_url = ""
# The WS URL to contact the backend at. Will be read from the environment
# if an environment variable prefixed with $ is provided.
ws_url = ""
username = ""
# An HTTP Basic password to authenticate with the backend. Will be read from
# the environment if an environment variable prefixed with $ is provided.
password = ""
max_rps = 3
max_ws_conns = 1
# Path to a custom root CA.
ca_file = ""
# Path to a custom client cert file.
client_cert_file = ""
# Path to a custom client key file.
client_key_file = ""

[backends.alchemy]
rpc_url = ""
ws_url = ""
username = ""
password = ""
max_rps = 3
max_ws_conns = 1

[backend_groups]
[backend_groups.main]
backends = ["infura"]

[backend_groups.alchemy]
backends = ["alchemy"]

# If the authentication group below is in the config,
# proxyd will only accept authenticated requests.
[authentication]
# Mapping of auth key to alias. The alias is used to provide a human-
# readable name for the auth key in monitoring. The auth key will be
# read from the environment if an environment variable prefixed with $
# is provided. Note that you will need to quote the environment variable
# in order for it to be valid TOML, e.g. "$FOO_AUTH_KEY" = "foo_alias".
secret = "test"

# Mapping of methods to backend groups.
[rpc_method_mappings]
eth_call = "main"
eth_chainId = "main"
eth_blockNumber = "alchemy"
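To see how this example file maps onto the Config struct from config.go, here is a small hypothetical program (not part of the commit) that decodes it and prints the method-to-backend-group routing. It assumes it is run from the directory containing example.config.toml; the toml and proxyd import paths come from the go.mod below.

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
	"github.com/ethereum-optimism/optimism/proxyd"
)

func main() {
	cfg := new(proxyd.Config)
	if _, err := toml.DecodeFile("example.config.toml", cfg); err != nil {
		panic(err)
	}

	// Each RPC method is routed to the backend group named in [rpc_method_mappings].
	for method, group := range cfg.RPCMethodMappings {
		fmt.Printf("%-16s -> %s (backends: %v)\n", method, group, cfg.BackendGroups[group].Backends)
	}

	// WS traffic goes to the single WS-enabled group.
	fmt.Println("ws backend group:", cfg.WSBackendGroup)
}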
22
proxyd/proxyd/go.mod
Normal file
@ -0,0 +1,22 @@
module github.com/ethereum-optimism/optimism/proxyd

go 1.16

require (
	github.com/BurntSushi/toml v0.4.1
	github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect
	github.com/alicebob/miniredis v2.5.0+incompatible
	github.com/ethereum/go-ethereum v1.10.16
	github.com/go-redis/redis/v8 v8.11.4
	github.com/golang/snappy v0.0.4
	github.com/gomodule/redigo v1.8.8 // indirect
	github.com/gorilla/mux v1.8.0
	github.com/gorilla/websocket v1.4.2
	github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d
	github.com/prometheus/client_golang v1.11.0
	github.com/rs/cors v1.8.0
	github.com/stretchr/testify v1.7.0
	github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 // indirect
	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
)
702
proxyd/proxyd/go.sum
Normal file
@ -0,0 +1,702 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||
cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg=
|
||||
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
||||
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
|
||||
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
|
||||
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
|
||||
cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
|
||||
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
||||
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
|
||||
collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
|
||||
github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
|
||||
github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4=
|
||||
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
|
||||
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
|
||||
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
|
||||
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
|
||||
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
|
||||
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
|
||||
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
|
||||
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw=
|
||||
github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8=
|
||||
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
|
||||
github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c3fqvvgKm5o=
|
||||
github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw=
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk=
|
||||
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
|
||||
github.com/alicebob/miniredis v2.5.0+incompatible h1:yBHoLpsyjupjz3NL3MhKMVkR41j82Yjf3KFv7ApYzUI=
|
||||
github.com/alicebob/miniredis v2.5.0+incompatible/go.mod h1:8HZjEj4yU0dwhYHky+DxYx+6BMjkBbe5ONFIF1MXffk=
|
||||
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
|
||||
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
|
||||
github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0=
|
||||
github.com/aws/aws-sdk-go-v2 v1.2.0/go.mod h1:zEQs02YRBw1DjK0PoJv3ygDYOFTre1ejlJWl8FwAuQo=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.1.1/go.mod h1:0XsVy9lBI/BCXm+2Tuvt39YmdHwS5unDQmxZOYe8F5Y=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.1.1/go.mod h1:mM2iIjwl7LULWtS6JCACyInboHirisUUdkBPoTHMOUo=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.2/go.mod h1:3hGg3PpiEjHnrkrlasTfxFqUsZ2GCk/fMUn4CbKgSkM=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.2/go.mod h1:45MfaXZ0cNbeuT0KQ1XJylq8A6+OpVV2E5kvY/Kq+u8=
|
||||
github.com/aws/aws-sdk-go-v2/service/route53 v1.1.1/go.mod h1:rLiOUrPLW/Er5kRcQ7NkwbjlijluLsrIbu/iyl35RO4=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.1.1/go.mod h1:SuZJxklHxLAXgLTc1iFXbEWkXs7QRTQpCLGaKIprQW0=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.1.1/go.mod h1:Wi0EBZwiz/K44YliU0EKxqTCJGUfYTWXrrBwkq736bM=
|
||||
github.com/aws/smithy-go v1.1.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c=
|
||||
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
|
||||
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
|
||||
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
|
||||
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
|
||||
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
|
||||
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
|
||||
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
|
||||
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
|
||||
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
|
||||
github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
|
||||
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
|
||||
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304=
|
||||
github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ=
|
||||
github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1:815PAHg3wvysy0SyIqanF8gZ0Y1wjk/hrDHD/iT88+Q=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
|
||||
github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
|
||||
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
|
||||
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
|
||||
github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M=
|
||||
github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
|
||||
github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk=
|
||||
github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y=
|
||||
github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
|
||||
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
|
||||
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/ethereum/go-ethereum v1.10.16 h1:3oPrumn0bCW/idjcxMn5YYVCdK7VzJYIvwGZUGLEaoc=
|
||||
github.com/ethereum/go-ethereum v1.10.16/go.mod h1:Anj6cxczl+AHy63o4X9O8yWNHuN5wMpfb8MAnHkWn7Y=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
|
||||
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
|
||||
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
|
||||
github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
|
||||
github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
|
||||
github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do=
|
||||
github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
|
||||
github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
|
||||
github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E=
|
||||
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
|
||||
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM=
|
||||
github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY=
|
||||
github.com/go-redis/redis/v8 v8.11.4 h1:kHoYkfZP6+pe04aFTnhDH6GDROa5yJdHJVNxV3F46Tg=
|
||||
github.com/go-redis/redis/v8 v8.11.4/go.mod h1:2Z2wHZXdQpCDXEGzqMockDpNyYvi2l4Pxt6RJr792+w=
|
||||
github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
|
||||
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
|
||||
github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
|
||||
github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
|
||||
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y=
|
||||
github.com/gomodule/redigo v1.8.8 h1:f6cXq6RRfiyrOJEV7p3JhLDlmawGBVBBP1MggY8Mo4E=
|
||||
github.com/gomodule/redigo v1.8.8/go.mod h1:7ArFNvsTjH8GMMzB4uy1snslv2BwmginuMs06a1uzZE=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.1.5 h1:kxhtnfFVi+rYdOALN0B3k9UT86zVJKfBimRaciULW4I=
|
||||
github.com/google/uuid v1.1.5/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
|
||||
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
|
||||
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
|
||||
github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
|
||||
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
|
||||
github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM=
|
||||
github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/huin/goupnp v1.0.2 h1:RfGLP+h3mvisuWEyybxNq5Eft3NWhHLPeUN72kpKZoI=
|
||||
github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM=
|
||||
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY=
|
||||
github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI=
|
||||
github.com/influxdata/influxdb-client-go/v2 v2.4.0/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8=
|
||||
github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk=
|
||||
github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE=
|
||||
github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
|
||||
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
|
||||
github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8=
|
||||
github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE=
|
||||
github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
|
||||
github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
|
||||
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU=
|
||||
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
||||
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
|
||||
github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0=
|
||||
github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
|
||||
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
|
||||
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg=
|
||||
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
|
||||
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
|
||||
github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw=
|
||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
|
||||
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
|
||||
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
|
||||
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
|
||||
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
|
||||
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
|
||||
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
|
||||
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
|
||||
github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0=
|
||||
github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
|
||||
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
|
||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
|
||||
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
|
||||
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
|
||||
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/onsi/gomega v1.16.0 h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c=
|
||||
github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
|
||||
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE=
|
||||
github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
|
||||
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
|
||||
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
|
||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
|
||||
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
|
||||
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/rs/cors v1.8.0 h1:P2KMzcFwrPoSjkF1WLRPsp3UMLyql8L4v9hQpVeK5so=
|
||||
github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
|
||||
github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible h1:Bn1aCHHRnjv4Bl16T8rcaFjYSrGrIZvpiGO6P3Q4GpU=
|
||||
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57NRNuZ2d3rmvB3pcmbu7O1RS3m8WRx7ilrg=
|
||||
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
|
||||
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
||||
github.com/tklauser/go-sysconf v0.3.5 h1:uu3Xl4nkLzQfXNsWn15rPc/HQCJKObbt1dKJeWp3vU4=
|
||||
github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI=
|
||||
github.com/tklauser/numcpus v0.2.2 h1:oyhllyrScuYI6g+h/zUvNXNp1wy7x8qQy3t/piefldA=
|
||||
github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM=
|
||||
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef h1:wHSqTBrZW24CsNJDfeh9Ex6Pm0Rcpc7qrgKBiL44vF4=
|
||||
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
|
||||
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
|
||||
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
|
||||
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
|
||||
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
|
||||
github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
|
||||
github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 h1:k/gmLsJDWwWqbLCur2yWnJzwQEKRcAHXo6seXGuSwWw=
|
||||
github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 h1:It14KIkyBFYkHkwZ7k45minvA9aorojkyjGk9KJ5B/w=
|
||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
|
||||
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
|
||||
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210220033124-5f55cee0dc0d/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
|
||||
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d h1:20cMwl2fHAzkJMEA+8J4JgqBQcQGzbisXo31MIeenXI=
|
||||
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912 h1:uCLL3g5wH2xjxVREVuAbP9JM5PPKjRbXKRa6IBjkzmU=
|
||||
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE=
|
||||
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
|
||||
gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
|
||||
gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU=
|
||||
gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
|
||||
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
|
||||
gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
||||
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
|
||||
gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
|
||||
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
|
||||
gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
|
42
proxyd/proxyd/integration_tests/batch_timeout_test.go
Normal file
@ -0,0 +1,42 @@
package integration_tests

import (
	"net/http"
	"os"
	"testing"
	"time"

	"github.com/ethereum-optimism/optimism/proxyd"
	"github.com/stretchr/testify/require"
)

const (
	batchTimeoutResponse = `{"error":{"code":-32015,"message":"gateway timeout"},"id":null,"jsonrpc":"2.0"}`
)

func TestBatchTimeout(t *testing.T) {
	slowBackend := NewMockBackend(nil)
	defer slowBackend.Close()

	require.NoError(t, os.Setenv("SLOW_BACKEND_RPC_URL", slowBackend.URL()))

	config := ReadConfig("batch_timeout")
	client := NewProxydClient("http://127.0.0.1:8545")
	shutdown, err := proxyd.Start(config)
	require.NoError(t, err)
	defer shutdown()

	slowBackend.SetHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// check the config. The sleep duration should be at least double the server.timeout_seconds config to prevent flakes
		time.Sleep(time.Second * 2)
		BatchedResponseHandler(200, goodResponse)(w, r)
	}))
	res, statusCode, err := client.SendBatchRPC(
		NewRPCReq("1", "eth_chainId", nil),
		NewRPCReq("1", "eth_chainId", nil),
	)
	require.NoError(t, err)
	require.Equal(t, 504, statusCode)
	RequireEqualJSON(t, []byte(batchTimeoutResponse), res)
	require.Equal(t, 1, len(slowBackend.Requests()))
}
141
proxyd/proxyd/integration_tests/batching_test.go
Normal file
@ -0,0 +1,141 @@
package integration_tests

import (
	"net/http"
	"os"
	"testing"

	"github.com/ethereum-optimism/optimism/proxyd"
	"github.com/stretchr/testify/require"
)

func TestBatching(t *testing.T) {
	config := ReadConfig("batching")

	chainIDResponse1 := `{"jsonrpc": "2.0", "result": "hello1", "id": 1}`
	chainIDResponse2 := `{"jsonrpc": "2.0", "result": "hello2", "id": 2}`
	chainIDResponse3 := `{"jsonrpc": "2.0", "result": "hello3", "id": 3}`
	netVersionResponse1 := `{"jsonrpc": "2.0", "result": "1.0", "id": 1}`
	callResponse1 := `{"jsonrpc": "2.0", "result": "ekans1", "id": 1}`

	type mockResult struct {
		method string
		id     string
		result interface{}
	}

	chainIDMock1 := mockResult{"eth_chainId", "1", "hello1"}
	chainIDMock2 := mockResult{"eth_chainId", "2", "hello2"}
	chainIDMock3 := mockResult{"eth_chainId", "3", "hello3"}
	netVersionMock1 := mockResult{"net_version", "1", "1.0"}
	callMock1 := mockResult{"eth_call", "1", "ekans1"}

	tests := []struct {
		name                string
		handler             http.Handler
		mocks               []mockResult
		reqs                []*proxyd.RPCReq
		expectedRes         string
		maxBatchSize        int
		numExpectedForwards int
	}{
		{
			name:  "backend returns batches out of order",
			mocks: []mockResult{chainIDMock1, chainIDMock2, chainIDMock3},
			reqs: []*proxyd.RPCReq{
				NewRPCReq("1", "eth_chainId", nil),
				NewRPCReq("2", "eth_chainId", nil),
				NewRPCReq("3", "eth_chainId", nil),
			},
			expectedRes:         asArray(chainIDResponse1, chainIDResponse2, chainIDResponse3),
			maxBatchSize:        2,
			numExpectedForwards: 2,
		},
		{
			// infura behavior
			name:    "backend returns single RPC response object as error",
			handler: SingleResponseHandler(500, `{"jsonrpc":"2.0","error":{"code":-32001,"message":"internal server error"},"id":1}`),
			reqs: []*proxyd.RPCReq{
				NewRPCReq("1", "eth_chainId", nil),
				NewRPCReq("2", "eth_chainId", nil),
			},
			expectedRes: asArray(
				`{"error":{"code":-32011,"message":"no backends available for method"},"id":1,"jsonrpc":"2.0"}`,
				`{"error":{"code":-32011,"message":"no backends available for method"},"id":2,"jsonrpc":"2.0"}`,
			),
			maxBatchSize:        10,
			numExpectedForwards: 1,
		},
		{
			name:    "backend returns single RPC response object for minibatches",
			handler: SingleResponseHandler(500, `{"jsonrpc":"2.0","error":{"code":-32001,"message":"internal server error"},"id":1}`),
			reqs: []*proxyd.RPCReq{
				NewRPCReq("1", "eth_chainId", nil),
				NewRPCReq("2", "eth_chainId", nil),
			},
			expectedRes: asArray(
				`{"error":{"code":-32011,"message":"no backends available for method"},"id":1,"jsonrpc":"2.0"}`,
				`{"error":{"code":-32011,"message":"no backends available for method"},"id":2,"jsonrpc":"2.0"}`,
			),
			maxBatchSize:        1,
			numExpectedForwards: 2,
		},
		{
			name: "duplicate request ids are on distinct batches",
			mocks: []mockResult{
				netVersionMock1,
				chainIDMock2,
				chainIDMock1,
				callMock1,
			},
			reqs: []*proxyd.RPCReq{
				NewRPCReq("1", "net_version", nil),
				NewRPCReq("2", "eth_chainId", nil),
				NewRPCReq("1", "eth_chainId", nil),
				NewRPCReq("1", "eth_call", nil),
			},
			expectedRes:         asArray(netVersionResponse1, chainIDResponse2, chainIDResponse1, callResponse1),
			maxBatchSize:        2,
			numExpectedForwards: 3,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			config.Server.MaxUpstreamBatchSize = tt.maxBatchSize

			handler := tt.handler
			if handler == nil {
				router := NewBatchRPCResponseRouter()
				for _, mock := range tt.mocks {
					router.SetRoute(mock.method, mock.id, mock.result)
				}
				handler = router
			}

			goodBackend := NewMockBackend(handler)
			defer goodBackend.Close()
			require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL()))

			client := NewProxydClient("http://127.0.0.1:8545")
			shutdown, err := proxyd.Start(config)
			require.NoError(t, err)
			defer shutdown()

			res, statusCode, err := client.SendBatchRPC(tt.reqs...)
			require.NoError(t, err)
			require.Equal(t, http.StatusOK, statusCode)
			RequireEqualJSON(t, []byte(tt.expectedRes), res)

			if tt.numExpectedForwards != 0 {
				require.Equal(t, tt.numExpectedForwards, len(goodBackend.Requests()))
			}

			if handler, ok := handler.(*BatchRPCResponseRouter); ok {
				for i, mock := range tt.mocks {
					require.Equal(t, 1, handler.GetNumCalls(mock.method, mock.id), i)
				}
			}
		})
	}
}
215
proxyd/proxyd/integration_tests/caching_test.go
Normal file
@ -0,0 +1,215 @@
package integration_tests

import (
	"bytes"
	"fmt"
	"os"
	"testing"
	"time"

	"github.com/alicebob/miniredis"
	"github.com/ethereum-optimism/optimism/proxyd"
	"github.com/stretchr/testify/require"
)

func TestCaching(t *testing.T) {
	redis, err := miniredis.Run()
	require.NoError(t, err)
	defer redis.Close()

	hdlr := NewBatchRPCResponseRouter()
	hdlr.SetRoute("eth_chainId", "999", "0x420")
	hdlr.SetRoute("net_version", "999", "0x1234")
	hdlr.SetRoute("eth_blockNumber", "999", "0x64")
	hdlr.SetRoute("eth_getBlockByNumber", "999", "dummy_block")
	hdlr.SetRoute("eth_call", "999", "dummy_call")

	// mock LVC requests
	hdlr.SetFallbackRoute("eth_blockNumber", "0x64")
	hdlr.SetFallbackRoute("eth_gasPrice", "0x420")

	backend := NewMockBackend(hdlr)
	defer backend.Close()

	require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", backend.URL()))
	require.NoError(t, os.Setenv("REDIS_URL", fmt.Sprintf("redis://127.0.0.1:%s", redis.Port())))
	config := ReadConfig("caching")
	client := NewProxydClient("http://127.0.0.1:8545")
	shutdown, err := proxyd.Start(config)
	require.NoError(t, err)
	defer shutdown()

	// allow time for the block number fetcher to fire
	time.Sleep(1500 * time.Millisecond)

	tests := []struct {
		method       string
		params       []interface{}
		response     string
		backendCalls int
	}{
		{
			"eth_chainId",
			nil,
			"{\"jsonrpc\": \"2.0\", \"result\": \"0x420\", \"id\": 999}",
			1,
		},
		{
			"net_version",
			nil,
			"{\"jsonrpc\": \"2.0\", \"result\": \"0x1234\", \"id\": 999}",
			1,
		},
		{
			"eth_getBlockByNumber",
			[]interface{}{
				"0x1",
				true,
			},
			"{\"jsonrpc\": \"2.0\", \"result\": \"dummy_block\", \"id\": 999}",
			1,
		},
		{
			"eth_call",
			[]interface{}{
				struct {
					To string `json:"to"`
				}{
					"0x1234",
				},
				"0x60",
			},
			"{\"id\":999,\"jsonrpc\":\"2.0\",\"result\":\"dummy_call\"}",
			1,
		},
		{
			"eth_blockNumber",
			nil,
			"{\"id\":999,\"jsonrpc\":\"2.0\",\"result\":\"0x64\"}",
			0,
		},
		{
			"eth_call",
			[]interface{}{
				struct {
					To string `json:"to"`
				}{
					"0x1234",
				},
				"latest",
			},
			"{\"id\":999,\"jsonrpc\":\"2.0\",\"result\":\"dummy_call\"}",
			2,
		},
		{
			"eth_call",
			[]interface{}{
				struct {
					To string `json:"to"`
				}{
					"0x1234",
				},
				"pending",
			},
			"{\"id\":999,\"jsonrpc\":\"2.0\",\"result\":\"dummy_call\"}",
			2,
		},
	}
	for _, tt := range tests {
		t.Run(tt.method, func(t *testing.T) {
			resRaw, _, err := client.SendRPC(tt.method, tt.params)
			require.NoError(t, err)
			resCache, _, err := client.SendRPC(tt.method, tt.params)
			require.NoError(t, err)
			RequireEqualJSON(t, []byte(tt.response), resCache)
			RequireEqualJSON(t, resRaw, resCache)
			require.Equal(t, tt.backendCalls, countRequests(backend, tt.method))
			backend.Reset()
		})
	}

	t.Run("block numbers update", func(t *testing.T) {
		hdlr.SetFallbackRoute("eth_blockNumber", "0x100")
		time.Sleep(1500 * time.Millisecond)
		resRaw, _, err := client.SendRPC("eth_blockNumber", nil)
		require.NoError(t, err)
		RequireEqualJSON(t, []byte("{\"id\":999,\"jsonrpc\":\"2.0\",\"result\":\"0x100\"}"), resRaw)
		backend.Reset()
	})

	t.Run("nil responses should not be cached", func(t *testing.T) {
		hdlr.SetRoute("eth_getBlockByNumber", "999", nil)
		resRaw, _, err := client.SendRPC("eth_getBlockByNumber", []interface{}{"0x123"})
		require.NoError(t, err)
		resCache, _, err := client.SendRPC("eth_getBlockByNumber", []interface{}{"0x123"})
		require.NoError(t, err)
		RequireEqualJSON(t, []byte("{\"id\":999,\"jsonrpc\":\"2.0\",\"result\":null}"), resRaw)
		RequireEqualJSON(t, resRaw, resCache)
		require.Equal(t, 2, countRequests(backend, "eth_getBlockByNumber"))
	})
}

func TestBatchCaching(t *testing.T) {
	redis, err := miniredis.Run()
	require.NoError(t, err)
	defer redis.Close()

	hdlr := NewBatchRPCResponseRouter()
	hdlr.SetRoute("eth_chainId", "1", "0x420")
	hdlr.SetRoute("net_version", "1", "0x1234")
	hdlr.SetRoute("eth_call", "1", "dummy_call")

	// mock LVC requests
	hdlr.SetFallbackRoute("eth_blockNumber", "0x64")
	hdlr.SetFallbackRoute("eth_gasPrice", "0x420")

	backend := NewMockBackend(hdlr)
	defer backend.Close()

	require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", backend.URL()))
	require.NoError(t, os.Setenv("REDIS_URL", fmt.Sprintf("redis://127.0.0.1:%s", redis.Port())))

	config := ReadConfig("caching")
	client := NewProxydClient("http://127.0.0.1:8545")
	shutdown, err := proxyd.Start(config)
	require.NoError(t, err)
	defer shutdown()

	// allow time for the block number fetcher to fire
	time.Sleep(1500 * time.Millisecond)

	goodChainIdResponse := "{\"jsonrpc\": \"2.0\", \"result\": \"0x420\", \"id\": 1}"
	goodNetVersionResponse := "{\"jsonrpc\": \"2.0\", \"result\": \"0x1234\", \"id\": 1}"
	goodEthCallResponse := "{\"jsonrpc\": \"2.0\", \"result\": \"dummy_call\", \"id\": 1}"

	res, _, err := client.SendBatchRPC(
		NewRPCReq("1", "eth_chainId", nil),
		NewRPCReq("1", "net_version", nil),
	)
	require.NoError(t, err)
	RequireEqualJSON(t, []byte(asArray(goodChainIdResponse, goodNetVersionResponse)), res)
	require.Equal(t, 1, countRequests(backend, "eth_chainId"))
	require.Equal(t, 1, countRequests(backend, "net_version"))

	backend.Reset()
	res, _, err = client.SendBatchRPC(
		NewRPCReq("1", "eth_chainId", nil),
		NewRPCReq("1", "eth_call", []interface{}{`{"to":"0x1234"}`, "pending"}),
		NewRPCReq("1", "net_version", nil),
	)
	require.NoError(t, err)
	RequireEqualJSON(t, []byte(asArray(goodChainIdResponse, goodEthCallResponse, goodNetVersionResponse)), res)
	require.Equal(t, 0, countRequests(backend, "eth_chainId"))
	require.Equal(t, 0, countRequests(backend, "net_version"))
	require.Equal(t, 1, countRequests(backend, "eth_call"))
}

func countRequests(backend *MockBackend, name string) int {
	var count int
	for _, req := range backend.Requests() {
		if bytes.Contains(req.Body, []byte(name)) {
			count++
		}
	}
	return count
}
242
proxyd/proxyd/integration_tests/failover_test.go
Normal file
@ -0,0 +1,242 @@
package integration_tests

import (
	"fmt"
	"net/http"
	"os"
	"sync/atomic"
	"testing"
	"time"

	"github.com/ethereum-optimism/optimism/proxyd"
	"github.com/stretchr/testify/require"
)

const (
	goodResponse       = `{"jsonrpc": "2.0", "result": "hello", "id": 999}`
	noBackendsResponse = `{"error":{"code":-32011,"message":"no backends available for method"},"id":999,"jsonrpc":"2.0"}`
)

func TestFailover(t *testing.T) {
	goodBackend := NewMockBackend(BatchedResponseHandler(200, goodResponse))
	defer goodBackend.Close()
	badBackend := NewMockBackend(nil)
	defer badBackend.Close()

	require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL()))
	require.NoError(t, os.Setenv("BAD_BACKEND_RPC_URL", badBackend.URL()))

	config := ReadConfig("failover")
	client := NewProxydClient("http://127.0.0.1:8545")
	shutdown, err := proxyd.Start(config)
	require.NoError(t, err)
	defer shutdown()

	tests := []struct {
		name    string
		handler http.Handler
	}{
		{
			"backend responds 200 with non-JSON response",
			http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(200)
				_, _ = w.Write([]byte("this data is not JSON!"))
			}),
		},
		{
			"backend responds with no body",
			http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(200)
			}),
		},
	}
	codes := []int{
		300,
		301,
		302,
		401,
		403,
		429,
		500,
		503,
	}
	for _, code := range codes {
		tests = append(tests, struct {
			name    string
			handler http.Handler
		}{
			fmt.Sprintf("backend %d", code),
			http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(code)
			}),
		})
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			badBackend.SetHandler(tt.handler)
			res, statusCode, err := client.SendRPC("eth_chainId", nil)
			require.NoError(t, err)
			require.Equal(t, 200, statusCode)
			RequireEqualJSON(t, []byte(goodResponse), res)
			require.Equal(t, 1, len(badBackend.Requests()))
			require.Equal(t, 1, len(goodBackend.Requests()))
			badBackend.Reset()
			goodBackend.Reset()
		})
	}

	t.Run("backend times out and falls back to another", func(t *testing.T) {
		badBackend.SetHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			time.Sleep(2 * time.Second)
			_, _ = w.Write([]byte("[{}]"))
		}))
		res, statusCode, err := client.SendRPC("eth_chainId", nil)
		require.NoError(t, err)
		require.Equal(t, 200, statusCode)
		RequireEqualJSON(t, []byte(goodResponse), res)
		require.Equal(t, 1, len(badBackend.Requests()))
		require.Equal(t, 1, len(goodBackend.Requests()))
		goodBackend.Reset()
		badBackend.Reset()
	})

	t.Run("works with a batch request", func(t *testing.T) {
		goodBackend.SetHandler(BatchedResponseHandler(200, goodResponse, goodResponse))
		badBackend.SetHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(500)
		}))
		res, statusCode, err := client.SendBatchRPC(
			NewRPCReq("1", "eth_chainId", nil),
			NewRPCReq("2", "eth_chainId", nil),
		)
		require.NoError(t, err)
		require.Equal(t, 200, statusCode)
		RequireEqualJSON(t, []byte(asArray(goodResponse, goodResponse)), res)
		require.Equal(t, 1, len(badBackend.Requests()))
		require.Equal(t, 1, len(goodBackend.Requests()))
		goodBackend.Reset()
		badBackend.Reset()
	})
}

func TestRetries(t *testing.T) {
	backend := NewMockBackend(BatchedResponseHandler(200, goodResponse))
	defer backend.Close()

	require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", backend.URL()))
	config := ReadConfig("retries")
	client := NewProxydClient("http://127.0.0.1:8545")
	shutdown, err := proxyd.Start(config)
	require.NoError(t, err)
	defer shutdown()

	attempts := int32(0)
	backend.SetHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		incremented := atomic.AddInt32(&attempts, 1)
		if incremented != 2 {
			w.WriteHeader(500)
			return
		}
		BatchedResponseHandler(200, goodResponse)(w, r)
	}))

	// test case where request eventually succeeds
	res, statusCode, err := client.SendRPC("eth_chainId", nil)
	require.NoError(t, err)
	require.Equal(t, 200, statusCode)
	RequireEqualJSON(t, []byte(goodResponse), res)
	require.Equal(t, 2, len(backend.Requests()))

	// test case where it does not
	backend.Reset()
	attempts = -10
	res, statusCode, err = client.SendRPC("eth_chainId", nil)
	require.NoError(t, err)
	require.Equal(t, 503, statusCode)
	RequireEqualJSON(t, []byte(noBackendsResponse), res)
	require.Equal(t, 4, len(backend.Requests()))
}

func TestOutOfServiceInterval(t *testing.T) {
	okHandler := BatchedResponseHandler(200, goodResponse)
	goodBackend := NewMockBackend(okHandler)
	defer goodBackend.Close()
	badBackend := NewMockBackend(nil)
	defer badBackend.Close()

	require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL()))
	require.NoError(t, os.Setenv("BAD_BACKEND_RPC_URL", badBackend.URL()))

	config := ReadConfig("out_of_service_interval")
	client := NewProxydClient("http://127.0.0.1:8545")
	shutdown, err := proxyd.Start(config)
	require.NoError(t, err)
	defer shutdown()

	badBackend.SetHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(503)
	}))

	res, statusCode, err := client.SendRPC("eth_chainId", nil)
	require.NoError(t, err)
	require.Equal(t, 200, statusCode)
	RequireEqualJSON(t, []byte(goodResponse), res)
	require.Equal(t, 2, len(badBackend.Requests()))
	require.Equal(t, 1, len(goodBackend.Requests()))

	res, statusCode, err = client.SendRPC("eth_chainId", nil)
	require.NoError(t, err)
	require.Equal(t, 200, statusCode)
	RequireEqualJSON(t, []byte(goodResponse), res)
	require.Equal(t, 2, len(badBackend.Requests()))
	require.Equal(t, 2, len(goodBackend.Requests()))

	_, statusCode, err = client.SendBatchRPC(
		NewRPCReq("1", "eth_chainId", nil),
		NewRPCReq("1", "eth_chainId", nil),
	)
	require.NoError(t, err)
	require.Equal(t, 200, statusCode)
	require.Equal(t, 2, len(badBackend.Requests()))
	require.Equal(t, 4, len(goodBackend.Requests()))

	time.Sleep(time.Second)
	badBackend.SetHandler(okHandler)

	res, statusCode, err = client.SendRPC("eth_chainId", nil)
	require.NoError(t, err)
	require.Equal(t, 200, statusCode)
	RequireEqualJSON(t, []byte(goodResponse), res)
	require.Equal(t, 3, len(badBackend.Requests()))
	require.Equal(t, 4, len(goodBackend.Requests()))
}

func TestBatchWithPartialFailover(t *testing.T) {
	config := ReadConfig("failover")
	config.Server.MaxUpstreamBatchSize = 2

	goodBackend := NewMockBackend(BatchedResponseHandler(200, goodResponse, goodResponse))
	defer goodBackend.Close()
	badBackend := NewMockBackend(SingleResponseHandler(200, "this data is not JSON!"))
	defer badBackend.Close()

	require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL()))
	require.NoError(t, os.Setenv("BAD_BACKEND_RPC_URL", badBackend.URL()))

	client := NewProxydClient("http://127.0.0.1:8545")
	shutdown, err := proxyd.Start(config)
	require.NoError(t, err)
	defer shutdown()

	res, statusCode, err := client.SendBatchRPC(
		NewRPCReq("1", "eth_chainId", nil),
		NewRPCReq("2", "eth_chainId", nil),
		NewRPCReq("3", "eth_chainId", nil),
		NewRPCReq("4", "eth_chainId", nil),
	)
	require.NoError(t, err)
	require.Equal(t, 200, statusCode)
	RequireEqualJSON(t, []byte(asArray(goodResponse, goodResponse, goodResponse, goodResponse)), res)
	require.Equal(t, 2, len(badBackend.Requests()))
	require.Equal(t, 2, len(goodBackend.Requests()))
}
79
proxyd/proxyd/integration_tests/max_rpc_conns_test.go
Normal file
@ -0,0 +1,79 @@
package integration_tests

import (
	"net/http"
	"net/http/httptest"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/ethereum-optimism/optimism/proxyd"
	"github.com/stretchr/testify/require"
)

func TestMaxConcurrentRPCs(t *testing.T) {
	var (
		mu                sync.Mutex
		concurrentRPCs    int
		maxConcurrentRPCs int
	)
	handler := func(w http.ResponseWriter, r *http.Request) {
		mu.Lock()
		concurrentRPCs++
		if maxConcurrentRPCs < concurrentRPCs {
			maxConcurrentRPCs = concurrentRPCs
		}
		mu.Unlock()

		time.Sleep(time.Second * 2)
		BatchedResponseHandler(200, goodResponse)(w, r)

		mu.Lock()
		concurrentRPCs--
		mu.Unlock()
	}
	// We don't use the MockBackend because it serializes requests to the handler
	slowBackend := httptest.NewServer(http.HandlerFunc(handler))
	defer slowBackend.Close()

	require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", slowBackend.URL))

	config := ReadConfig("max_rpc_conns")
	client := NewProxydClient("http://127.0.0.1:8545")
	shutdown, err := proxyd.Start(config)
	require.NoError(t, err)
	defer shutdown()

	type resWithCodeErr struct {
		res  []byte
		code int
		err  error
	}
	resCh := make(chan *resWithCodeErr)
	for i := 0; i < 3; i++ {
		go func() {
			res, code, err := client.SendRPC("eth_chainId", nil)
			resCh <- &resWithCodeErr{
				res:  res,
				code: code,
				err:  err,
			}
		}()
	}
	res1 := <-resCh
	res2 := <-resCh
	res3 := <-resCh

	require.NoError(t, res1.err)
	require.NoError(t, res2.err)
	require.NoError(t, res3.err)
	require.Equal(t, 200, res1.code)
	require.Equal(t, 200, res2.code)
	require.Equal(t, 200, res3.code)
	RequireEqualJSON(t, []byte(goodResponse), res1.res)
	RequireEqualJSON(t, []byte(goodResponse), res2.res)
	RequireEqualJSON(t, []byte(goodResponse), res3.res)

	require.EqualValues(t, 2, maxConcurrentRPCs)
}
253
proxyd/proxyd/integration_tests/mock_backend_test.go
Normal file
@ -0,0 +1,253 @@
package integration_tests

import (
	"bytes"
	"context"
	"encoding/json"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"sync"

	"github.com/ethereum-optimism/optimism/proxyd"
)

type RecordedRequest struct {
	Method  string
	Headers http.Header
	Body    []byte
}

type MockBackend struct {
	handler  http.Handler
	server   *httptest.Server
	mtx      sync.RWMutex
	requests []*RecordedRequest
}

func SingleResponseHandler(code int, response string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(code)
		_, _ = w.Write([]byte(response))
	}
}

func BatchedResponseHandler(code int, responses ...string) http.HandlerFunc {
	// all proxyd upstream requests are batched
	return func(w http.ResponseWriter, r *http.Request) {
		var body string
		body += "["
		for i, response := range responses {
			body += response
			if i+1 < len(responses) {
				body += ","
			}
		}
		body += "]"
		SingleResponseHandler(code, body)(w, r)
	}
}

type responseMapping struct {
	result interface{}
	calls  int
}
type BatchRPCResponseRouter struct {
	m        map[string]map[string]*responseMapping
	fallback map[string]interface{}
	mtx      sync.Mutex
}

func NewBatchRPCResponseRouter() *BatchRPCResponseRouter {
	return &BatchRPCResponseRouter{
		m:        make(map[string]map[string]*responseMapping),
		fallback: make(map[string]interface{}),
	}
}

func (h *BatchRPCResponseRouter) SetRoute(method string, id string, result interface{}) {
	h.mtx.Lock()
	defer h.mtx.Unlock()

	switch result.(type) {
	case string:
	case nil:
		break
	default:
		panic("invalid result type")
	}

	m := h.m[method]
	if m == nil {
		m = make(map[string]*responseMapping)
	}
	m[id] = &responseMapping{result: result}
	h.m[method] = m
}

func (h *BatchRPCResponseRouter) SetFallbackRoute(method string, result interface{}) {
	h.mtx.Lock()
	defer h.mtx.Unlock()

	switch result.(type) {
	case string:
	case nil:
		break
	default:
		panic("invalid result type")
	}

	h.fallback[method] = result
}

func (h *BatchRPCResponseRouter) GetNumCalls(method string, id string) int {
	h.mtx.Lock()
	defer h.mtx.Unlock()

	if m := h.m[method]; m != nil {
		if rm := m[id]; rm != nil {
			return rm.calls
		}
	}
	return 0
}

func (h *BatchRPCResponseRouter) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	h.mtx.Lock()
	defer h.mtx.Unlock()

	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		panic(err)
	}

	if proxyd.IsBatch(body) {
		batch, err := proxyd.ParseBatchRPCReq(body)
		if err != nil {
			panic(err)
		}
		out := make([]*proxyd.RPCRes, len(batch))
		for i := range batch {
			req, err := proxyd.ParseRPCReq(batch[i])
			if err != nil {
				panic(err)
			}

			var result interface{}
			var resultHasValue bool

			if mappings, exists := h.m[req.Method]; exists {
				if rm := mappings[string(req.ID)]; rm != nil {
					result = rm.result
					resultHasValue = true
					rm.calls++
				}
			}
			if !resultHasValue {
				result, resultHasValue = h.fallback[req.Method]
			}
			if !resultHasValue {
				w.WriteHeader(400)
				return
			}

			out[i] = &proxyd.RPCRes{
				JSONRPC: proxyd.JSONRPCVersion,
				Result:  result,
				ID:      req.ID,
			}
		}
		if err := json.NewEncoder(w).Encode(out); err != nil {
			panic(err)
		}
		return
	}

	req, err := proxyd.ParseRPCReq(body)
	if err != nil {
		panic(err)
	}

	var result interface{}
	var resultHasValue bool

	if mappings, exists := h.m[req.Method]; exists {
		if rm := mappings[string(req.ID)]; rm != nil {
			result = rm.result
			resultHasValue = true
			rm.calls++
		}
	}
	if !resultHasValue {
		result, resultHasValue = h.fallback[req.Method]
	}
	if !resultHasValue {
		w.WriteHeader(400)
		return
	}

	out := &proxyd.RPCRes{
		JSONRPC: proxyd.JSONRPCVersion,
		Result:  result,
		ID:      req.ID,
	}
	enc := json.NewEncoder(w)
	if err := enc.Encode(out); err != nil {
		panic(err)
	}
}

func NewMockBackend(handler http.Handler) *MockBackend {
	mb := &MockBackend{
		handler: handler,
	}
	mb.server = httptest.NewServer(http.HandlerFunc(mb.wrappedHandler))
	return mb
}

func (m *MockBackend) URL() string {
	return m.server.URL
}

func (m *MockBackend) Close() {
	m.server.Close()
}

func (m *MockBackend) SetHandler(handler http.Handler) {
	m.mtx.Lock()
	m.handler = handler
	m.mtx.Unlock()
}

func (m *MockBackend) Reset() {
	m.mtx.Lock()
	m.requests = nil
	m.mtx.Unlock()
}

func (m *MockBackend) Requests() []*RecordedRequest {
	m.mtx.RLock()
	defer m.mtx.RUnlock()
	out := make([]*RecordedRequest, len(m.requests))
	for i := 0; i < len(m.requests); i++ {
		out[i] = m.requests[i]
	}
	return out
}

func (m *MockBackend) wrappedHandler(w http.ResponseWriter, r *http.Request) {
	m.mtx.Lock()
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		panic(err)
	}
	clone := r.Clone(context.Background())
	clone.Body = ioutil.NopCloser(bytes.NewReader(body))
	m.requests = append(m.requests, &RecordedRequest{
		Method:  r.Method,
		Headers: r.Header.Clone(),
		Body:    body,
	})
	m.handler.ServeHTTP(w, clone)
	m.mtx.Unlock()
}
60
proxyd/proxyd/integration_tests/rate_limit_test.go
Normal file
@ -0,0 +1,60 @@
package integration_tests

import (
	"os"
	"testing"

	"github.com/ethereum-optimism/optimism/proxyd"
	"github.com/stretchr/testify/require"
)

type resWithCode struct {
	code int
	res  []byte
}

func TestMaxRPSLimit(t *testing.T) {
	goodBackend := NewMockBackend(BatchedResponseHandler(200, goodResponse))
	defer goodBackend.Close()

	require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL()))

	config := ReadConfig("rate_limit")
	client := NewProxydClient("http://127.0.0.1:8545")
	shutdown, err := proxyd.Start(config)
	require.NoError(t, err)
	defer shutdown()

	resCh := make(chan *resWithCode)
	for i := 0; i < 3; i++ {
		go func() {
			res, code, err := client.SendRPC("eth_chainId", nil)
			require.NoError(t, err)
			resCh <- &resWithCode{
				code: code,
				res:  res,
			}
		}()
	}

	codes := make(map[int]int)
	var limitedRes []byte
	for i := 0; i < 3; i++ {
		res := <-resCh
		code := res.code
		if codes[code] == 0 {
			codes[code] = 1
		} else {
			codes[code] += 1
		}

		// 503 because there's only one backend available
		if code == 503 {
			limitedRes = res.res
		}
	}

	require.Equal(t, 2, codes[200])
	require.Equal(t, 1, codes[503])
	RequireEqualJSON(t, []byte(noBackendsResponse), limitedRes)
}
20
proxyd/proxyd/integration_tests/testdata/batch_timeout.toml
vendored
Normal file
@ -0,0 +1,20 @@
[server]
rpc_port = 8545
timeout_seconds = 1
max_upstream_batch_size = 1

[backend]
response_timeout_seconds = 1
max_retries = 3

[backends]
[backends.slow]
rpc_url = "$SLOW_BACKEND_RPC_URL"
ws_url = "$SLOW_BACKEND_RPC_URL"

[backend_groups]
[backend_groups.main]
backends = ["slow"]

[rpc_method_mappings]
eth_chainId = "main"
19
proxyd/proxyd/integration_tests/testdata/batching.toml
vendored
Normal file
@ -0,0 +1,19 @@
[server]
rpc_port = 8545

[backend]
response_timeout_seconds = 1

[backends]
[backends.good]
rpc_url = "$GOOD_BACKEND_RPC_URL"
ws_url = "$GOOD_BACKEND_RPC_URL"

[backend_groups]
[backend_groups.main]
backends = ["good"]

[rpc_method_mappings]
eth_chainId = "main"
net_version = "main"
eth_call = "main"
29
proxyd/proxyd/integration_tests/testdata/caching.toml
vendored
Normal file
@ -0,0 +1,29 @@
[server]
rpc_port = 8545

[backend]
response_timeout_seconds = 1

[redis]
url = "$REDIS_URL"

[cache]
enabled = true
block_sync_rpc_url = "$GOOD_BACKEND_RPC_URL"


[backends]
[backends.good]
rpc_url = "$GOOD_BACKEND_RPC_URL"
ws_url = "$GOOD_BACKEND_RPC_URL"

[backend_groups]
[backend_groups.main]
backends = ["good"]

[rpc_method_mappings]
eth_chainId = "main"
net_version = "main"
eth_getBlockByNumber = "main"
eth_blockNumber = "main"
eth_call = "main"
20
proxyd/proxyd/integration_tests/testdata/failover.toml
vendored
Normal file
@ -0,0 +1,20 @@
[server]
rpc_port = 8545

[backend]
response_timeout_seconds = 1

[backends]
[backends.good]
rpc_url = "$GOOD_BACKEND_RPC_URL"
ws_url = "$GOOD_BACKEND_RPC_URL"
[backends.bad]
rpc_url = "$BAD_BACKEND_RPC_URL"
ws_url = "$BAD_BACKEND_RPC_URL"

[backend_groups]
[backend_groups.main]
backends = ["bad", "good"]

[rpc_method_mappings]
eth_chainId = "main"
19
proxyd/proxyd/integration_tests/testdata/max_rpc_conns.toml
vendored
Normal file
@ -0,0 +1,19 @@
[server]
rpc_port = 8545
max_concurrent_rpcs = 2

[backend]
# this should cover blocked requests due to max_concurrent_rpcs
response_timeout_seconds = 12

[backends]
[backends.good]
rpc_url = "$GOOD_BACKEND_RPC_URL"
ws_url = "$GOOD_BACKEND_RPC_URL"

[backend_groups]
[backend_groups.main]
backends = ["good"]

[rpc_method_mappings]
eth_chainId = "main"
22
proxyd/proxyd/integration_tests/testdata/out_of_service_interval.toml
vendored
Normal file
@ -0,0 +1,22 @@
[server]
rpc_port = 8545

[backend]
response_timeout_seconds = 1
max_retries = 1
out_of_service_seconds = 1

[backends]
[backends.good]
rpc_url = "$GOOD_BACKEND_RPC_URL"
ws_url = "$GOOD_BACKEND_RPC_URL"
[backends.bad]
rpc_url = "$BAD_BACKEND_RPC_URL"
ws_url = "$BAD_BACKEND_RPC_URL"

[backend_groups]
[backend_groups.main]
backends = ["bad", "good"]

[rpc_method_mappings]
eth_chainId = "main"
18
proxyd/proxyd/integration_tests/testdata/rate_limit.toml
vendored
Normal file
@ -0,0 +1,18 @@
[server]
rpc_port = 8545

[backend]
response_timeout_seconds = 1

[backends]
[backends.good]
rpc_url = "$GOOD_BACKEND_RPC_URL"
ws_url = "$GOOD_BACKEND_RPC_URL"
max_rps = 2

[backend_groups]
[backend_groups.main]
backends = ["good"]

[rpc_method_mappings]
eth_chainId = "main"
18
proxyd/proxyd/integration_tests/testdata/retries.toml
vendored
Normal file
@ -0,0 +1,18 @@
[server]
rpc_port = 8545

[backend]
response_timeout_seconds = 1
max_retries = 3

[backends]
[backends.good]
rpc_url = "$GOOD_BACKEND_RPC_URL"
ws_url = "$GOOD_BACKEND_RPC_URL"

[backend_groups]
[backend_groups.main]
backends = ["good"]

[rpc_method_mappings]
eth_chainId = "main"
17
proxyd/proxyd/integration_tests/testdata/whitelist.toml
vendored
Normal file
@ -0,0 +1,17 @@
[server]
rpc_port = 8545

[backend]
response_timeout_seconds = 1

[backends]
[backends.good]
rpc_url = "$GOOD_BACKEND_RPC_URL"
ws_url = "$GOOD_BACKEND_RPC_URL"

[backend_groups]
[backend_groups.main]
backends = ["good"]

[rpc_method_mappings]
eth_chainId = "main"
109
proxyd/proxyd/integration_tests/util_test.go
Normal file
@ -0,0 +1,109 @@
package integration_tests

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
	"testing"

	"github.com/BurntSushi/toml"
	"github.com/ethereum-optimism/optimism/proxyd"
	"github.com/ethereum/go-ethereum/log"
	"github.com/stretchr/testify/require"
)

type ProxydClient struct {
	url string
}

func NewProxydClient(url string) *ProxydClient {
	return &ProxydClient{url: url}
}

func (p *ProxydClient) SendRPC(method string, params []interface{}) ([]byte, int, error) {
	rpcReq := NewRPCReq("999", method, params)
	body, err := json.Marshal(rpcReq)
	if err != nil {
		panic(err)
	}
	return p.SendRequest(body)
}

func (p *ProxydClient) SendBatchRPC(reqs ...*proxyd.RPCReq) ([]byte, int, error) {
	body, err := json.Marshal(reqs)
	if err != nil {
		panic(err)
	}
	return p.SendRequest(body)
}

func (p *ProxydClient) SendRequest(body []byte) ([]byte, int, error) {
	res, err := http.Post(p.url, "application/json", bytes.NewReader(body))
	if err != nil {
		return nil, -1, err
	}
	defer res.Body.Close()
	code := res.StatusCode
	resBody, err := ioutil.ReadAll(res.Body)
	if err != nil {
		panic(err)
	}
	return resBody, code, nil
}

func RequireEqualJSON(t *testing.T, expected []byte, actual []byte) {
	expJSON := canonicalizeJSON(t, expected)
	actJSON := canonicalizeJSON(t, actual)
	require.Equal(t, string(expJSON), string(actJSON))
}

func canonicalizeJSON(t *testing.T, in []byte) []byte {
	var any interface{}
	if in[0] == '[' {
		any = make([]interface{}, 0)
	} else {
		any = make(map[string]interface{})
	}

	err := json.Unmarshal(in, &any)
	require.NoError(t, err)
	out, err := json.Marshal(any)
	require.NoError(t, err)
	return out
}

func ReadConfig(name string) *proxyd.Config {
	config := new(proxyd.Config)
	_, err := toml.DecodeFile(fmt.Sprintf("testdata/%s.toml", name), config)
	if err != nil {
		panic(err)
	}
	return config
}

func NewRPCReq(id string, method string, params []interface{}) *proxyd.RPCReq {
	jsonParams, err := json.Marshal(params)
	if err != nil {
		panic(err)
	}

	return &proxyd.RPCReq{
		JSONRPC: proxyd.JSONRPCVersion,
		Method:  method,
		Params:  jsonParams,
		ID:      []byte(id),
	}
}

func InitLogger() {
	log.Root().SetHandler(
		log.LvlFilterHandler(log.LvlDebug,
			log.StreamHandler(
				os.Stdout,
				log.TerminalFormat(false),
			)),
	)
}
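The helpers above are how every integration test drives proxyd: ReadConfig loads a testdata TOML, NewRPCReq/SendRPC build single calls, and SendBatchRPC marshals several requests into one JSON array. A hypothetical sketch of the batch path, assuming a proxyd instance is already listening on 127.0.0.1:8545 (the real tests arrange that via proxyd.Start, as in the files above and below):

```go
package integration_tests

import "testing"

// Hypothetical sketch: NewRPCReq builds proxyd.RPCReq values and SendBatchRPC
// POSTs them as a single JSON array, which proxyd fans out per rpc_method_mappings.
func TestBatchHelperSketch(t *testing.T) {
	client := NewProxydClient("http://127.0.0.1:8545")
	res, code, err := client.SendBatchRPC(
		NewRPCReq("1", "eth_chainId", nil),
		NewRPCReq("2", "net_version", nil),
	)
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("status=%d body=%s", code, res)
}
```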
232
proxyd/proxyd/integration_tests/validation_test.go
Normal file
@ -0,0 +1,232 @@
package integration_tests

import (
	"os"
	"strings"
	"testing"

	"github.com/ethereum-optimism/optimism/proxyd"
	"github.com/stretchr/testify/require"
)

const (
	notWhitelistedResponse        = `{"jsonrpc":"2.0","error":{"code":-32001,"message":"rpc method is not whitelisted"},"id":999}`
	parseErrResponse              = `{"jsonrpc":"2.0","error":{"code":-32700,"message":"parse error"},"id":null}`
	invalidJSONRPCVersionResponse = `{"error":{"code":-32601,"message":"invalid JSON-RPC version"},"id":null,"jsonrpc":"2.0"}`
	invalidIDResponse             = `{"error":{"code":-32601,"message":"invalid ID"},"id":null,"jsonrpc":"2.0"}`
	invalidMethodResponse         = `{"error":{"code":-32601,"message":"no method specified"},"id":null,"jsonrpc":"2.0"}`
	invalidBatchLenResponse       = `{"error":{"code":-32601,"message":"must specify at least one batch call"},"id":null,"jsonrpc":"2.0"}`
)

func TestSingleRPCValidation(t *testing.T) {
	goodBackend := NewMockBackend(BatchedResponseHandler(200, goodResponse))
	defer goodBackend.Close()

	require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL()))

	config := ReadConfig("whitelist")
	client := NewProxydClient("http://127.0.0.1:8545")
	shutdown, err := proxyd.Start(config)
	require.NoError(t, err)
	defer shutdown()

	tests := []struct {
		name string
		body string
		res  string
		code int
	}{
		{
			"body not JSON",
			"this ain't an RPC call",
			parseErrResponse,
			400,
		},
		{
			"body not RPC",
			"{\"not\": \"rpc\"}",
			invalidJSONRPCVersionResponse,
			400,
		},
		{
			"body missing RPC ID",
			"{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23]}",
			invalidIDResponse,
			400,
		},
		{
			"body has array ID",
			"{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": []}",
			invalidIDResponse,
			400,
		},
		{
			"body has object ID",
			"{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": {}}",
			invalidIDResponse,
			400,
		},
		{
			"bad method",
			"{\"jsonrpc\": \"2.0\", \"method\": 7, \"params\": [42, 23], \"id\": 1}",
			parseErrResponse,
			400,
		},
		{
			"bad JSON-RPC",
			"{\"jsonrpc\": \"1.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": 1}",
			invalidJSONRPCVersionResponse,
			400,
		},
		{
			"omitted method",
			"{\"jsonrpc\": \"2.0\", \"params\": [42, 23], \"id\": 1}",
			invalidMethodResponse,
			400,
		},
		{
			"not whitelisted method",
			"{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": 999}",
			notWhitelistedResponse,
			403,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			res, code, err := client.SendRequest([]byte(tt.body))
			require.NoError(t, err)
			RequireEqualJSON(t, []byte(tt.res), res)
			require.Equal(t, tt.code, code)
			require.Equal(t, 0, len(goodBackend.Requests()))
		})
	}
}

func TestBatchRPCValidation(t *testing.T) {
	goodBackend := NewMockBackend(BatchedResponseHandler(200, goodResponse))
	defer goodBackend.Close()

	require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL()))

	config := ReadConfig("whitelist")
	client := NewProxydClient("http://127.0.0.1:8545")
	shutdown, err := proxyd.Start(config)
	require.NoError(t, err)
	defer shutdown()

	tests := []struct {
		name     string
		body     string
		res      string
		code     int
		reqCount int
	}{
		{
			"empty batch",
			"[]",
			invalidBatchLenResponse,
			400,
			0,
		},
		{
			"bad json",
			"[{,]",
			parseErrResponse,
			400,
			0,
		},
		{
			"not object in batch",
			"[123]",
			asArray(parseErrResponse),
			200,
			0,
		},
		{
			"body not RPC",
			"[{\"not\": \"rpc\"}]",
			asArray(invalidJSONRPCVersionResponse),
			200,
			0,
		},
		{
			"body missing RPC ID",
			"[{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23]}]",
			asArray(invalidIDResponse),
			200,
			0,
		},
		{
			"body has array ID",
			"[{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": []}]",
			asArray(invalidIDResponse),
			200,
			0,
		},
		{
			"body has object ID",
			"[{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": {}}]",
			asArray(invalidIDResponse),
			200,
			0,
		},
		// this happens because we can't deserialize the method into a non
		// string value, and it blows up the parsing for the whole request.
		{
			"bad method",
			"[{\"error\":{\"code\":-32600,\"message\":\"invalid request\"},\"id\":null,\"jsonrpc\":\"2.0\"}]",
			asArray(invalidMethodResponse),
			200,
			0,
		},
		{
			"bad JSON-RPC",
			"[{\"jsonrpc\": \"1.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": 1}]",
			asArray(invalidJSONRPCVersionResponse),
			200,
			0,
		},
		{
			"omitted method",
			"[{\"jsonrpc\": \"2.0\", \"params\": [42, 23], \"id\": 1}]",
			asArray(invalidMethodResponse),
			200,
			0,
		},
		{
			"not whitelisted method",
			"[{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": 999}]",
			asArray(notWhitelistedResponse),
			200,
			0,
		},
		{
			"mixed",
			asArray(
				"{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": 999}",
				"{\"jsonrpc\": \"2.0\", \"method\": \"eth_chainId\", \"params\": [], \"id\": 123}",
				"123",
			),
			asArray(
				notWhitelistedResponse,
				goodResponse,
				parseErrResponse,
			),
			200,
			1,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			res, code, err := client.SendRequest([]byte(tt.body))
			require.NoError(t, err)
			RequireEqualJSON(t, []byte(tt.res), res)
			require.Equal(t, tt.code, code)
			require.Equal(t, tt.reqCount, len(goodBackend.Requests()))
		})
	}
}

func asArray(in ...string) string {
	return "[" + strings.Join(in, ",") + "]"
}
87
proxyd/proxyd/lvc.go
Normal file
@ -0,0 +1,87 @@
package proxyd

import (
	"context"
	"time"

	"github.com/ethereum/go-ethereum/ethclient"
	"github.com/ethereum/go-ethereum/log"
)

const cacheSyncRate = 1 * time.Second

type lvcUpdateFn func(context.Context, *ethclient.Client) (string, error)

type EthLastValueCache struct {
	client  *ethclient.Client
	cache   Cache
	key     string
	updater lvcUpdateFn
	quit    chan struct{}
}

func newLVC(client *ethclient.Client, cache Cache, cacheKey string, updater lvcUpdateFn) *EthLastValueCache {
	return &EthLastValueCache{
		client:  client,
		cache:   cache,
		key:     cacheKey,
		updater: updater,
		quit:    make(chan struct{}),
	}
}

func (h *EthLastValueCache) Start() {
	go func() {
		ticker := time.NewTicker(cacheSyncRate)
		defer ticker.Stop()

		for {
			select {
			case <-ticker.C:
				lvcPollTimeGauge.WithLabelValues(h.key).SetToCurrentTime()

				value, err := h.getUpdate()
				if err != nil {
					log.Error("error retrieving latest value", "key", h.key, "error", err)
					continue
				}
				log.Trace("polling latest value", "value", value)

				if err := h.cache.Put(context.Background(), h.key, value); err != nil {
					log.Error("error writing last value to cache", "key", h.key, "error", err)
				}

			case <-h.quit:
				return
			}
		}
	}()
}

func (h *EthLastValueCache) getUpdate() (string, error) {
	const maxRetries = 5
	var err error

	for i := 0; i <= maxRetries; i++ {
		var value string
		value, err = h.updater(context.Background(), h.client)
		if err != nil {
			backoff := calcBackoff(i)
			log.Warn("http operation failed. retrying...", "error", err, "backoff", backoff)
			lvcErrorsTotal.WithLabelValues(h.key).Inc()
			time.Sleep(backoff)
			continue
		}
		return value, nil
	}

	return "", wrapErr(err, "exceeded retries")
}

func (h *EthLastValueCache) Stop() {
	close(h.quit)
}

func (h *EthLastValueCache) Read(ctx context.Context) (string, error) {
	return h.cache.Get(ctx, h.key)
}
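EthLastValueCache ("lvc") is a simple poll loop: every cacheSyncRate it calls the updater, retries with backoff on failure, and writes the latest value into the Cache under its key so readers never block on an upstream call. A minimal wiring sketch, assuming the same block-number updater that proxyd.go builds later in this diff; the helper name below is hypothetical.

```go
package proxyd

import (
	"context"
	"strconv"

	"github.com/ethereum/go-ethereum/ethclient"
)

// exampleBlockNumberLVC is a hypothetical helper mirroring makeGetLatestBlockNumFn:
// it polls eth_blockNumber once per cacheSyncRate and stores the result as a
// decimal string under "lvc:block_number".
func exampleBlockNumberLVC(client *ethclient.Client, cache Cache) *EthLastValueCache {
	lvc := newLVC(client, cache, "lvc:block_number", func(ctx context.Context, c *ethclient.Client) (string, error) {
		n, err := c.BlockNumber(ctx)
		return strconv.FormatUint(n, 10), err
	})
	lvc.Start() // polls until Stop() closes the quit channel
	return lvc
}
```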
399
proxyd/proxyd/methods.go
Normal file
@ -0,0 +1,399 @@
package proxyd

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"sync"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

var (
	errInvalidRPCParams = errors.New("invalid RPC params")
)

type RPCMethodHandler interface {
	GetRPCMethod(context.Context, *RPCReq) (*RPCRes, error)
	PutRPCMethod(context.Context, *RPCReq, *RPCRes) error
}

type StaticMethodHandler struct {
	cache interface{}
	m     sync.RWMutex
}

func (e *StaticMethodHandler) GetRPCMethod(ctx context.Context, req *RPCReq) (*RPCRes, error) {
	e.m.RLock()
	cache := e.cache
	e.m.RUnlock()

	if cache == nil {
		return nil, nil
	}
	return &RPCRes{
		JSONRPC: req.JSONRPC,
		Result:  cache,
		ID:      req.ID,
	}, nil
}

func (e *StaticMethodHandler) PutRPCMethod(ctx context.Context, req *RPCReq, res *RPCRes) error {
	e.m.Lock()
	if e.cache == nil {
		e.cache = res.Result
	}
	e.m.Unlock()
	return nil
}

type EthGetBlockByNumberMethodHandler struct {
	cache                 Cache
	getLatestBlockNumFn   GetLatestBlockNumFn
	numBlockConfirmations int
}

func (e *EthGetBlockByNumberMethodHandler) cacheKey(req *RPCReq) string {
	input, includeTx, err := decodeGetBlockByNumberParams(req.Params)
	if err != nil {
		return ""
	}
	return fmt.Sprintf("method:eth_getBlockByNumber:%s:%t", input, includeTx)
}

func (e *EthGetBlockByNumberMethodHandler) cacheable(req *RPCReq) (bool, error) {
	blockNum, _, err := decodeGetBlockByNumberParams(req.Params)
	if err != nil {
		return false, err
	}
	return !isBlockDependentParam(blockNum), nil
}

func (e *EthGetBlockByNumberMethodHandler) GetRPCMethod(ctx context.Context, req *RPCReq) (*RPCRes, error) {
	if ok, err := e.cacheable(req); !ok || err != nil {
		return nil, err
	}
	key := e.cacheKey(req)
	return getImmutableRPCResponse(ctx, e.cache, key, req)
}

func (e *EthGetBlockByNumberMethodHandler) PutRPCMethod(ctx context.Context, req *RPCReq, res *RPCRes) error {
	if ok, err := e.cacheable(req); !ok || err != nil {
		return err
	}

	blockInput, _, err := decodeGetBlockByNumberParams(req.Params)
	if err != nil {
		return err
	}
	if isBlockDependentParam(blockInput) {
		return nil
	}
	if blockInput != "earliest" {
		curBlock, err := e.getLatestBlockNumFn(ctx)
		if err != nil {
			return err
		}
		blockNum, err := decodeBlockInput(blockInput)
		if err != nil {
			return err
		}
		if curBlock <= blockNum+uint64(e.numBlockConfirmations) {
			return nil
		}
	}

	key := e.cacheKey(req)
	return putImmutableRPCResponse(ctx, e.cache, key, req, res)
}

type EthGetBlockRangeMethodHandler struct {
	cache                 Cache
	getLatestBlockNumFn   GetLatestBlockNumFn
	numBlockConfirmations int
}

func (e *EthGetBlockRangeMethodHandler) cacheKey(req *RPCReq) string {
	start, end, includeTx, err := decodeGetBlockRangeParams(req.Params)
	if err != nil {
		return ""
	}
	return fmt.Sprintf("method:eth_getBlockRange:%s:%s:%t", start, end, includeTx)
}

func (e *EthGetBlockRangeMethodHandler) cacheable(req *RPCReq) (bool, error) {
	start, end, _, err := decodeGetBlockRangeParams(req.Params)
	if err != nil {
		return false, err
	}
	return !isBlockDependentParam(start) && !isBlockDependentParam(end), nil
}

func (e *EthGetBlockRangeMethodHandler) GetRPCMethod(ctx context.Context, req *RPCReq) (*RPCRes, error) {
	if ok, err := e.cacheable(req); !ok || err != nil {
		return nil, err
	}

	key := e.cacheKey(req)
	return getImmutableRPCResponse(ctx, e.cache, key, req)
}

func (e *EthGetBlockRangeMethodHandler) PutRPCMethod(ctx context.Context, req *RPCReq, res *RPCRes) error {
	if ok, err := e.cacheable(req); !ok || err != nil {
		return err
	}

	start, end, _, err := decodeGetBlockRangeParams(req.Params)
	if err != nil {
		return err
	}
	curBlock, err := e.getLatestBlockNumFn(ctx)
	if err != nil {
		return err
	}
	if start != "earliest" {
		startNum, err := decodeBlockInput(start)
		if err != nil {
			return err
		}
		if curBlock <= startNum+uint64(e.numBlockConfirmations) {
			return nil
		}
	}
	if end != "earliest" {
		endNum, err := decodeBlockInput(end)
		if err != nil {
			return err
		}
		if curBlock <= endNum+uint64(e.numBlockConfirmations) {
			return nil
		}
	}

	key := e.cacheKey(req)
	return putImmutableRPCResponse(ctx, e.cache, key, req, res)
}

type EthCallMethodHandler struct {
	cache                 Cache
	getLatestBlockNumFn   GetLatestBlockNumFn
	numBlockConfirmations int
}

func (e *EthCallMethodHandler) cacheable(params *ethCallParams, blockTag string) bool {
	if isBlockDependentParam(blockTag) {
		return false
	}
	if params.From != "" || params.Gas != "" {
		return false
	}
	if params.Value != "" && params.Value != "0x0" {
		return false
	}
	return true
}

func (e *EthCallMethodHandler) cacheKey(params *ethCallParams, blockTag string) string {
	keyParams := fmt.Sprintf("%s:%s:%s", params.To, params.Data, blockTag)
	return fmt.Sprintf("method:eth_call:%s", keyParams)
}

func (e *EthCallMethodHandler) GetRPCMethod(ctx context.Context, req *RPCReq) (*RPCRes, error) {
	params, blockTag, err := decodeEthCallParams(req)
	if err != nil {
		return nil, err
	}
	if !e.cacheable(params, blockTag) {
		return nil, nil
	}
	key := e.cacheKey(params, blockTag)
	return getImmutableRPCResponse(ctx, e.cache, key, req)
}

func (e *EthCallMethodHandler) PutRPCMethod(ctx context.Context, req *RPCReq, res *RPCRes) error {
	params, blockTag, err := decodeEthCallParams(req)
	if err != nil {
		return err
	}
	if !e.cacheable(params, blockTag) {
		return nil
	}

	if blockTag != "earliest" {
		curBlock, err := e.getLatestBlockNumFn(ctx)
		if err != nil {
			return err
		}
		blockNum, err := decodeBlockInput(blockTag)
		if err != nil {
			return err
		}
		if curBlock <= blockNum+uint64(e.numBlockConfirmations) {
			return nil
		}
	}

	key := e.cacheKey(params, blockTag)
	return putImmutableRPCResponse(ctx, e.cache, key, req, res)
}

type EthBlockNumberMethodHandler struct {
	getLatestBlockNumFn GetLatestBlockNumFn
}

func (e *EthBlockNumberMethodHandler) GetRPCMethod(ctx context.Context, req *RPCReq) (*RPCRes, error) {
	blockNum, err := e.getLatestBlockNumFn(ctx)
	if err != nil {
		return nil, err
	}
	return makeRPCRes(req, hexutil.EncodeUint64(blockNum)), nil
}

func (e *EthBlockNumberMethodHandler) PutRPCMethod(context.Context, *RPCReq, *RPCRes) error {
	return nil
}

type EthGasPriceMethodHandler struct {
	getLatestGasPrice GetLatestGasPriceFn
}

func (e *EthGasPriceMethodHandler) GetRPCMethod(ctx context.Context, req *RPCReq) (*RPCRes, error) {
	gasPrice, err := e.getLatestGasPrice(ctx)
	if err != nil {
		return nil, err
	}
	return makeRPCRes(req, hexutil.EncodeUint64(gasPrice)), nil
}

func (e *EthGasPriceMethodHandler) PutRPCMethod(context.Context, *RPCReq, *RPCRes) error {
	return nil
}

func isBlockDependentParam(s string) bool {
	return s == "latest" || s == "pending"
}

func decodeGetBlockByNumberParams(params json.RawMessage) (string, bool, error) {
	var list []interface{}
	if err := json.Unmarshal(params, &list); err != nil {
		return "", false, err
	}
	if len(list) != 2 {
		return "", false, errInvalidRPCParams
	}
	blockNum, ok := list[0].(string)
	if !ok {
		return "", false, errInvalidRPCParams
	}
	includeTx, ok := list[1].(bool)
	if !ok {
		return "", false, errInvalidRPCParams
	}
	if !validBlockInput(blockNum) {
		return "", false, errInvalidRPCParams
	}
	return blockNum, includeTx, nil
}

func decodeGetBlockRangeParams(params json.RawMessage) (string, string, bool, error) {
	var list []interface{}
	if err := json.Unmarshal(params, &list); err != nil {
		return "", "", false, err
	}
	if len(list) != 3 {
		return "", "", false, errInvalidRPCParams
	}
	startBlockNum, ok := list[0].(string)
	if !ok {
		return "", "", false, errInvalidRPCParams
	}
	endBlockNum, ok := list[1].(string)
	if !ok {
		return "", "", false, errInvalidRPCParams
	}
	includeTx, ok := list[2].(bool)
	if !ok {
		return "", "", false, errInvalidRPCParams
	}
	if !validBlockInput(startBlockNum) || !validBlockInput(endBlockNum) {
		return "", "", false, errInvalidRPCParams
	}
	return startBlockNum, endBlockNum, includeTx, nil
}

func decodeBlockInput(input string) (uint64, error) {
	return hexutil.DecodeUint64(input)
}

type ethCallParams struct {
	From     string `json:"from"`
	To       string `json:"to"`
	Gas      string `json:"gas"`
	GasPrice string `json:"gasPrice"`
	Value    string `json:"value"`
	Data     string `json:"data"`
}

func decodeEthCallParams(req *RPCReq) (*ethCallParams, string, error) {
	var input []json.RawMessage
	if err := json.Unmarshal(req.Params, &input); err != nil {
		return nil, "", err
	}
	if len(input) != 2 {
		return nil, "", fmt.Errorf("invalid eth_call parameters")
	}
	params := new(ethCallParams)
	if err := json.Unmarshal(input[0], params); err != nil {
		return nil, "", err
	}
	var blockTag string
	if err := json.Unmarshal(input[1], &blockTag); err != nil {
		return nil, "", err
	}
	return params, blockTag, nil
}

func validBlockInput(input string) bool {
	if input == "earliest" || input == "pending" || input == "latest" {
		return true
	}
	_, err := decodeBlockInput(input)
	return err == nil
}

func makeRPCRes(req *RPCReq, result interface{}) *RPCRes {
	return &RPCRes{
		JSONRPC: JSONRPCVersion,
		ID:      req.ID,
		Result:  result,
	}
}

func getImmutableRPCResponse(ctx context.Context, cache Cache, key string, req *RPCReq) (*RPCRes, error) {
	val, err := cache.Get(ctx, key)
	if err != nil {
		return nil, err
	}
	if val == "" {
		return nil, nil
	}

	var result interface{}
	if err := json.Unmarshal([]byte(val), &result); err != nil {
		return nil, err
	}
	return &RPCRes{
		JSONRPC: req.JSONRPC,
		Result:  result,
		ID:      req.ID,
	}, nil
}

func putImmutableRPCResponse(ctx context.Context, cache Cache, key string, req *RPCReq, res *RPCRes) error {
	if key == "" {
		return nil
	}
	val := mustMarshalJSON(res.Result)
	return cache.Put(ctx, key, string(val))
}
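The method handlers above only cache responses whose parameters pin a specific block: "latest" and "pending" are rejected by isBlockDependentParam, and writes are skipped until the block is numBlockConfirmations deep. A small, hypothetical illustration of the resulting cache-key scheme (the function and the zero-value handler exist only for this sketch; cacheKey and cacheable are the methods defined above and do not touch the cache or block-number fields):

```go
package proxyd

import (
	"encoding/json"
	"fmt"
)

// exampleCacheKeys shows how an eth_getBlockByNumber request maps to a cache key,
// and that block-dependent tags such as "latest" are never considered cacheable.
func exampleCacheKeys() {
	h := &EthGetBlockByNumberMethodHandler{}

	fixed := &RPCReq{Method: "eth_getBlockByNumber", Params: json.RawMessage(`["0x100", true]`)}
	fmt.Println(h.cacheKey(fixed)) // method:eth_getBlockByNumber:0x100:true

	latest := &RPCReq{Method: "eth_getBlockByNumber", Params: json.RawMessage(`["latest", true]`)}
	ok, _ := h.cacheable(latest)
	fmt.Println(ok) // false
}
```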
280
proxyd/proxyd/metrics.go
Normal file
@ -0,0 +1,280 @@
package proxyd

import (
	"context"
	"strconv"
	"strings"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

const (
	MetricsNamespace = "proxyd"

	RPCRequestSourceHTTP = "http"
	RPCRequestSourceWS   = "ws"

	BackendProxyd = "proxyd"
	SourceClient  = "client"
	SourceBackend = "backend"
	MethodUnknown = "unknown"
)

var PayloadSizeBuckets = []float64{10, 50, 100, 500, 1000, 5000, 10000, 100000, 1000000}
var MillisecondDurationBuckets = []float64{1, 10, 50, 100, 500, 1000, 5000, 10000, 100000}

var (
	rpcRequestsTotal = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: MetricsNamespace,
		Name:      "rpc_requests_total",
		Help:      "Count of total client RPC requests.",
	})

	rpcForwardsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
		Namespace: MetricsNamespace,
		Name:      "rpc_forwards_total",
		Help:      "Count of total RPC requests forwarded to each backend.",
	}, []string{
		"auth",
		"backend_name",
		"method_name",
		"source",
	})

	rpcBackendHTTPResponseCodesTotal = promauto.NewCounterVec(prometheus.CounterOpts{
		Namespace: MetricsNamespace,
		Name:      "rpc_backend_http_response_codes_total",
		Help:      "Count of total backend responses by HTTP status code.",
	}, []string{
		"auth",
		"backend_name",
		"method_name",
		"status_code",
		"batched",
	})

	rpcErrorsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
		Namespace: MetricsNamespace,
		Name:      "rpc_errors_total",
		Help:      "Count of total RPC errors.",
	}, []string{
		"auth",
		"backend_name",
		"method_name",
		"error_code",
	})

	rpcSpecialErrorsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
		Namespace: MetricsNamespace,
		Name:      "rpc_special_errors_total",
		Help:      "Count of total special RPC errors.",
	}, []string{
		"auth",
		"backend_name",
		"method_name",
		"error_type",
	})

	rpcBackendRequestDurationSumm = promauto.NewSummaryVec(prometheus.SummaryOpts{
		Namespace:  MetricsNamespace,
		Name:       "rpc_backend_request_duration_seconds",
		Help:       "Summary of backend response times broken down by backend and method name.",
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.95: 0.005, 0.99: 0.001},
	}, []string{
		"backend_name",
		"method_name",
		"batched",
	})

	activeClientWsConnsGauge = promauto.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: MetricsNamespace,
		Name:      "active_client_ws_conns",
		Help:      "Gauge of active client WS connections.",
	}, []string{
		"auth",
	})

	activeBackendWsConnsGauge = promauto.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: MetricsNamespace,
		Name:      "active_backend_ws_conns",
		Help:      "Gauge of active backend WS connections.",
	}, []string{
		"backend_name",
	})

	unserviceableRequestsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
		Namespace: MetricsNamespace,
		Name:      "unserviceable_requests_total",
		Help:      "Count of total requests that were rejected due to no backends being available.",
	}, []string{
		"auth",
		"request_source",
	})

	httpResponseCodesTotal = promauto.NewCounterVec(prometheus.CounterOpts{
		Namespace: MetricsNamespace,
		Name:      "http_response_codes_total",
		Help:      "Count of total HTTP response codes.",
	}, []string{
		"status_code",
	})

	httpRequestDurationSumm = promauto.NewSummary(prometheus.SummaryOpts{
		Namespace:  MetricsNamespace,
		Name:       "http_request_duration_seconds",
		Help:       "Summary of HTTP request durations, in seconds.",
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.95: 0.005, 0.99: 0.001},
	})

	wsMessagesTotal = promauto.NewCounterVec(prometheus.CounterOpts{
		Namespace: MetricsNamespace,
		Name:      "ws_messages_total",
		Help:      "Count of total websocket messages including protocol control.",
	}, []string{
		"auth",
		"backend_name",
		"source",
	})

	redisErrorsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
		Namespace: MetricsNamespace,
		Name:      "redis_errors_total",
		Help:      "Count of total Redis errors.",
	}, []string{
		"source",
	})

	requestPayloadSizesGauge = promauto.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: MetricsNamespace,
		Name:      "request_payload_sizes",
		Help:      "Histogram of client request payload sizes.",
		Buckets:   PayloadSizeBuckets,
	}, []string{
		"auth",
	})

	responsePayloadSizesGauge = promauto.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: MetricsNamespace,
		Name:      "response_payload_sizes",
		Help:      "Histogram of client response payload sizes.",
		Buckets:   PayloadSizeBuckets,
	}, []string{
		"auth",
	})

	cacheHitsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
		Namespace: MetricsNamespace,
		Name:      "cache_hits_total",
		Help:      "Number of cache hits.",
	}, []string{
		"method",
	})

	cacheMissesTotal = promauto.NewCounterVec(prometheus.CounterOpts{
		Namespace: MetricsNamespace,
		Name:      "cache_misses_total",
		Help:      "Number of cache misses.",
	}, []string{
		"method",
	})

	lvcErrorsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
		Namespace: MetricsNamespace,
		Name:      "lvc_errors_total",
		Help:      "Count of lvc errors.",
	}, []string{
		"key",
	})

	lvcPollTimeGauge = promauto.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: MetricsNamespace,
		Name:      "lvc_poll_time_gauge",
		Help:      "Gauge of lvc poll time.",
	}, []string{
		"key",
	})

	batchRPCShortCircuitsTotal = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: MetricsNamespace,
		Name:      "batch_rpc_short_circuits_total",
		Help:      "Count of total batch RPC short-circuits.",
	})

	rpcSpecialErrors = []string{
		"nonce too low",
		"gas price too high",
		"gas price too low",
		"invalid parameters",
	}

	redisCacheDurationSumm = promauto.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: MetricsNamespace,
		Name:      "redis_cache_duration_milliseconds",
		Help:      "Histogram of Redis command durations, in milliseconds.",
		Buckets:   MillisecondDurationBuckets,
	}, []string{"command"})

	tooManyRequestErrorsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
		Namespace: MetricsNamespace,
		Name:      "too_many_request_errors_total",
		Help:      "Count of request timeouts due to too many concurrent RPCs.",
	}, []string{
		"backend_name",
	})
)

func RecordRedisError(source string) {
	redisErrorsTotal.WithLabelValues(source).Inc()
}

func RecordRPCError(ctx context.Context, backendName, method string, err error) {
	rpcErr, ok := err.(*RPCErr)
	var code int
	if ok {
		MaybeRecordSpecialRPCError(ctx, backendName, method, rpcErr)
		code = rpcErr.Code
	} else {
		code = -1
	}

	rpcErrorsTotal.WithLabelValues(GetAuthCtx(ctx), backendName, method, strconv.Itoa(code)).Inc()
}

func RecordWSMessage(ctx context.Context, backendName, source string) {
	wsMessagesTotal.WithLabelValues(GetAuthCtx(ctx), backendName, source).Inc()
}

func RecordUnserviceableRequest(ctx context.Context, source string) {
	unserviceableRequestsTotal.WithLabelValues(GetAuthCtx(ctx), source).Inc()
}

func RecordRPCForward(ctx context.Context, backendName, method, source string) {
	rpcForwardsTotal.WithLabelValues(GetAuthCtx(ctx), backendName, method, source).Inc()
}

func MaybeRecordSpecialRPCError(ctx context.Context, backendName, method string, rpcErr *RPCErr) {
	errMsg := strings.ToLower(rpcErr.Message)
	for _, errStr := range rpcSpecialErrors {
		if strings.Contains(errMsg, errStr) {
			rpcSpecialErrorsTotal.WithLabelValues(GetAuthCtx(ctx), backendName, method, errStr).Inc()
			return
		}
	}
}

func RecordRequestPayloadSize(ctx context.Context, payloadSize int) {
	requestPayloadSizesGauge.WithLabelValues(GetAuthCtx(ctx)).Observe(float64(payloadSize))
}

func RecordResponsePayloadSize(ctx context.Context, payloadSize int) {
	responsePayloadSizesGauge.WithLabelValues(GetAuthCtx(ctx)).Observe(float64(payloadSize))
}

func RecordCacheHit(method string) {
	cacheHitsTotal.WithLabelValues(method).Inc()
}

func RecordCacheMiss(method string) {
	cacheMissesTotal.WithLabelValues(method).Inc()
}
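All of these collectors live in the proxyd namespace and are registered via promauto at package load time, so instrumenting a code path is just a call to one of the Record helpers; proxyd.go below serves them over HTTP with promhttp when metrics are enabled. A tiny hypothetical usage sketch:

```go
package proxyd

// exampleRecordCacheOutcome is a hypothetical helper showing how a cache lookup
// result would be recorded with the counters defined above.
func exampleRecordCacheOutcome(hit bool, method string) {
	if hit {
		RecordCacheHit(method) // increments proxyd_cache_hits_total{method=...}
	} else {
		RecordCacheMiss(method) // increments proxyd_cache_misses_total{method=...}
	}
}
```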
6
proxyd/proxyd/package.json
Normal file
@ -0,0 +1,6 @@
{
  "name": "@eth-optimism/proxyd",
  "version": "3.8.5",
  "private": true,
  "dependencies": {}
}
344
proxyd/proxyd/proxyd.go
Normal file
@ -0,0 +1,344 @@
package proxyd

import (
	"context"
	"crypto/tls"
	"errors"
	"fmt"
	"net/http"
	"os"
	"strconv"
	"time"

	"github.com/ethereum/go-ethereum/common/math"
	"github.com/ethereum/go-ethereum/ethclient"
	"github.com/ethereum/go-ethereum/log"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"golang.org/x/sync/semaphore"
)

func Start(config *Config) (func(), error) {
	if len(config.Backends) == 0 {
		return nil, errors.New("must define at least one backend")
	}
	if len(config.BackendGroups) == 0 {
		return nil, errors.New("must define at least one backend group")
	}
	if len(config.RPCMethodMappings) == 0 {
		return nil, errors.New("must define at least one RPC method mapping")
	}

	for authKey := range config.Authentication {
		if authKey == "none" {
			return nil, errors.New("cannot use none as an auth key")
		}
	}

	var redisURL string
	if config.Redis.URL != "" {
		rURL, err := ReadFromEnvOrConfig(config.Redis.URL)
		if err != nil {
			return nil, err
		}
		redisURL = rURL
	}

	var lim RateLimiter
	var err error
	if redisURL == "" {
		log.Warn("redis is not configured, using local rate limiter")
		lim = NewLocalRateLimiter()
	} else {
		lim, err = NewRedisRateLimiter(redisURL)
		if err != nil {
			return nil, err
		}
	}

	maxConcurrentRPCs := config.Server.MaxConcurrentRPCs
	if maxConcurrentRPCs == 0 {
		maxConcurrentRPCs = math.MaxInt64
	}
	rpcRequestSemaphore := semaphore.NewWeighted(maxConcurrentRPCs)

	backendNames := make([]string, 0)
	backendsByName := make(map[string]*Backend)
	for name, cfg := range config.Backends {
		opts := make([]BackendOpt, 0)

		rpcURL, err := ReadFromEnvOrConfig(cfg.RPCURL)
		if err != nil {
			return nil, err
		}
		wsURL, err := ReadFromEnvOrConfig(cfg.WSURL)
		if err != nil {
			return nil, err
		}
		if rpcURL == "" {
			return nil, fmt.Errorf("must define an RPC URL for backend %s", name)
		}
		if wsURL == "" {
			return nil, fmt.Errorf("must define a WS URL for backend %s", name)
		}

		if config.BackendOptions.ResponseTimeoutSeconds != 0 {
			timeout := secondsToDuration(config.BackendOptions.ResponseTimeoutSeconds)
			opts = append(opts, WithTimeout(timeout))
		}
		if config.BackendOptions.MaxRetries != 0 {
			opts = append(opts, WithMaxRetries(config.BackendOptions.MaxRetries))
		}
		if config.BackendOptions.MaxResponseSizeBytes != 0 {
			opts = append(opts, WithMaxResponseSize(config.BackendOptions.MaxResponseSizeBytes))
		}
		if config.BackendOptions.OutOfServiceSeconds != 0 {
			opts = append(opts, WithOutOfServiceDuration(secondsToDuration(config.BackendOptions.OutOfServiceSeconds)))
		}
		if cfg.MaxRPS != 0 {
			opts = append(opts, WithMaxRPS(cfg.MaxRPS))
		}
		if cfg.MaxWSConns != 0 {
			opts = append(opts, WithMaxWSConns(cfg.MaxWSConns))
		}
		if cfg.Password != "" {
			passwordVal, err := ReadFromEnvOrConfig(cfg.Password)
			if err != nil {
				return nil, err
			}
			opts = append(opts, WithBasicAuth(cfg.Username, passwordVal))
		}
		tlsConfig, err := configureBackendTLS(cfg)
		if err != nil {
			return nil, err
		}
		if tlsConfig != nil {
			log.Info("using custom TLS config for backend", "name", name)
			opts = append(opts, WithTLSConfig(tlsConfig))
		}
		if cfg.StripTrailingXFF {
			opts = append(opts, WithStrippedTrailingXFF())
		}
		opts = append(opts, WithProxydIP(os.Getenv("PROXYD_IP")))
		back := NewBackend(name, rpcURL, wsURL, lim, rpcRequestSemaphore, opts...)
		backendNames = append(backendNames, name)
		backendsByName[name] = back
		log.Info("configured backend", "name", name, "rpc_url", rpcURL, "ws_url", wsURL)
	}

	backendGroups := make(map[string]*BackendGroup)
	for bgName, bg := range config.BackendGroups {
		backends := make([]*Backend, 0)
		for _, bName := range bg.Backends {
			if backendsByName[bName] == nil {
				return nil, fmt.Errorf("backend %s is not defined", bName)
			}
			backends = append(backends, backendsByName[bName])
		}
		group := &BackendGroup{
			Name:     bgName,
			Backends: backends,
		}
		backendGroups[bgName] = group
	}

	var wsBackendGroup *BackendGroup
	if config.WSBackendGroup != "" {
		wsBackendGroup = backendGroups[config.WSBackendGroup]
		if wsBackendGroup == nil {
			return nil, fmt.Errorf("ws backend group %s does not exist", config.WSBackendGroup)
		}
	}

	if wsBackendGroup == nil && config.Server.WSPort != 0 {
		return nil, fmt.Errorf("a ws port was defined, but no ws group was defined")
	}

	for _, bg := range config.RPCMethodMappings {
		if backendGroups[bg] == nil {
			return nil, fmt.Errorf("undefined backend group %s", bg)
		}
	}

	var resolvedAuth map[string]string

	if config.Authentication != nil {
		resolvedAuth = make(map[string]string)
		for secret, alias := range config.Authentication {
			resolvedSecret, err := ReadFromEnvOrConfig(secret)
			if err != nil {
				return nil, err
			}
			resolvedAuth[resolvedSecret] = alias
		}
	}

	var (
		rpcCache    RPCCache
		blockNumLVC *EthLastValueCache
		gasPriceLVC *EthLastValueCache
	)
	if config.Cache.Enabled {
		var (
			cache      Cache
			blockNumFn GetLatestBlockNumFn
			gasPriceFn GetLatestGasPriceFn
		)

		if config.Cache.BlockSyncRPCURL == "" {
			return nil, fmt.Errorf("block sync node required for caching")
		}
		blockSyncRPCURL, err := ReadFromEnvOrConfig(config.Cache.BlockSyncRPCURL)
		if err != nil {
			return nil, err
		}

		if redisURL != "" {
			if cache, err = newRedisCache(redisURL); err != nil {
				return nil, err
			}
		} else {
			log.Warn("redis is not configured, using in-memory cache")
			cache = newMemoryCache()
		}
		// Ideally, the BlockSyncRPCURL should be the sequencer or an HA replica that's not far behind
		ethClient, err := ethclient.Dial(blockSyncRPCURL)
		if err != nil {
			return nil, err
		}
		defer ethClient.Close()

		blockNumLVC, blockNumFn = makeGetLatestBlockNumFn(ethClient, cache)
		gasPriceLVC, gasPriceFn = makeGetLatestGasPriceFn(ethClient, cache)
		rpcCache = newRPCCache(newCacheWithCompression(cache), blockNumFn, gasPriceFn, config.Cache.NumBlockConfirmations)
	}

	srv := NewServer(
		backendGroups,
		wsBackendGroup,
		NewStringSetFromStrings(config.WSMethodWhitelist),
		config.RPCMethodMappings,
		config.Server.MaxBodySizeBytes,
		resolvedAuth,
		secondsToDuration(config.Server.TimeoutSeconds),
		config.Server.MaxUpstreamBatchSize,
		rpcCache,
	)

	if config.Metrics.Enabled {
		addr := fmt.Sprintf("%s:%d", config.Metrics.Host, config.Metrics.Port)
		log.Info("starting metrics server", "addr", addr)
		go func() {
			if err := http.ListenAndServe(addr, promhttp.Handler()); err != nil {
				log.Error("error starting metrics server", "err", err)
			}
		}()
	}

	// To allow integration tests to cleanly come up, wait
	// 10ms to give the below goroutines enough time to
	// encounter an error creating their servers
	errTimer := time.NewTimer(10 * time.Millisecond)

	if config.Server.RPCPort != 0 {
		go func() {
			if err := srv.RPCListenAndServe(config.Server.RPCHost, config.Server.RPCPort); err != nil {
				if errors.Is(err, http.ErrServerClosed) {
					log.Info("RPC server shut down")
					return
				}
				log.Crit("error starting RPC server", "err", err)
			}
		}()
	}

	if config.Server.WSPort != 0 {
		go func() {
			if err := srv.WSListenAndServe(config.Server.WSHost, config.Server.WSPort); err != nil {
				if errors.Is(err, http.ErrServerClosed) {
					log.Info("WS server shut down")
					return
				}
				log.Crit("error starting WS server", "err", err)
			}
		}()
	}

	<-errTimer.C
	log.Info("started proxyd")

	return func() {
		log.Info("shutting down proxyd")
		if blockNumLVC != nil {
			blockNumLVC.Stop()
		}
		if gasPriceLVC != nil {
			gasPriceLVC.Stop()
		}
		srv.Shutdown()
		if err := lim.FlushBackendWSConns(backendNames); err != nil {
			log.Error("error flushing backend ws conns", "err", err)
		}
		log.Info("goodbye")
	}, nil
}

func secondsToDuration(seconds int) time.Duration {
	return time.Duration(seconds) * time.Second
}

func configureBackendTLS(cfg *BackendConfig) (*tls.Config, error) {
	if cfg.CAFile == "" {
		return nil, nil
	}

	tlsConfig, err := CreateTLSClient(cfg.CAFile)
	if err != nil {
		return nil, err
	}

	if cfg.ClientCertFile != "" && cfg.ClientKeyFile != "" {
		cert, err := ParseKeyPair(cfg.ClientCertFile, cfg.ClientKeyFile)
		if err != nil {
			return nil, err
		}
		tlsConfig.Certificates = []tls.Certificate{cert}
	}

	return tlsConfig, nil
}

func makeUint64LastValueFn(client *ethclient.Client, cache Cache, key string, updater lvcUpdateFn) (*EthLastValueCache, func(context.Context) (uint64, error)) {
	lvc := newLVC(client, cache, key, updater)
	lvc.Start()
	return lvc, func(ctx context.Context) (uint64, error) {
		value, err := lvc.Read(ctx)
		if err != nil {
			return 0, err
		}
		if value == "" {
			return 0, fmt.Errorf("%s is unavailable", key)
		}
		valueUint, err := strconv.ParseUint(value, 10, 64)
		if err != nil {
			return 0, err
		}
		return valueUint, nil
	}
}

func makeGetLatestBlockNumFn(client *ethclient.Client, cache Cache) (*EthLastValueCache, GetLatestBlockNumFn) {
	return makeUint64LastValueFn(client, cache, "lvc:block_number", func(ctx context.Context, c *ethclient.Client) (string, error) {
		blockNum, err := c.BlockNumber(ctx)
		return strconv.FormatUint(blockNum, 10), err
	})
}

func makeGetLatestGasPriceFn(client *ethclient.Client, cache Cache) (*EthLastValueCache, GetLatestGasPriceFn) {
	return makeUint64LastValueFn(client, cache, "lvc:gas_price", func(ctx context.Context, c *ethclient.Client) (string, error) {
		gasPrice, err := c.SuggestGasPrice(ctx)
		if err != nil {
			return "", err
		}
		return gasPrice.String(), nil
	})
}
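Start validates the config, wires up backends, rate limiting, caching, and the RPC/WS/metrics servers, and hands back a shutdown closure. A hedged sketch of embedding it, in the same spirit as the integration tests above; the file name config.toml and the main package are assumptions, not part of this change:

```go
package main

import (
	"os"
	"os/signal"
	"syscall"

	"github.com/BurntSushi/toml"
	"github.com/ethereum-optimism/optimism/proxyd"
)

// Hypothetical embedding of proxyd: decode a TOML config (same format as the
// testdata files above), call Start, and invoke the returned closure on SIGTERM.
func main() {
	config := new(proxyd.Config)
	if _, err := toml.DecodeFile("config.toml", config); err != nil {
		panic(err)
	}

	shutdown, err := proxyd.Start(config)
	if err != nil {
		panic(err)
	}

	sig := make(chan os.Signal, 1)
	signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)
	<-sig
	shutdown()
}
```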
265
proxyd/proxyd/rate_limiter.go
Normal file
@ -0,0 +1,265 @@
|
||||
package proxyd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/go-redis/redis/v8"
|
||||
)
|
||||
|
||||
const MaxRPSScript = `
|
||||
local current
|
||||
current = redis.call("incr", KEYS[1])
|
||||
if current == 1 then
|
||||
redis.call("expire", KEYS[1], 1)
|
||||
end
|
||||
return current
|
||||
`
|
||||
|
||||
const MaxConcurrentWSConnsScript = `
|
||||
redis.call("sadd", KEYS[1], KEYS[2])
|
||||
local total = 0
|
||||
local scanres = redis.call("sscan", KEYS[1], 0)
|
||||
for _, k in ipairs(scanres[2]) do
|
||||
local value = redis.call("get", k)
|
||||
if value then
|
||||
total = total + value
|
||||
end
|
||||
end
|
||||
|
||||
if total < tonumber(ARGV[1]) then
|
||||
redis.call("incr", KEYS[2])
|
||||
redis.call("expire", KEYS[2], 300)
|
||||
return true
|
||||
end
|
||||
|
||||
return false
|
||||
`
|
||||
|
||||
type RateLimiter interface {
|
||||
IsBackendOnline(name string) (bool, error)
|
||||
    SetBackendOffline(name string, duration time.Duration) error
    IncBackendRPS(name string) (int, error)
    IncBackendWSConns(name string, max int) (bool, error)
    DecBackendWSConns(name string) error
    FlushBackendWSConns(names []string) error
}

type RedisRateLimiter struct {
    rdb       *redis.Client
    randID    string
    touchKeys map[string]time.Duration
    tkMtx     sync.Mutex
}

func NewRedisRateLimiter(url string) (RateLimiter, error) {
    opts, err := redis.ParseURL(url)
    if err != nil {
        return nil, err
    }
    rdb := redis.NewClient(opts)
    if err := rdb.Ping(context.Background()).Err(); err != nil {
        return nil, wrapErr(err, "error connecting to redis")
    }
    out := &RedisRateLimiter{
        rdb:       rdb,
        randID:    randStr(20),
        touchKeys: make(map[string]time.Duration),
    }
    go out.touch()
    return out, nil
}

func (r *RedisRateLimiter) IsBackendOnline(name string) (bool, error) {
    exists, err := r.rdb.Exists(context.Background(), fmt.Sprintf("backend:%s:offline", name)).Result()
    if err != nil {
        RecordRedisError("IsBackendOnline")
        return false, wrapErr(err, "error getting backend availability")
    }

    return exists == 0, nil
}

func (r *RedisRateLimiter) SetBackendOffline(name string, duration time.Duration) error {
    if duration == 0 {
        return nil
    }
    err := r.rdb.SetEX(
        context.Background(),
        fmt.Sprintf("backend:%s:offline", name),
        1,
        duration,
    ).Err()
    if err != nil {
        RecordRedisError("SetBackendOffline")
        return wrapErr(err, "error setting backend unavailable")
    }
    return nil
}

func (r *RedisRateLimiter) IncBackendRPS(name string) (int, error) {
    cmd := r.rdb.Eval(
        context.Background(),
        MaxRPSScript,
        []string{fmt.Sprintf("backend:%s:ratelimit", name)},
    )
    rps, err := cmd.Int()
    if err != nil {
        RecordRedisError("IncBackendRPS")
        return -1, wrapErr(err, "error upserting backend rate limit")
    }
    return rps, nil
}

func (r *RedisRateLimiter) IncBackendWSConns(name string, max int) (bool, error) {
    connsKey := fmt.Sprintf("proxy:%s:wsconns:%s", r.randID, name)
    r.tkMtx.Lock()
    r.touchKeys[connsKey] = 5 * time.Minute
    r.tkMtx.Unlock()
    cmd := r.rdb.Eval(
        context.Background(),
        MaxConcurrentWSConnsScript,
        []string{
            fmt.Sprintf("backend:%s:proxies", name),
            connsKey,
        },
        max,
    )
    incremented, err := cmd.Bool()
    // a Lua false return gets coerced to redis.Nil, see https://redis.io/commands/eval#conversion-between-lua-and-redis-data-types
    if err == redis.Nil {
        return false, nil
    }
    if err != nil {
        RecordRedisError("IncBackendWSConns")
        return false, wrapErr(err, "error incrementing backend ws conns")
    }
    return incremented, nil
}

func (r *RedisRateLimiter) DecBackendWSConns(name string) error {
    connsKey := fmt.Sprintf("proxy:%s:wsconns:%s", r.randID, name)
    err := r.rdb.Decr(context.Background(), connsKey).Err()
    if err != nil {
        RecordRedisError("DecBackendWSConns")
        return wrapErr(err, "error decrementing backend ws conns")
    }
    return nil
}

func (r *RedisRateLimiter) FlushBackendWSConns(names []string) error {
    ctx := context.Background()
    for _, name := range names {
        connsKey := fmt.Sprintf("proxy:%s:wsconns:%s", r.randID, name)
        err := r.rdb.SRem(
            ctx,
            fmt.Sprintf("backend:%s:proxies", name),
            connsKey,
        ).Err()
        if err != nil {
            return wrapErr(err, "error flushing backend ws conns")
        }
        err = r.rdb.Del(ctx, connsKey).Err()
        if err != nil {
            return wrapErr(err, "error flushing backend ws conns")
        }
    }
    return nil
}

func (r *RedisRateLimiter) touch() {
    for {
        r.tkMtx.Lock()
        for key, dur := range r.touchKeys {
            if err := r.rdb.Expire(context.Background(), key, dur).Err(); err != nil {
                RecordRedisError("touch")
                log.Error("error touching redis key", "key", key, "err", err)
            }
        }
        r.tkMtx.Unlock()
        time.Sleep(5 * time.Second)
    }
}

type LocalRateLimiter struct {
    deadBackends   map[string]time.Time
    backendRPS     map[string]int
    backendWSConns map[string]int
    mtx            sync.RWMutex
}

func NewLocalRateLimiter() *LocalRateLimiter {
    out := &LocalRateLimiter{
        deadBackends:   make(map[string]time.Time),
        backendRPS:     make(map[string]int),
        backendWSConns: make(map[string]int),
    }
    go out.clear()
    return out
}

func (l *LocalRateLimiter) IsBackendOnline(name string) (bool, error) {
    l.mtx.RLock()
    defer l.mtx.RUnlock()
    return l.deadBackends[name].Before(time.Now()), nil
}

func (l *LocalRateLimiter) SetBackendOffline(name string, duration time.Duration) error {
    l.mtx.Lock()
    defer l.mtx.Unlock()
    l.deadBackends[name] = time.Now().Add(duration)
    return nil
}

func (l *LocalRateLimiter) IncBackendRPS(name string) (int, error) {
    l.mtx.Lock()
    defer l.mtx.Unlock()
    l.backendRPS[name] += 1
    return l.backendRPS[name], nil
}

func (l *LocalRateLimiter) IncBackendWSConns(name string, max int) (bool, error) {
    l.mtx.Lock()
    defer l.mtx.Unlock()
    if l.backendWSConns[name] == max {
        return false, nil
    }
    l.backendWSConns[name] += 1
    return true, nil
}

func (l *LocalRateLimiter) DecBackendWSConns(name string) error {
    l.mtx.Lock()
    defer l.mtx.Unlock()
    if l.backendWSConns[name] == 0 {
        return nil
    }
    l.backendWSConns[name] -= 1
    return nil
}

func (l *LocalRateLimiter) FlushBackendWSConns(names []string) error {
    return nil
}

func (l *LocalRateLimiter) clear() {
    for {
        time.Sleep(time.Second)
        l.mtx.Lock()
        l.backendRPS = make(map[string]int)
        l.mtx.Unlock()
    }
}

func randStr(l int) string {
    b := make([]byte, l)
    if _, err := rand.Read(b); err != nil {
        panic(err)
    }
    return hex.EncodeToString(b)
}
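
// Illustrative usage sketch (not part of the original file): how a caller might
// combine the RateLimiter methods above to gate traffic to a backend. The
// backend name and the 100 RPS cap are assumptions made for the example only.
func exampleLimitBackend(limiter RateLimiter) error {
    const backendName = "backend-a" // placeholder name
    online, err := limiter.IsBackendOnline(backendName)
    if err != nil {
        return err
    }
    if !online {
        return fmt.Errorf("backend %s is currently out of service", backendName)
    }
    rps, err := limiter.IncBackendRPS(backendName)
    if err != nil {
        return err
    }
    if rps > 100 {
        // Take the backend out of rotation for a minute once it exceeds the cap.
        return limiter.SetBackendOffline(backendName, time.Minute)
    }
    return nil
}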
154
proxyd/proxyd/rpc.go
Normal file
@ -0,0 +1,154 @@
package proxyd

import (
    "encoding/json"
    "io"
    "io/ioutil"
    "strings"
)

type RPCReq struct {
    JSONRPC string          `json:"jsonrpc"`
    Method  string          `json:"method"`
    Params  json.RawMessage `json:"params"`
    ID      json.RawMessage `json:"id"`
}

type RPCRes struct {
    JSONRPC string
    Result  interface{}
    Error   *RPCErr
    ID      json.RawMessage
}

type rpcResJSON struct {
    JSONRPC string          `json:"jsonrpc"`
    Result  interface{}     `json:"result,omitempty"`
    Error   *RPCErr         `json:"error,omitempty"`
    ID      json.RawMessage `json:"id"`
}

type nullResultRPCRes struct {
    JSONRPC string          `json:"jsonrpc"`
    Result  interface{}     `json:"result"`
    ID      json.RawMessage `json:"id"`
}

func (r *RPCRes) IsError() bool {
    return r.Error != nil
}

func (r *RPCRes) MarshalJSON() ([]byte, error) {
    if r.Result == nil && r.Error == nil {
        return json.Marshal(&nullResultRPCRes{
            JSONRPC: r.JSONRPC,
            Result:  nil,
            ID:      r.ID,
        })
    }

    return json.Marshal(&rpcResJSON{
        JSONRPC: r.JSONRPC,
        Result:  r.Result,
        Error:   r.Error,
        ID:      r.ID,
    })
}

type RPCErr struct {
    Code          int    `json:"code"`
    Message       string `json:"message"`
    HTTPErrorCode int    `json:"-"`
}

func (r *RPCErr) Error() string {
    return r.Message
}

func IsValidID(id json.RawMessage) bool {
    // handle the case where the ID is a string
    if strings.HasPrefix(string(id), "\"") && strings.HasSuffix(string(id), "\"") {
        return len(id) > 2
    }

    // technically allows a boolean/null ID, but so does Geth
    // https://github.com/ethereum/go-ethereum/blob/master/rpc/json.go#L72
    return len(id) > 0 && id[0] != '{' && id[0] != '['
}

func ParseRPCReq(body []byte) (*RPCReq, error) {
    req := new(RPCReq)
    if err := json.Unmarshal(body, req); err != nil {
        return nil, ErrParseErr
    }

    return req, nil
}

func ParseBatchRPCReq(body []byte) ([]json.RawMessage, error) {
    batch := make([]json.RawMessage, 0)
    if err := json.Unmarshal(body, &batch); err != nil {
        return nil, err
    }

    return batch, nil
}

func ParseRPCRes(r io.Reader) (*RPCRes, error) {
    body, err := ioutil.ReadAll(r)
    if err != nil {
        return nil, wrapErr(err, "error reading RPC response")
    }

    res := new(RPCRes)
    if err := json.Unmarshal(body, res); err != nil {
        return nil, wrapErr(err, "error unmarshaling RPC response")
    }

    return res, nil
}

func ValidateRPCReq(req *RPCReq) error {
    if req.JSONRPC != JSONRPCVersion {
        return ErrInvalidRequest("invalid JSON-RPC version")
    }

    if req.Method == "" {
        return ErrInvalidRequest("no method specified")
    }

    if !IsValidID(req.ID) {
        return ErrInvalidRequest("invalid ID")
    }

    return nil
}

func NewRPCErrorRes(id json.RawMessage, err error) *RPCRes {
    var rpcErr *RPCErr
    if rr, ok := err.(*RPCErr); ok {
        rpcErr = rr
    } else {
        rpcErr = &RPCErr{
            Code:    JSONRPCErrorInternal,
            Message: err.Error(),
        }
    }

    return &RPCRes{
        JSONRPC: JSONRPCVersion,
        Error:   rpcErr,
        ID:      id,
    }
}

func IsBatch(raw []byte) bool {
    for _, c := range raw {
        // skip insignificant whitespace (http://www.ietf.org/rfc/rfc4627.txt)
        if c == 0x20 || c == 0x09 || c == 0x0a || c == 0x0d {
            continue
        }
        return c == '['
    }
    return false
}
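
// Illustrative sketch (not part of the original file): the parse/validate path a
// single, non-batch request body goes through before it can be forwarded. The
// "0x1" placeholder result stands in for whatever a backend would return.
func exampleHandleSingle(body []byte) *RPCRes {
    req, err := ParseRPCReq(body)
    if err != nil {
        // Parse failures have no usable ID, so the error response carries a null ID.
        return NewRPCErrorRes(nil, err)
    }
    if err := ValidateRPCReq(req); err != nil {
        return NewRPCErrorRes(req.ID, err)
    }
    // ...forward req to a backend here...
    return &RPCRes{JSONRPC: JSONRPCVersion, Result: "0x1", ID: req.ID}
}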
76
proxyd/proxyd/rpc_test.go
Normal file
@ -0,0 +1,76 @@
package proxyd

import (
    "encoding/json"
    "testing"

    "github.com/stretchr/testify/require"
)

func TestRPCResJSON(t *testing.T) {
    tests := []struct {
        name string
        in   *RPCRes
        out  string
    }{
        {
            "string result",
            &RPCRes{
                JSONRPC: JSONRPCVersion,
                Result:  "foobar",
                ID:      []byte("123"),
            },
            `{"jsonrpc":"2.0","result":"foobar","id":123}`,
        },
        {
            "object result",
            &RPCRes{
                JSONRPC: JSONRPCVersion,
                Result: struct {
                    Str string `json:"str"`
                }{
                    "test",
                },
                ID: []byte("123"),
            },
            `{"jsonrpc":"2.0","result":{"str":"test"},"id":123}`,
        },
        {
            "nil result",
            &RPCRes{
                JSONRPC: JSONRPCVersion,
                Result:  nil,
                ID:      []byte("123"),
            },
            `{"jsonrpc":"2.0","result":null,"id":123}`,
        },
        {
            "error result",
            &RPCRes{
                JSONRPC: JSONRPCVersion,
                Error: &RPCErr{
                    Code:    1234,
                    Message: "test err",
                },
                ID: []byte("123"),
            },
            `{"jsonrpc":"2.0","error":{"code":1234,"message":"test err"},"id":123}`,
        },
        {
            "string ID",
            &RPCRes{
                JSONRPC: JSONRPCVersion,
                Result:  "foobar",
                ID:      []byte("\"123\""),
            },
            `{"jsonrpc":"2.0","result":"foobar","id":"123"}`,
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            out, err := json.Marshal(tt.in)
            require.NoError(t, err)
            require.Equal(t, tt.out, string(out))
        })
    }
}
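
// Hypothetical additional test sketch (not part of the original commit) covering
// the IsValidID and IsBatch edge cases that the server relies on elsewhere.
func TestIsValidIDAndIsBatchSketch(t *testing.T) {
    require.True(t, IsValidID(json.RawMessage(`123`)))
    require.True(t, IsValidID(json.RawMessage(`"abc"`)))
    require.False(t, IsValidID(nil))                        // empty ID
    require.False(t, IsValidID(json.RawMessage(`""`)))      // empty string ID
    require.False(t, IsValidID(json.RawMessage(`{"a":1}`))) // objects are rejected
    require.False(t, IsValidID(json.RawMessage(`[1]`)))     // arrays are rejected

    require.True(t, IsBatch([]byte("  [{}]"))) // leading whitespace is skipped
    require.False(t, IsBatch([]byte(`{"jsonrpc":"2.0"}`)))
}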
533
proxyd/proxyd/server.go
Normal file
@ -0,0 +1,533 @@
package proxyd

import (
    "context"
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "io/ioutil"
    "math"
    "net/http"
    "strconv"
    "strings"
    "time"

    "github.com/ethereum/go-ethereum/log"
    "github.com/gorilla/mux"
    "github.com/gorilla/websocket"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/rs/cors"
)

const (
    ContextKeyAuth              = "authorization"
    ContextKeyReqID             = "req_id"
    ContextKeyXForwardedFor     = "x_forwarded_for"
    MaxBatchRPCCalls            = 100
    cacheStatusHdr              = "X-Proxyd-Cache-Status"
    defaultServerTimeout        = time.Second * 10
    maxLogLength                = 2000
    defaultMaxUpstreamBatchSize = 10
)

type Server struct {
    backendGroups        map[string]*BackendGroup
    wsBackendGroup       *BackendGroup
    wsMethodWhitelist    *StringSet
    rpcMethodMappings    map[string]string
    maxBodySize          int64
    authenticatedPaths   map[string]string
    timeout              time.Duration
    maxUpstreamBatchSize int
    upgrader             *websocket.Upgrader
    rpcServer            *http.Server
    wsServer             *http.Server
    cache                RPCCache
}

func NewServer(
    backendGroups map[string]*BackendGroup,
    wsBackendGroup *BackendGroup,
    wsMethodWhitelist *StringSet,
    rpcMethodMappings map[string]string,
    maxBodySize int64,
    authenticatedPaths map[string]string,
    timeout time.Duration,
    maxUpstreamBatchSize int,
    cache RPCCache,
) *Server {
    if cache == nil {
        cache = &NoopRPCCache{}
    }

    if maxBodySize == 0 {
        maxBodySize = math.MaxInt64
    }

    if timeout == 0 {
        timeout = defaultServerTimeout
    }

    if maxUpstreamBatchSize == 0 {
        maxUpstreamBatchSize = defaultMaxUpstreamBatchSize
    }

    return &Server{
        backendGroups:        backendGroups,
        wsBackendGroup:       wsBackendGroup,
        wsMethodWhitelist:    wsMethodWhitelist,
        rpcMethodMappings:    rpcMethodMappings,
        maxBodySize:          maxBodySize,
        authenticatedPaths:   authenticatedPaths,
        timeout:              timeout,
        maxUpstreamBatchSize: maxUpstreamBatchSize,
        cache:                cache,
        upgrader: &websocket.Upgrader{
            HandshakeTimeout: 5 * time.Second,
        },
    }
}

func (s *Server) RPCListenAndServe(host string, port int) error {
    hdlr := mux.NewRouter()
    hdlr.HandleFunc("/healthz", s.HandleHealthz).Methods("GET")
    hdlr.HandleFunc("/", s.HandleRPC).Methods("POST")
    hdlr.HandleFunc("/{authorization}", s.HandleRPC).Methods("POST")
    c := cors.New(cors.Options{
        AllowedOrigins: []string{"*"},
    })
    addr := fmt.Sprintf("%s:%d", host, port)
    s.rpcServer = &http.Server{
        Handler: instrumentedHdlr(c.Handler(hdlr)),
        Addr:    addr,
    }
    log.Info("starting HTTP server", "addr", addr)
    return s.rpcServer.ListenAndServe()
}

func (s *Server) WSListenAndServe(host string, port int) error {
    hdlr := mux.NewRouter()
    hdlr.HandleFunc("/", s.HandleWS)
    hdlr.HandleFunc("/{authorization}", s.HandleWS)
    c := cors.New(cors.Options{
        AllowedOrigins: []string{"*"},
    })
    addr := fmt.Sprintf("%s:%d", host, port)
    s.wsServer = &http.Server{
        Handler: instrumentedHdlr(c.Handler(hdlr)),
        Addr:    addr,
    }
    log.Info("starting WS server", "addr", addr)
    return s.wsServer.ListenAndServe()
}

func (s *Server) Shutdown() {
    if s.rpcServer != nil {
        _ = s.rpcServer.Shutdown(context.Background())
    }
    if s.wsServer != nil {
        _ = s.wsServer.Shutdown(context.Background())
    }
}

func (s *Server) HandleHealthz(w http.ResponseWriter, r *http.Request) {
    _, _ = w.Write([]byte("OK"))
}

func (s *Server) HandleRPC(w http.ResponseWriter, r *http.Request) {
    ctx := s.populateContext(w, r)
    if ctx == nil {
        return
    }
    var cancel context.CancelFunc
    ctx, cancel = context.WithTimeout(ctx, s.timeout)
    defer cancel()

    log.Info(
        "received RPC request",
        "req_id", GetReqID(ctx),
        "auth", GetAuthCtx(ctx),
        "user_agent", r.Header.Get("user-agent"),
    )

    body, err := ioutil.ReadAll(io.LimitReader(r.Body, s.maxBodySize))
    if err != nil {
        log.Error("error reading request body", "err", err)
        writeRPCError(ctx, w, nil, ErrInternal)
        return
    }
    RecordRequestPayloadSize(ctx, len(body))

    log.Info("Raw RPC request",
        "body", truncate(string(body)),
        "req_id", GetReqID(ctx),
        "auth", GetAuthCtx(ctx),
    )

    if IsBatch(body) {
        reqs, err := ParseBatchRPCReq(body)
        if err != nil {
            log.Error("error parsing batch RPC request", "err", err)
            RecordRPCError(ctx, BackendProxyd, MethodUnknown, err)
            writeRPCError(ctx, w, nil, ErrParseErr)
            return
        }

        if len(reqs) > MaxBatchRPCCalls {
            RecordRPCError(ctx, BackendProxyd, MethodUnknown, ErrTooManyBatchRequests)
            writeRPCError(ctx, w, nil, ErrTooManyBatchRequests)
            return
        }

        if len(reqs) == 0 {
            writeRPCError(ctx, w, nil, ErrInvalidRequest("must specify at least one batch call"))
            return
        }

        batchRes, batchContainsCached, err := s.handleBatchRPC(ctx, reqs, true)
        if err == context.DeadlineExceeded {
            writeRPCError(ctx, w, nil, ErrGatewayTimeout)
            return
        }
        if err != nil {
            writeRPCError(ctx, w, nil, ErrInternal)
            return
        }

        setCacheHeader(w, batchContainsCached)
        writeBatchRPCRes(ctx, w, batchRes)
        return
    }

    rawBody := json.RawMessage(body)
    backendRes, cached, err := s.handleBatchRPC(ctx, []json.RawMessage{rawBody}, false)
    if err != nil {
        writeRPCError(ctx, w, nil, ErrInternal)
        return
    }
    setCacheHeader(w, cached)
    writeRPCRes(ctx, w, backendRes[0])
}

func (s *Server) handleBatchRPC(ctx context.Context, reqs []json.RawMessage, isBatch bool) ([]*RPCRes, bool, error) {
    // A request set is transformed into groups of batches.
    // Each batch group maps to a forwarded JSON-RPC batch request (subject to maxUpstreamBatchSize constraints).
    // A groupID is used to decouple requests that share a duplicate ID so they're not part of the same batch that's
    // forwarded to the backend. This is done to ensure that the order of JSON-RPC responses matches the request order,
    // as the backend MAY return responses out of order.
    // NOTE: duplicate request IDs induce 1-sized JSON-RPC batches.
    type batchGroup struct {
        groupID      int
        backendGroup string
    }

    responses := make([]*RPCRes, len(reqs))
    batches := make(map[batchGroup][]batchElem)
    ids := make(map[string]int, len(reqs))

    for i := range reqs {
        parsedReq, err := ParseRPCReq(reqs[i])
        if err != nil {
            log.Info("error parsing RPC call", "source", "rpc", "err", err)
            responses[i] = NewRPCErrorRes(nil, err)
            continue
        }

        if err := ValidateRPCReq(parsedReq); err != nil {
            RecordRPCError(ctx, BackendProxyd, MethodUnknown, err)
            responses[i] = NewRPCErrorRes(nil, err)
            continue
        }

        group := s.rpcMethodMappings[parsedReq.Method]
        if group == "" {
            // use MethodUnknown below to prevent a DoS vector that fills up memory
            // with arbitrary method names.
            log.Info(
                "blocked request for non-whitelisted method",
                "source", "rpc",
                "req_id", GetReqID(ctx),
                "method", parsedReq.Method,
            )
            RecordRPCError(ctx, BackendProxyd, MethodUnknown, ErrMethodNotWhitelisted)
            responses[i] = NewRPCErrorRes(parsedReq.ID, ErrMethodNotWhitelisted)
            continue
        }

        id := string(parsedReq.ID)
        // If this is a duplicate request ID, move the request to a new batchGroup
        ids[id]++
        batchGroupID := ids[id]
        batchGroup := batchGroup{groupID: batchGroupID, backendGroup: group}
        batches[batchGroup] = append(batches[batchGroup], batchElem{parsedReq, i})
    }

    var cached bool
    for group, batch := range batches {
        var cacheMisses []batchElem

        for _, req := range batch {
            backendRes, _ := s.cache.GetRPC(ctx, req.Req)
            if backendRes != nil {
                responses[req.Index] = backendRes
                cached = true
            } else {
                cacheMisses = append(cacheMisses, req)
            }
        }

        // Create minibatches: each minibatch must be no larger than maxUpstreamBatchSize
        numBatches := int(math.Ceil(float64(len(cacheMisses)) / float64(s.maxUpstreamBatchSize)))
        for i := 0; i < numBatches; i++ {
            if ctx.Err() == context.DeadlineExceeded {
                log.Info("short-circuiting batch RPC",
                    "req_id", GetReqID(ctx),
                    "auth", GetAuthCtx(ctx),
                    "batch_index", i,
                )
                batchRPCShortCircuitsTotal.Inc()
                return nil, false, context.DeadlineExceeded
            }

            start := i * s.maxUpstreamBatchSize
            end := int(math.Min(float64(start+s.maxUpstreamBatchSize), float64(len(cacheMisses))))
            elems := cacheMisses[start:end]
            res, err := s.backendGroups[group.backendGroup].Forward(ctx, createBatchRequest(elems), isBatch)
            if err != nil {
                log.Error(
                    "error forwarding RPC batch",
                    "batch_size", len(elems),
                    "backend_group", group,
                    "err", err,
                )
                res = nil
                for _, elem := range elems {
                    res = append(res, NewRPCErrorRes(elem.Req.ID, err))
                }
            }

            for i := range elems {
                responses[elems[i].Index] = res[i]

                // TODO(inphi): batch put these
                if res[i].Error == nil && res[i].Result != nil {
                    if err := s.cache.PutRPC(ctx, elems[i].Req, res[i]); err != nil {
                        log.Warn(
                            "cache put error",
                            "req_id", GetReqID(ctx),
                            "err", err,
                        )
                    }
                }
            }
        }
    }

    return responses, cached, nil
}
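
// Illustrative sketch (not part of the original file) of the minibatch slicing
// used above: e.g. 23 cache misses with maxUpstreamBatchSize = 10 yield the
// slices [0:10], [10:20] and [20:23].
func exampleMinibatchBounds(numMisses, maxBatchSize int) [][2]int {
    numBatches := int(math.Ceil(float64(numMisses) / float64(maxBatchSize)))
    bounds := make([][2]int, 0, numBatches)
    for i := 0; i < numBatches; i++ {
        start := i * maxBatchSize
        end := int(math.Min(float64(start+maxBatchSize), float64(numMisses)))
        bounds = append(bounds, [2]int{start, end})
    }
    return bounds
}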

func (s *Server) HandleWS(w http.ResponseWriter, r *http.Request) {
    ctx := s.populateContext(w, r)
    if ctx == nil {
        return
    }

    log.Info("received WS connection", "req_id", GetReqID(ctx))

    clientConn, err := s.upgrader.Upgrade(w, r, nil)
    if err != nil {
        log.Error("error upgrading client conn", "auth", GetAuthCtx(ctx), "req_id", GetReqID(ctx), "err", err)
        return
    }

    proxier, err := s.wsBackendGroup.ProxyWS(ctx, clientConn, s.wsMethodWhitelist)
    if err != nil {
        if errors.Is(err, ErrNoBackends) {
            RecordUnserviceableRequest(ctx, RPCRequestSourceWS)
        }
        log.Error("error dialing ws backend", "auth", GetAuthCtx(ctx), "req_id", GetReqID(ctx), "err", err)
        clientConn.Close()
        return
    }

    activeClientWsConnsGauge.WithLabelValues(GetAuthCtx(ctx)).Inc()
    go func() {
        // Below call blocks so run it in a goroutine.
        if err := proxier.Proxy(ctx); err != nil {
            log.Error("error proxying websocket", "auth", GetAuthCtx(ctx), "req_id", GetReqID(ctx), "err", err)
        }
        activeClientWsConnsGauge.WithLabelValues(GetAuthCtx(ctx)).Dec()
    }()

    log.Info("accepted WS connection", "auth", GetAuthCtx(ctx), "req_id", GetReqID(ctx))
}

func (s *Server) populateContext(w http.ResponseWriter, r *http.Request) context.Context {
    vars := mux.Vars(r)
    authorization := vars["authorization"]

    if s.authenticatedPaths == nil {
        // handle the edge case where auth is disabled
        // but someone sends in an auth key anyway
        if authorization != "" {
            log.Info("blocked authenticated request against unauthenticated proxy")
            httpResponseCodesTotal.WithLabelValues("404").Inc()
            w.WriteHeader(404)
            return nil
        }
        return context.WithValue(
            r.Context(),
            ContextKeyReqID, // nolint:staticcheck
            randStr(10),
        )
    }

    if authorization == "" || s.authenticatedPaths[authorization] == "" {
        log.Info("blocked unauthorized request", "authorization", authorization)
        httpResponseCodesTotal.WithLabelValues("401").Inc()
        w.WriteHeader(401)
        return nil
    }

    xff := r.Header.Get("X-Forwarded-For")
    if xff == "" {
        ipPort := strings.Split(r.RemoteAddr, ":")
        if len(ipPort) == 2 {
            xff = ipPort[0]
        }
    }

    ctx := context.WithValue(r.Context(), ContextKeyAuth, s.authenticatedPaths[authorization]) // nolint:staticcheck
    ctx = context.WithValue(ctx, ContextKeyXForwardedFor, xff)                                 // nolint:staticcheck
    return context.WithValue(
        ctx,
        ContextKeyReqID, // nolint:staticcheck
        randStr(10),
    )
}

func setCacheHeader(w http.ResponseWriter, cached bool) {
    if cached {
        w.Header().Set(cacheStatusHdr, "HIT")
    } else {
        w.Header().Set(cacheStatusHdr, "MISS")
    }
}

func writeRPCError(ctx context.Context, w http.ResponseWriter, id json.RawMessage, err error) {
    var res *RPCRes
    if r, ok := err.(*RPCErr); ok {
        res = NewRPCErrorRes(id, r)
    } else {
        res = NewRPCErrorRes(id, ErrInternal)
    }
    writeRPCRes(ctx, w, res)
}

func writeRPCRes(ctx context.Context, w http.ResponseWriter, res *RPCRes) {
    statusCode := 200
    if res.IsError() && res.Error.HTTPErrorCode != 0 {
        statusCode = res.Error.HTTPErrorCode
    }

    w.Header().Set("content-type", "application/json")
    w.WriteHeader(statusCode)
    ww := &recordLenWriter{Writer: w}
    enc := json.NewEncoder(ww)
    if err := enc.Encode(res); err != nil {
        log.Error("error writing rpc response", "err", err)
        RecordRPCError(ctx, BackendProxyd, MethodUnknown, err)
        return
    }
    httpResponseCodesTotal.WithLabelValues(strconv.Itoa(statusCode)).Inc()
    RecordResponsePayloadSize(ctx, ww.Len)
}

func writeBatchRPCRes(ctx context.Context, w http.ResponseWriter, res []*RPCRes) {
    w.Header().Set("content-type", "application/json")
    w.WriteHeader(200)
    ww := &recordLenWriter{Writer: w}
    enc := json.NewEncoder(ww)
    if err := enc.Encode(res); err != nil {
        log.Error("error writing batch rpc response", "err", err)
        RecordRPCError(ctx, BackendProxyd, MethodUnknown, err)
        return
    }
    RecordResponsePayloadSize(ctx, ww.Len)
}

func instrumentedHdlr(h http.Handler) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        respTimer := prometheus.NewTimer(httpRequestDurationSumm)
        h.ServeHTTP(w, r)
        respTimer.ObserveDuration()
    }
}

func GetAuthCtx(ctx context.Context) string {
    authUser, ok := ctx.Value(ContextKeyAuth).(string)
    if !ok {
        return "none"
    }

    return authUser
}

func GetReqID(ctx context.Context) string {
    reqId, ok := ctx.Value(ContextKeyReqID).(string)
    if !ok {
        return ""
    }
    return reqId
}

func GetXForwardedFor(ctx context.Context) string {
    xff, ok := ctx.Value(ContextKeyXForwardedFor).(string)
    if !ok {
        return ""
    }
    return xff
}

type recordLenWriter struct {
    io.Writer
    Len int
}

func (w *recordLenWriter) Write(p []byte) (n int, err error) {
    n, err = w.Writer.Write(p)
    w.Len += n
    return
}

type NoopRPCCache struct{}

func (n *NoopRPCCache) GetRPC(context.Context, *RPCReq) (*RPCRes, error) {
    return nil, nil
}

func (n *NoopRPCCache) PutRPC(context.Context, *RPCReq, *RPCRes) error {
    return nil
}

func truncate(str string) string {
    if len(str) > maxLogLength {
        return str[:maxLogLength] + "..."
    } else {
        return str
    }
}

type batchElem struct {
    Req   *RPCReq
    Index int
}

func createBatchRequest(elems []batchElem) []*RPCReq {
    batch := make([]*RPCReq, len(elems))
    for i := range elems {
        batch[i] = elems[i].Req
    }
    return batch
}
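
// Hypothetical wiring sketch (not part of the original file): constructing a
// Server with the zero values that NewServer replaces with defaults (nil cache,
// zero body size, timeout and upstream batch size) and starting the HTTP
// listener. The backend groups, method mappings, host and port are illustrative.
func exampleRunServer(groups map[string]*BackendGroup, mappings map[string]string) error {
    srv := NewServer(
        groups,
        nil,            // no WS backend group
        NewStringSet(), // empty WS method whitelist
        mappings,
        0,   // maxBodySize: defaults to math.MaxInt64
        nil, // authenticatedPaths: nil disables auth
        0,   // timeout: defaults to defaultServerTimeout
        0,   // maxUpstreamBatchSize: defaults to defaultMaxUpstreamBatchSize
        nil, // cache: defaults to NoopRPCCache
    )
    defer srv.Shutdown()
    return srv.RPCListenAndServe("0.0.0.0", 8080)
}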
56
proxyd/proxyd/string_set.go
Normal file
@ -0,0 +1,56 @@
package proxyd

import "sync"

type StringSet struct {
    underlying map[string]bool
    mtx        sync.RWMutex
}

func NewStringSet() *StringSet {
    return &StringSet{
        underlying: make(map[string]bool),
    }
}

func NewStringSetFromStrings(in []string) *StringSet {
    underlying := make(map[string]bool)
    for _, str := range in {
        underlying[str] = true
    }
    return &StringSet{
        underlying: underlying,
    }
}

func (s *StringSet) Has(test string) bool {
    s.mtx.RLock()
    defer s.mtx.RUnlock()
    return s.underlying[test]
}

func (s *StringSet) Add(str string) {
    s.mtx.Lock()
    defer s.mtx.Unlock()
    s.underlying[str] = true
}

func (s *StringSet) Entries() []string {
    s.mtx.RLock()
    defer s.mtx.RUnlock()
    out := make([]string, len(s.underlying))
    var i int
    for entry := range s.underlying {
        out[i] = entry
        i++
    }
    return out
}

func (s *StringSet) Extend(in []string) *StringSet {
    out := NewStringSetFromStrings(in)
    for k := range s.underlying {
        out.Add(k)
    }
    return out
}
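
// Illustrative usage sketch (not part of the original file): building a method
// whitelist and extending it without mutating the original set. The method
// names are placeholders.
func exampleWhitelist() []string {
    base := NewStringSetFromStrings([]string{"eth_chainId", "eth_blockNumber"})
    extended := base.Extend([]string{"eth_subscribe"})

    _ = base.Has("eth_subscribe")     // false: Extend returns a new set
    _ = extended.Has("eth_subscribe") // true
    return extended.Entries()         // all three methods, in map iteration order
}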
33
proxyd/proxyd/tls.go
Normal file
@ -0,0 +1,33 @@
package proxyd

import (
    "crypto/tls"
    "crypto/x509"
    "errors"
    "io/ioutil"
)

func CreateTLSClient(ca string) (*tls.Config, error) {
    pem, err := ioutil.ReadFile(ca)
    if err != nil {
        return nil, wrapErr(err, "error reading CA")
    }

    roots := x509.NewCertPool()
    ok := roots.AppendCertsFromPEM(pem)
    if !ok {
        return nil, errors.New("error parsing TLS client cert")
    }

    return &tls.Config{
        RootCAs: roots,
    }, nil
}

func ParseKeyPair(crt, key string) (tls.Certificate, error) {
    cert, err := tls.LoadX509KeyPair(crt, key)
    if err != nil {
        return tls.Certificate{}, wrapErr(err, "error loading x509 key pair")
    }
    return cert, nil
}
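
// Hypothetical usage sketch (not part of the original file): combining the two
// helpers above into a client-side mutual-TLS configuration. The file paths are
// placeholders.
func exampleMutualTLSConfig() (*tls.Config, error) {
    tlsCfg, err := CreateTLSClient("/etc/proxyd/ca.crt")
    if err != nil {
        return nil, err
    }
    cert, err := ParseKeyPair("/etc/proxyd/client.crt", "/etc/proxyd/client.key")
    if err != nil {
        return nil, err
    }
    tlsCfg.Certificates = []tls.Certificate{cert}
    return tlsCfg, nil
}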