Merge pull request #8 from ethereum-optimism/feat-add-proxyd-bkp

feat: add proxyd to infra

commit 7cee326211
.circleci/config.yml
@@ -17,6 +17,7 @@ workflows:
      mapping: |
        op-conductor-mon/.* run-build-op-conductor-mon true
        op-ufm/.* run-build-op-ufm true
        proxyd/.* run-build-proxyd true
        .circleci/.* run-all true
        .github/.* run-all true
@@ -13,6 +13,9 @@ parameters:
  run-build-op-ufm:
    type: boolean
    default: false
  run-build-proxyd:
    type: boolean
    default: false
  run-all:
    type: boolean
    default: false
@@ -148,7 +151,7 @@ jobs:
      - run: sudo sed -i '13 i \ \ \ \ \ \ \ \ \ \ \ \ nameservers:' /etc/netplan/50-cloud-init.yaml
      - run: sudo sed -i '14 i \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ addresses:' /etc/netplan/50-cloud-init.yaml
      - run: sudo sed -i "s/addresses:/ addresses":" [8.8.8.8, 8.8.4.4] /g" /etc/netplan/50-cloud-init.yaml
      - run: cat /etc/netplan/50-cloud-init.yaml
      - run: sudo cat /etc/netplan/50-cloud-init.yaml
      - run: sudo netplan apply
      - run:
          name: Publish
@@ -380,11 +383,82 @@ workflows:
          docker_name: op-ufm
          docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
          docker_context: .
  op-proxyd:
    when:
      or: [<< pipeline.parameters.run-build-proxyd >>, << pipeline.parameters.run-all >>]
    jobs:
      - go-lint:
          name: proxyd-lint
          module: proxyd
      - go-test:
          name: proxyd-tests
          module: proxyd
      - docker-build:
          name: proxyd-docker-build
          docker_file: proxyd/Dockerfile
          docker_name: proxyd
          docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
          docker_context: .
  release:
    when:
      not:
        equal: [ scheduled_pipeline, << pipeline.trigger_source >> ]
    jobs:
      - hold:
          type: approval
          filters:
            tags:
              only: /^(proxyd|ufm-[a-z0-9\-]*|op-[a-z0-9\-]*)\/v.*/
            branches:
              ignore: /.*/
      - docker-build:
          name: op-ufm-docker-build
          filters:
            tags:
              only: /^op-ufm\/v.*/
            branches:
              ignore: /.*/
          docker_name: op-ufm
          docker_tags: <<pipeline.git.revision>>
          docker_context: .
          docker_file: op-ufm/Dockerfile
          context:
            - oplabs-gcr-release
          requires:
            - hold
      - docker-publish:
          name: op-ufm-docker-publish
          docker_name: op-ufm
          docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
          docker_tags: <<pipeline.git.revision>>
          context:
            - oplabs-gcr
          requires:
            - op-ufm-docker-build
      - docker-build:
          name: proxyd-docker-build
          filters:
            tags:
              only: /^proxyd\/v.*/
            branches:
              ignore: /.*/
          docker_name: proxyd
          docker_tags: <<pipeline.git.revision>>
          docker_context: .
          docker_file: proxyd/Dockerfile
          context:
            - oplabs-gcr-release
          requires:
            - hold
      - docker-publish:
          name: proxyd-docker-release
          filters:
            tags:
              only: /^proxyd\/v.*/
            branches:
              ignore: /.*/
          docker_name: proxyd
          docker_tags: <<pipeline.git.revision>>
          context:
            - oplabs-gcr-release
          requires:
            - proxyd-docker-build
.github/workflows/tag-service.yml (vendored, new file, 55 lines)
@@ -0,0 +1,55 @@
name: Tag Service

on:
  workflow_dispatch:
    inputs:
      bump:
        description: 'How much to bump the version by'
        required: true
        type: choice
        options:
          - major
          - minor
          - patch
          - prerelease
          - finalize-prerelease
      service:
        description: 'Which service to release'
        required: true
        type: choice
        options:
          - op-ufm
          - proxyd
      prerelease:
        description: Increment major/minor/patch as prerelease?
        required: false
        type: boolean
        default: false

jobs:
  release:
    runs-on: ubuntu-latest
    environment: op-stack-production
    steps:
      - uses: actions/checkout@v4
      - name: Fetch tags
        run: git fetch --tags origin --force
      - name: Setup Python 3.10
        uses: actions/setup-python@v5
        with:
          python-version: "3.10"
      - name: Install deps
        run: pip install -r requirements.txt
        working-directory: ops/tag-service
      - run: ops/tag-service/tag-service.py --bump="$BUMP" --service="$SERVICE"
        env:
          INPUT_GITHUB_TOKEN: ${{ github.token }}
          BUMP: ${{ github.event.inputs.bump }}
          SERVICE: ${{ github.event.inputs.service }}
        if: ${{ github.event.inputs.prerelease == 'false' }}
      - run: ops/tag-service/tag-service.py --bump="$BUMP" --service="$SERVICE" --pre-release
        env:
          INPUT_GITHUB_TOKEN: ${{ github.token }}
          BUMP: ${{ github.event.inputs.bump }}
          SERVICE: ${{ github.event.inputs.service }}
        if: ${{ github.event.inputs.prerelease == 'true' }}
ops/tag-service/.gitignore (vendored, new file, 1 line)
@@ -0,0 +1 @@
venv
ops/tag-service/README.md (new file, 21 lines)
@@ -0,0 +1,21 @@
# Tag Service

Tag Service is a GitHub Action which builds new tags and applies them to services in the monorepo.

It accepts:

* Service name
* Bump amount [major, minor, patch]
* Prerelease and Finalize-Prerelease (to add/remove `rc` versions)

It can be triggered from the GitHub Actions panel in the monorepo.

# Tag Tool

Tag Tool is a minimal rewrite of the Tag Service that lets operators prepare and commit tags from the command line.

It accepts:

* Service name
* Bump amount [major, minor, patch, prerelease, finalize-prerelease]

Tag Tool is meant to be run locally, and *does not* perform any write operations. Instead, it prints the git commands to the console for the operator to run.

Additionally, a special service name "op-stack" is available, which bumps versions for `op-node`, `op-batcher` and `op-proposer` from the highest semver amongst them.

To run Tag Tool locally, the only dependency is `pip install semver`.
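The version arithmetic both tools rely on comes from the `semver` package. A minimal sketch of the bump semantics (version strings here are illustrative, not real tags):

```python
# Sketch of the bump semantics tag-service.py and tag-tool.py apply.
# Requires: pip install semver
import semver

v = semver.VersionInfo.parse("4.6.1")   # e.g. proxyd's minimum version in tag-service.py
print(v.bump_minor())                   # 4.7.0
print(v.bump_patch())                   # 4.6.2

rc = semver.VersionInfo.parse("4.7.0-rc.2")
print(rc.bump_prerelease())             # 4.7.0-rc.3  (bump=prerelease)
print(rc.finalize_version())            # 4.7.0       (bump=finalize-prerelease)
```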
ops/tag-service/requirements.txt (new file, 2 lines)
@@ -0,0 +1,2 @@
click==8.1.3
semver==3.0.0-dev4
ops/tag-service/tag-service.py (new executable file, 124 lines)
@@ -0,0 +1,124 @@
#!/usr/bin/env python3
import logging.config
import os
import re
import subprocess
import sys

import click
import semver

# Minimum version numbers for packages migrating from legacy versioning.
MIN_VERSIONS = {
    'proxyd': '4.6.1',
}

VALID_BUMPS = ('major', 'minor', 'patch', 'prerelease', 'finalize-prerelease')

MESSAGE_TEMPLATE = '[tag-service-release] Tag {service} at {version}'

LOGGING_CONFIG = {
    'version': 1,
    'disable_existing_loggers': True,
    'formatters': {
        'standard': {
            'format': '%(asctime)s [%(levelname)s]: %(message)s'
        },
    },
    'handlers': {
        'default': {
            'level': 'INFO',
            'formatter': 'standard',
            'class': 'logging.StreamHandler',
            'stream': 'ext://sys.stderr'
        },
    },
    'loggers': {
        '': {
            'handlers': ['default'],
            'level': 'INFO',
            'propagate': False
        },
    }
}

logging.config.dictConfig(LOGGING_CONFIG)
log = logging.getLogger(__name__)


@click.command()
@click.option('--bump', required=True, type=click.Choice(VALID_BUMPS))
@click.option('--service', required=True, type=click.Choice(list(MIN_VERSIONS.keys())))
@click.option('--pre-release/--no-pre-release', default=False)
def tag_version(bump, service, pre_release):
    tags = subprocess.run(['git', 'tag', '--list'], capture_output=True, check=True) \
        .stdout.decode('utf-8').splitlines()

    # Keep only tags that match the service name, including tags for
    # prerelease (-rc.N) versions.
    version_pattern = f'^{service}/v\\d+\\.\\d+\\.\\d+(-rc\\.\\d+)?$'
    svc_versions = [t.replace(f'{service}/v', '') for t in tags if re.match(version_pattern, t)]
    svc_versions = sorted(svc_versions, key=lambda v: semver.Version.parse(v), reverse=True)

    if pre_release and bump == 'prerelease':
        raise Exception('Cannot use --bump=prerelease with --pre-release')

    if pre_release and bump == 'finalize-prerelease':
        raise Exception('Cannot use --bump=finalize-prerelease with --pre-release')

    if len(svc_versions) == 0:
        latest_version = MIN_VERSIONS[service]
    else:
        latest_version = svc_versions[0]

    latest_version = semver.Version.parse(latest_version)

    log.info(f'Latest version: v{latest_version}')

    if bump == 'major':
        bumped = latest_version.bump_major()
    elif bump == 'minor':
        bumped = latest_version.bump_minor()
    elif bump == 'patch':
        bumped = latest_version.bump_patch()
    elif bump == 'prerelease':
        bumped = latest_version.bump_prerelease()
    elif bump == 'finalize-prerelease':
        bumped = latest_version.finalize_version()
    else:
        raise Exception('Invalid bump type: {}'.format(bump))

    if pre_release:
        bumped = bumped.bump_prerelease()

    new_version = 'v' + str(bumped)
    new_tag = f'{service}/{new_version}'

    log.info(f'Bumped version: {new_version}')

    log.info('Configuring git')
    # The below env vars are set by GHA.
    gh_actor = os.environ['GITHUB_ACTOR']
    gh_token = os.environ['INPUT_GITHUB_TOKEN']
    gh_repo = os.environ['GITHUB_REPOSITORY']
    # Note: the token must be interpolated directly; a literal '$' here would
    # produce an invalid remote URL.
    origin_url = f'https://{gh_actor}:{gh_token}@github.com/{gh_repo}.git'
    subprocess.run(['git', 'config', 'user.name', gh_actor], check=True)
    subprocess.run(['git', 'config', 'user.email', f'{gh_actor}@users.noreply.github.com'], check=True)
    subprocess.run(['git', 'remote', 'set-url', 'origin', origin_url], check=True)

    log.info(f'Creating tag: {new_tag}')
    subprocess.run([
        'git',
        'tag',
        '-a',
        new_tag,
        '-m',
        MESSAGE_TEMPLATE.format(service=service, version=new_version)
    ], check=True)

    log.info('Pushing tag to origin')
    subprocess.run(['git', 'push', 'origin', new_tag], check=True)


if __name__ == '__main__':
    tag_version()
ops/tag-service/tag-tool.py (new file, 81 lines)
@@ -0,0 +1,81 @@
import argparse
import subprocess
import re

import semver

SERVICES = [
    'proxyd',
    'op-ufm',
    'op-conductor-mon',
]
VERSION_PATTERN = '^{service}/v\\d+\\.\\d+\\.\\d+(-rc\\.\\d+)?$'

GIT_TAG_COMMAND = 'git tag -a {tag} -m "{message}"'
GIT_PUSH_COMMAND = 'git push origin {tag}'


def new_tag(service, version, bump):
    if bump == 'major':
        bumped = version.bump_major()
    elif bump == 'minor':
        bumped = version.bump_minor()
    elif bump == 'patch':
        bumped = version.bump_patch()
    elif bump == 'prerelease':
        bumped = version.bump_prerelease()
    elif bump == 'finalize-prerelease':
        bumped = version.finalize_version()
    else:
        raise Exception('Invalid bump type: {}'.format(bump))
    return f'{service}/v{bumped}'


def latest_version(service):
    # Get the list of tags from the git repository.
    tags = subprocess.run(['git', 'tag', '--list', f'{service}/v*'], capture_output=True, check=True) \
        .stdout.decode('utf-8').splitlines()
    # Keep only tags that look like semver versions for this service, then
    # sort by semver precedence (a plain lexicographic sort would order
    # e.g. 10.0.0 before 9.0.0).
    pattern = VERSION_PATTERN.format(service=service)
    svc_versions = sorted(
        [t.replace(f'{service}/v', '') for t in tags if re.match(pattern, t)],
        key=semver.VersionInfo.parse)
    if len(svc_versions) == 0:
        raise Exception(f'No tags found for service: {service}')
    return svc_versions[-1]


def latest_among_services(services):
    latest = '0.0.0'
    for service in services:
        candidate = latest_version(service)
        if semver.compare(candidate, latest) > 0:
            latest = candidate
    return latest


def main():
    parser = argparse.ArgumentParser(description='Create a new git tag for a service')
    parser.add_argument('--service', type=str, help='The name of the service')
    parser.add_argument('--bump', type=str, help='The type of bump to apply to the version number')
    parser.add_argument('--message', type=str, help='Message to include in the git tag', default='[tag-tool-release]')
    args = parser.parse_args()

    service = args.service

    if service == 'op-stack':
        latest = latest_among_services(['op-node', 'op-batcher', 'op-proposer'])
    else:
        latest = latest_version(service)

    bumped = new_tag(service, semver.VersionInfo.parse(latest), args.bump)

    print(f'latest tag: {latest}')
    print(f'new tag: {bumped}')
    print('run the following commands to create the new tag:\n')
    # Special case for tagging op-node, op-batcher, and op-proposer together:
    # all three share the same semver.
    if args.service == 'op-stack':
        print(GIT_TAG_COMMAND.format(tag=bumped.replace('op-stack', 'op-node'), message=args.message))
        print(GIT_PUSH_COMMAND.format(tag=bumped.replace('op-stack', 'op-node')))
        print(GIT_TAG_COMMAND.format(tag=bumped.replace('op-stack', 'op-batcher'), message=args.message))
        print(GIT_PUSH_COMMAND.format(tag=bumped.replace('op-stack', 'op-batcher')))
        print(GIT_TAG_COMMAND.format(tag=bumped.replace('op-stack', 'op-proposer'), message=args.message))
        print(GIT_PUSH_COMMAND.format(tag=bumped.replace('op-stack', 'op-proposer')))
    else:
        print(GIT_TAG_COMMAND.format(tag=bumped, message=args.message))
        print(GIT_PUSH_COMMAND.format(tag=bumped))


if __name__ == "__main__":
    main()
proxyd/.gitignore (vendored, new file, 3 lines)
@@ -0,0 +1,3 @@
bin

config.toml
proxyd/CHANGELOG.md (new file, 252 lines)
@@ -0,0 +1,252 @@
# @eth-optimism/proxyd

## 3.14.1

### Patch Changes

- 5602deec7: chore(deps): bump github.com/prometheus/client_golang from 1.11.0 to 1.11.1 in /proxyd
- 6b3cf2070: Remove useless logging

## 3.14.0

### Minor Changes

- 9cc39bcfa: Add support for global method override rate limit
- 30db32862: Include nonce in sender rate limit

### Patch Changes

- b9bb1a98a: proxyd: Add req_id to log

## 3.13.0

### Minor Changes

- 6de891d3b: Add sender-based rate limiter

## 3.12.0

### Minor Changes

- e9f2c701: Allow disabling backend rate limiter
- ca45a85e: Support pattern matching in exempt origins/user agents
- f4faa44c: Adds server.log_level config

## 3.11.0

### Minor Changes

- b3c5eeec: Fixed JSON-RPC 2.0 specification compliance by adding the optional data field on an RPCError
- 01ae6625: Adds new Redis rate limiter

## 3.10.2

### Patch Changes

- 6bb35fd8: Add customizable whitelist error
- 7121648c: Batch metrics and max batch size

## 3.10.1

### Patch Changes

- b82a8f48: Add logging for origin and remote IP
- 1bf9559c: Carry over custom limit message in batches

## 3.10.0

### Minor Changes

- 157ccc84: Support per-method rate limiting

## 3.9.1

### Patch Changes

- dc4f6a06: Add logging/metrics

## 3.9.0

### Minor Changes

- b6f4bfcf: Add frontend rate limiting

### Patch Changes

- 406a4fce: Unwrap single RPC batches
- 915f3b28: Parameterize full RPC request logging

## 3.8.9

### Patch Changes

- 063c55cf: Use canned response for eth_accounts

## 3.8.8

### Patch Changes

- 58dc7adc: Improve robustness against unexpected JSON-RPC from upstream
- 552cd641: Fix concurrent write panic in WS

## 3.8.7

### Patch Changes

- 6f458607: Bump go-ethereum to 1.10.17

## 3.8.6

### Patch Changes

- d79d40c4: proxyd: Proxy requests using batch JSON-RPC

## 3.8.5

### Patch Changes

- 2a062b11: proxyd: Log sanitized RPC requests
- d9f058ce: proxyd: Reduced RPC request logging
- a4bfd9e7: proxyd: Limit the number of concurrent RPCs to backends

## 3.8.4

### Patch Changes

- 08329ba2: proxyd: Record redis cache operation latency
- ae112021: proxyd: Request-scoped context for fast batch RPC short-circuiting

## 3.8.3

### Patch Changes

- 160f4c3d: Update docker image to use golang 1.18.0

## 3.8.2

### Patch Changes

- ae18cea1: Don't hit Redis when the out of service interval is zero

## 3.8.1

### Patch Changes

- acf7dbd5: Update to go-ethereum v1.10.16

## 3.8.0

### Minor Changes

- 527448bb: Handle nil responses better

## 3.7.0

### Minor Changes

- 3c2926b1: Add debug cache status header to proxyd responses

## 3.6.0

### Minor Changes

- 096c5f20: proxyd: Allow cached RPCs to be evicted by redis
- 71d64834: Add caching for block-dependent RPCs
- fd2e1523: proxyd: Cache block-dependent RPCs
- 1760613c: Add integration tests and batching

## 3.5.0

### Minor Changes

- 025a3c0d: Add request/response payload size metrics to proxyd
- daf8db0b: Cache immutable RPC responses in proxyd
- 8aa89bf3: Add X-Forwarded-For header when proxying RPCs on proxyd

## 3.4.1

### Patch Changes

- 415164e1: Force proxyd build

## 3.4.0

### Minor Changes

- 4b56ed84: Various proxyd fixes

## 3.3.0

### Minor Changes

- 7b7ffd2e: Allows string RPC ids on proxyd

## 3.2.0

### Minor Changes

- 73484138: Adds ability to specify env vars in config

## 3.1.2

### Patch Changes

- 1b79aa62: Release proxyd

## 3.1.1

### Patch Changes

- b8802054: Trigger release of proxyd
- 34fcb277: Bump proxyd to test release build workflow

## 3.1.0

### Minor Changes

- da6138fd: Updated metrics, support local rate limiter

### Patch Changes

- 6c7f483b: Add support for additional SSL certificates in Docker container

## 3.0.0

### Major Changes

- abe231bf: Make endpoints match Geth, better logging

## 2.0.0

### Major Changes

- 6c50098b: Update metrics, support WS
- f827dbda: Brings back the ability to selectively route RPC methods to backend groups

### Minor Changes

- 8cc824e5: Updates proxyd to include additional error metrics.
- 9ba4c5e0: Update metrics, support authenticated endpoints
- 78d0f3f0: Put special errors in a dedicated metric, pass along the content-type header

### Patch Changes

- 6e6a55b1: Canary release

## 1.0.2

### Patch Changes

- b9d2fbee: Trigger releases

## 1.0.1

### Patch Changes

- 893623c9: Trigger patch releases for dockerhub

## 1.0.0

### Major Changes

- 28aabc41: Initial release of RPC proxy daemon
proxyd/Dockerfile (new file, 32 lines)
@@ -0,0 +1,32 @@
FROM golang:1.21.3-alpine3.18 AS builder

ARG GITCOMMIT=docker
ARG GITDATE=docker
ARG GITVERSION=docker

RUN apk add make jq git gcc musl-dev linux-headers

COPY ./proxyd /app

WORKDIR /app

RUN make proxyd

FROM alpine:3.18

RUN apk add bind-tools jq curl bash git redis

COPY ./proxyd/entrypoint.sh /bin/entrypoint.sh

RUN apk update && \
    apk add ca-certificates && \
    chmod +x /bin/entrypoint.sh

EXPOSE 8080

VOLUME /etc/proxyd

COPY --from=builder /app/bin/proxyd /bin/proxyd

ENTRYPOINT ["/bin/entrypoint.sh"]
CMD ["/bin/proxyd", "/etc/proxyd/proxyd.toml"]
proxyd/Dockerfile.ignore (new file, 3 lines)
@@ -0,0 +1,3 @@
# ignore everything but proxyd; proxyd defines all its dependencies in its go.mod
*
!/proxyd
proxyd/Makefile (new file, 25 lines)
@@ -0,0 +1,25 @@
LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT)
LDFLAGSSTRING +=-X main.GitDate=$(GITDATE)
LDFLAGSSTRING +=-X main.GitVersion=$(GITVERSION)
LDFLAGS := -ldflags "$(LDFLAGSSTRING)"

proxyd:
	go build -v $(LDFLAGS) -o ./bin/proxyd ./cmd/proxyd
.PHONY: proxyd

fmt:
	go mod tidy
	gofmt -w .
.PHONY: fmt

test:
	go test -v ./...
.PHONY: test

lint:
	go vet ./...
.PHONY: lint

test-fallback:
	go test -v ./... -test.run ^TestFallback$$
.PHONY: test-fallback
proxyd/README.md (new file, 146 lines)
@@ -0,0 +1,146 @@
# rpc-proxy

This tool implements `proxyd`, an RPC request router and proxy. It does the following things:

1. Whitelists RPC methods.
2. Routes RPC methods to groups of backend services.
3. Automatically retries failed backend requests.
4. Tracks backend consensus (`latest`, `safe`, `finalized` blocks), peer count and sync state.
5. Rewrites requests and responses to enforce consensus.
6. Load-balances requests across backend services.
7. Caches immutable responses from backends.
8. Provides metrics to measure request latency, error rates, and the like.

## Usage

Run `make proxyd` to build the binary. No additional dependencies are necessary.

To configure `proxyd` for use, you'll need to create a configuration file to define your proxy backends and routing rules. Check out [example.config.toml](./example.config.toml) for how to do this, alongside a full list of all options with commentary.

Once you have a config file, start the daemon via `proxyd <path-to-config>.toml`.
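Once the daemon is up, any JSON-RPC client can talk to it. A minimal smoke test, assuming proxyd is listening on localhost port 8080 (the port the Dockerfile exposes; `rpc_host`/`rpc_port` are configurable) and that `eth_chainId` is mapped to a backend group in `rpc_method_mappings`:

```python
# Hypothetical smoke test against a locally running proxyd.
import requests

resp = requests.post(
    "http://localhost:8080",  # assumed rpc_host/rpc_port
    json={"jsonrpc": "2.0", "id": 1, "method": "eth_chainId", "params": []},
)
print(resp.json())  # e.g. {'jsonrpc': '2.0', 'id': 1, 'result': '0xa'}
```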
## Consensus awareness

Starting with v4.0.0, `proxyd` is aware of the consensus state of its backends. This helps minimize chain reorgs experienced by clients.

To enable this behavior, you must set the `consensus_aware` value to `true` in the backend group.

When consensus awareness is enabled, `proxyd` will poll the backends for their states and resolve a consensus group based on:
* the common ancestor `latest` block, i.e. if a backend is experiencing a fork, the fork won't be visible to the clients
* the lowest `safe` block
* the lowest `finalized` block
* peer count
* sync state

The backend group then acts as a round-robin load balancer, distributing traffic equally across healthy backends in the consensus group, increasing the availability of the proxy.

A backend is considered healthy if it meets the following criteria:
* not banned
* avg 1-min moving window error rate ≤ configurable threshold
* avg 1-min moving window latency ≤ configurable threshold
* peer count ≥ configurable threshold
* `latest` block lag ≤ configurable threshold
* last state update ≤ configurable threshold
* not currently syncing

When a backend is experiencing inconsistent consensus, high error rates or high latency, it will be banned for a configurable amount of time (default 5 minutes) and won't receive any traffic during this period.

## Tag rewrite

When consensus awareness is enabled, `proxyd` will enforce the consensus state transparently for all the clients.

For example, if a client requests the `eth_getBlockByNumber` method with the `latest` tag, `proxyd` will rewrite the request to use the resolved latest block from the consensus group and forward it to the backend, as illustrated in the sketch after the method list below.

The following request methods are rewritten:
* `eth_getLogs`
* `eth_newFilter`
* `eth_getBalance`
* `eth_getCode`
* `eth_getTransactionCount`
* `eth_call`
* `eth_getStorageAt`
* `eth_getBlockTransactionCountByNumber`
* `eth_getUncleCountByBlockNumber`
* `eth_getBlockByNumber`
* `eth_getTransactionByBlockNumberAndIndex`
* `eth_getUncleByBlockNumberAndIndex`
* `debug_getRawReceipts`

The `eth_blockNumber` response is overridden with the current consensus block.
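A sketch of the rewrite for the `eth_getBlockByNumber` example above (the block number is a hypothetical value):

```python
# What the client sends vs. what proxyd forwards, assuming the consensus
# group resolved `latest` to block 0x1b4 (hypothetical).
client_request = {
    "jsonrpc": "2.0", "id": 1,
    "method": "eth_getBlockByNumber",
    "params": ["latest", False],
}
forwarded_request = {
    "jsonrpc": "2.0", "id": 1,
    "method": "eth_getBlockByNumber",
    "params": ["0x1b4", False],  # "latest" replaced by the consensus block
}
```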
## Cacheable methods

The cache uses Redis and can be enabled for the following immutable methods:

* `eth_chainId`
* `net_version`
* `eth_getBlockTransactionCountByHash`
* `eth_getUncleCountByBlockHash`
* `eth_getBlockByHash`
* `eth_getTransactionByBlockHashAndIndex`
* `eth_getUncleByBlockHashAndIndex`
* `debug_getRawReceipts` (block hash only)

## Meta method `consensus_getReceipts`

To support backends with different specifications in the same backend group, proxyd exposes a convenient method to fetch receipts, abstracting away which specific backend will serve the request.

Each backend specifies its preferred method to fetch receipts with the `consensus_receipts_target` config; `consensus_getReceipts` calls are translated to that target.

This method takes a `blockNumberOrHash` (i.e. `tag|qty|hash`) and returns the receipts for all transactions in the block.

Request example
```json
{
  "jsonrpc": "2.0",
  "id": 1,
  "method": "consensus_getReceipts",
  "params": ["0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b"]
}
```

It currently supports translation to the following targets:
* `debug_getRawReceipts(blockOrHash)` (default)
* `alchemy_getTransactionReceipts(blockOrHash)`
* `parity_getBlockReceipts(blockOrHash)`
* `eth_getBlockReceipts(blockOrHash)`

The selected target is returned in the response, in a wrapped result.

Response example
```json
{
  "jsonrpc": "2.0",
  "id": 1,
  "result": {
    "method": "debug_getRawReceipts",
    "result": {
      // the actual raw result from backend
    }
  }
}
```

See [op-node receipt fetcher](https://github.com/ethereum-optimism/optimism/blob/186e46a47647a51a658e699e9ff047d39444c2de/op-node/sources/receipts.go#L186-L253).
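The request example above can be issued like any other JSON-RPC call (endpoint assumed, as in the usage sketch earlier):

```python
# Fetch receipts via the consensus_getReceipts meta-method.
import requests

resp = requests.post(
    "http://localhost:8080",  # assumed proxyd endpoint
    json={
        "jsonrpc": "2.0",
        "id": 1,
        "method": "consensus_getReceipts",
        "params": ["0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b"],
    },
)
body = resp.json()
print(body["result"]["method"])  # which target served it, e.g. "debug_getRawReceipts"
```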
## Metrics

See `metrics.go` for a list of all available metrics.

The metrics port is configurable via the `metrics.port` and `metrics.host` keys in the config.

## Adding Backend SSL Certificates in Docker

The Docker image runs on Alpine Linux. If you get SSL errors when connecting to a backend within Docker, you may need to add additional certificates to Alpine's certificate store. To do this, bind mount the certificate bundle into a file in `/usr/local/share/ca-certificates`. The `entrypoint.sh` script will then update the store with whatever is in the `ca-certificates` directory prior to starting `proxyd`.
proxyd/backend.go (new file, 1272 lines)
File diff suppressed because it is too large.
proxyd/backend_test.go (new file, 22 lines)
@@ -0,0 +1,22 @@
package proxyd

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestStripXFF(t *testing.T) {
	tests := []struct {
		in, out string
	}{
		{"1.2.3, 4.5.6, 7.8.9", "1.2.3"},
		{"1.2.3,4.5.6", "1.2.3"},
		{" 1.2.3 , 4.5.6 ", "1.2.3"},
	}

	for _, test := range tests {
		actual := stripXFF(test.in)
		assert.Equal(t, test.out, actual)
	}
}
proxyd/cache.go (new file, 192 lines)
@@ -0,0 +1,192 @@
package proxyd

import (
	"context"
	"encoding/json"
	"strings"
	"time"

	"github.com/ethereum/go-ethereum/rpc"
	"github.com/redis/go-redis/v9"

	"github.com/golang/snappy"
	lru "github.com/hashicorp/golang-lru"
)

type Cache interface {
	Get(ctx context.Context, key string) (string, error)
	Put(ctx context.Context, key string, value string) error
}

const (
	// assuming an average RPCRes size of 3 KB
	memoryCacheLimit = 4096
)

type cache struct {
	lru *lru.Cache
}

func newMemoryCache() *cache {
	rep, _ := lru.New(memoryCacheLimit)
	return &cache{rep}
}

func (c *cache) Get(ctx context.Context, key string) (string, error) {
	if val, ok := c.lru.Get(key); ok {
		return val.(string), nil
	}
	return "", nil
}

func (c *cache) Put(ctx context.Context, key string, value string) error {
	c.lru.Add(key, value)
	return nil
}

type redisCache struct {
	rdb    *redis.Client
	prefix string
	ttl    time.Duration
}

func newRedisCache(rdb *redis.Client, prefix string, ttl time.Duration) *redisCache {
	return &redisCache{rdb, prefix, ttl}
}

func (c *redisCache) namespaced(key string) string {
	if c.prefix == "" {
		return key
	}
	return strings.Join([]string{c.prefix, key}, ":")
}

func (c *redisCache) Get(ctx context.Context, key string) (string, error) {
	start := time.Now()
	val, err := c.rdb.Get(ctx, c.namespaced(key)).Result()
	redisCacheDurationSumm.WithLabelValues("GET").Observe(float64(time.Since(start).Milliseconds()))

	if err == redis.Nil {
		return "", nil
	} else if err != nil {
		RecordRedisError("CacheGet")
		return "", err
	}
	return val, nil
}

func (c *redisCache) Put(ctx context.Context, key string, value string) error {
	start := time.Now()
	err := c.rdb.SetEx(ctx, c.namespaced(key), value, c.ttl).Err()
	redisCacheDurationSumm.WithLabelValues("SETEX").Observe(float64(time.Since(start).Milliseconds()))

	if err != nil {
		RecordRedisError("CacheSet")
	}
	return err
}

type cacheWithCompression struct {
	cache Cache
}

func newCacheWithCompression(cache Cache) *cacheWithCompression {
	return &cacheWithCompression{cache}
}

func (c *cacheWithCompression) Get(ctx context.Context, key string) (string, error) {
	encodedVal, err := c.cache.Get(ctx, key)
	if err != nil {
		return "", err
	}
	if encodedVal == "" {
		return "", nil
	}
	val, err := snappy.Decode(nil, []byte(encodedVal))
	if err != nil {
		return "", err
	}
	return string(val), nil
}

func (c *cacheWithCompression) Put(ctx context.Context, key string, value string) error {
	encodedVal := snappy.Encode(nil, []byte(value))
	return c.cache.Put(ctx, key, string(encodedVal))
}

type RPCCache interface {
	GetRPC(ctx context.Context, req *RPCReq) (*RPCRes, error)
	PutRPC(ctx context.Context, req *RPCReq, res *RPCRes) error
}

type rpcCache struct {
	cache    Cache
	handlers map[string]RPCMethodHandler
}

func newRPCCache(cache Cache) RPCCache {
	staticHandler := &StaticMethodHandler{cache: cache}
	debugGetRawReceiptsHandler := &StaticMethodHandler{cache: cache,
		filterGet: func(req *RPCReq) bool {
			// cache only if the request is for a block hash

			var p []rpc.BlockNumberOrHash
			err := json.Unmarshal(req.Params, &p)
			if err != nil {
				return false
			}
			if len(p) != 1 {
				return false
			}
			return p[0].BlockHash != nil
		},
		filterPut: func(req *RPCReq, res *RPCRes) bool {
			// don't cache if response contains 0 receipts
			rawReceipts, ok := res.Result.([]interface{})
			if !ok {
				return false
			}
			return len(rawReceipts) > 0
		},
	}
	handlers := map[string]RPCMethodHandler{
		"eth_chainId":                           staticHandler,
		"net_version":                           staticHandler,
		"eth_getBlockTransactionCountByHash":    staticHandler,
		"eth_getUncleCountByBlockHash":          staticHandler,
		"eth_getBlockByHash":                    staticHandler,
		"eth_getTransactionByBlockHashAndIndex": staticHandler,
		"eth_getUncleByBlockHashAndIndex":       staticHandler,
		"debug_getRawReceipts":                  debugGetRawReceiptsHandler,
	}
	return &rpcCache{
		cache:    cache,
		handlers: handlers,
	}
}

func (c *rpcCache) GetRPC(ctx context.Context, req *RPCReq) (*RPCRes, error) {
	handler := c.handlers[req.Method]
	if handler == nil {
		return nil, nil
	}
	res, err := handler.GetRPCMethod(ctx, req)
	if err != nil {
		RecordCacheError(req.Method)
		return nil, err
	}
	if res == nil {
		RecordCacheMiss(req.Method)
	} else {
		RecordCacheHit(req.Method)
	}
	return res, nil
}

func (c *rpcCache) PutRPC(ctx context.Context, req *RPCReq, res *RPCRes) error {
	handler := c.handlers[req.Method]
	if handler == nil {
		return nil
	}
	return handler.PutRPCMethod(ctx, req, res)
}
proxyd/cache_test.go (new file, 213 lines)
@@ -0,0 +1,213 @@
package proxyd

import (
	"context"
	"strconv"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestRPCCacheImmutableRPCs(t *testing.T) {
	ctx := context.Background()

	cache := newRPCCache(newMemoryCache())
	ID := []byte(strconv.Itoa(1))

	rpcs := []struct {
		req  *RPCReq
		res  *RPCRes
		name string
	}{
		{
			req: &RPCReq{
				JSONRPC: "2.0",
				Method:  "eth_chainId",
				ID:      ID,
			},
			res: &RPCRes{
				JSONRPC: "2.0",
				Result:  "0xff",
				ID:      ID,
			},
			name: "eth_chainId",
		},
		{
			req: &RPCReq{
				JSONRPC: "2.0",
				Method:  "net_version",
				ID:      ID,
			},
			res: &RPCRes{
				JSONRPC: "2.0",
				Result:  "9999",
				ID:      ID,
			},
			name: "net_version",
		},
		{
			req: &RPCReq{
				JSONRPC: "2.0",
				Method:  "eth_getBlockTransactionCountByHash",
				Params:  mustMarshalJSON([]string{"0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238"}),
				ID:      ID,
			},
			res: &RPCRes{
				JSONRPC: "2.0",
				Result:  `{"eth_getBlockTransactionCountByHash":"!"}`,
				ID:      ID,
			},
			name: "eth_getBlockTransactionCountByHash",
		},
		{
			req: &RPCReq{
				JSONRPC: "2.0",
				Method:  "eth_getUncleCountByBlockHash",
				Params:  mustMarshalJSON([]string{"0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238"}),
				ID:      ID,
			},
			res: &RPCRes{
				JSONRPC: "2.0",
				Result:  `{"eth_getUncleCountByBlockHash":"!"}`,
				ID:      ID,
			},
			name: "eth_getUncleCountByBlockHash",
		},
		{
			req: &RPCReq{
				JSONRPC: "2.0",
				Method:  "eth_getBlockByHash",
				Params:  mustMarshalJSON([]string{"0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b", "false"}),
				ID:      ID,
			},
			res: &RPCRes{
				JSONRPC: "2.0",
				Result:  `{"eth_getBlockByHash":"!"}`,
				ID:      ID,
			},
			name: "eth_getBlockByHash",
		},
		{
			req: &RPCReq{
				JSONRPC: "2.0",
				Method:  "eth_getUncleByBlockHashAndIndex",
				Params:  mustMarshalJSON([]string{"0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238", "0x90"}),
				ID:      ID,
			},
			res: &RPCRes{
				JSONRPC: "2.0",
				Result:  `{"eth_getUncleByBlockHashAndIndex":"!"}`,
				ID:      ID,
			},
			name: "eth_getUncleByBlockHashAndIndex",
		},
		{
			req: &RPCReq{
				JSONRPC: "2.0",
				Method:  "debug_getRawReceipts",
				Params:  mustMarshalJSON([]string{"0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b"}),
				ID:      ID,
			},
			res: &RPCRes{
				JSONRPC: "2.0",
				Result:  []interface{}{"a"},
				ID:      ID,
			},
			name: "debug_getRawReceipts",
		},
	}

	for _, rpc := range rpcs {
		t.Run(rpc.name, func(t *testing.T) {
			err := cache.PutRPC(ctx, rpc.req, rpc.res)
			require.NoError(t, err)

			cachedRes, err := cache.GetRPC(ctx, rpc.req)
			require.NoError(t, err)
			require.Equal(t, rpc.res, cachedRes)
		})
	}
}

func TestRPCCacheUnsupportedMethod(t *testing.T) {
	ctx := context.Background()

	cache := newRPCCache(newMemoryCache())
	ID := []byte(strconv.Itoa(1))

	rpcs := []struct {
		req  *RPCReq
		name string
	}{
		{
			name: "eth_syncing",
			req: &RPCReq{
				JSONRPC: "2.0",
				Method:  "eth_syncing",
				ID:      ID,
			},
		},
		{
			name: "eth_blockNumber",
			req: &RPCReq{
				JSONRPC: "2.0",
				Method:  "eth_blockNumber",
				ID:      ID,
			},
		},
		{
			name: "eth_getBlockByNumber",
			req: &RPCReq{
				JSONRPC: "2.0",
				Method:  "eth_getBlockByNumber",
				ID:      ID,
			},
		},
		{
			name: "eth_getBlockRange",
			req: &RPCReq{
				JSONRPC: "2.0",
				Method:  "eth_getBlockRange",
				ID:      ID,
			},
		},
		{
			name: "eth_gasPrice",
			req: &RPCReq{
				JSONRPC: "2.0",
				Method:  "eth_gasPrice",
				ID:      ID,
			},
		},
		{
			name: "eth_call",
			req: &RPCReq{
				JSONRPC: "2.0",
				Method:  "eth_call",
				ID:      ID,
			},
		},
		{
			req: &RPCReq{
				JSONRPC: "2.0",
				Method:  "debug_getRawReceipts",
				Params:  mustMarshalJSON([]string{"0x100"}),
				ID:      ID,
			},
			name: "debug_getRawReceipts",
		},
	}

	for _, rpc := range rpcs {
		t.Run(rpc.name, func(t *testing.T) {
			fakeval := mustMarshalJSON([]string{rpc.name})
			err := cache.PutRPC(ctx, rpc.req, &RPCRes{Result: fakeval})
			require.NoError(t, err)

			cachedRes, err := cache.GetRPC(ctx, rpc.req)
			require.NoError(t, err)
			require.Nil(t, cachedRes)
		})
	}
}
proxyd/cmd/proxyd/main.go (new file, 122 lines)
@@ -0,0 +1,122 @@
package main

import (
	"fmt"
	"net"
	"net/http"
	"net/http/pprof"
	"os"
	"os/signal"
	"strconv"
	"strings"
	"syscall"

	"github.com/BurntSushi/toml"
	"golang.org/x/exp/slog"

	"github.com/ethereum/go-ethereum/log"

	"github.com/ethereum-optimism/optimism/proxyd"
)

var (
	GitVersion = ""
	GitCommit  = ""
	GitDate    = ""
)

func main() {
	// Set up logger with a default INFO level in case we fail to parse flags.
	// Otherwise the final critical log won't show what the parsing error was.
	proxyd.SetLogLevel(slog.LevelInfo)

	log.Info("starting proxyd", "version", GitVersion, "commit", GitCommit, "date", GitDate)

	if len(os.Args) < 2 {
		log.Crit("must specify a config file on the command line")
	}

	config := new(proxyd.Config)
	if _, err := toml.DecodeFile(os.Args[1], config); err != nil {
		log.Crit("error reading config file", "err", err)
	}

	// update log level from config
	logLevel, err := LevelFromString(config.Server.LogLevel)
	if err != nil {
		logLevel = log.LevelInfo
		if config.Server.LogLevel != "" {
			log.Warn("invalid server.log_level set: " + config.Server.LogLevel)
		}
	}
	proxyd.SetLogLevel(logLevel)

	if config.Server.EnablePprof {
		log.Info("starting pprof", "addr", "0.0.0.0", "port", "6060")
		pprofSrv := StartPProf("0.0.0.0", 6060)
		log.Info("started pprof server", "addr", pprofSrv.Addr)
		defer func() {
			if err := pprofSrv.Close(); err != nil {
				log.Error("failed to stop pprof server", "err", err)
			}
		}()
	}

	_, shutdown, err := proxyd.Start(config)
	if err != nil {
		log.Crit("error starting proxyd", "err", err)
	}

	sig := make(chan os.Signal, 1)
	signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)
	recvSig := <-sig
	log.Info("caught signal, shutting down", "signal", recvSig)
	shutdown()
}

// LevelFromString returns the appropriate Level from a string name.
// Useful for parsing command line args and configuration files.
// It also converts strings to lowercase.
// Note: copied from op-service/log to avoid a monorepo dependency.
func LevelFromString(lvlString string) (slog.Level, error) {
	lvlString = strings.ToLower(lvlString) // ignore case
	switch lvlString {
	case "trace", "trce":
		return log.LevelTrace, nil
	case "debug", "dbug":
		return log.LevelDebug, nil
	case "info":
		return log.LevelInfo, nil
	case "warn":
		return log.LevelWarn, nil
	case "error", "eror":
		return log.LevelError, nil
	case "crit":
		return log.LevelCrit, nil
	default:
		return log.LevelDebug, fmt.Errorf("unknown level: %v", lvlString)
	}
}

func StartPProf(hostname string, port int) *http.Server {
	mux := http.NewServeMux()

	// have to do below to support multiple servers, since the
	// pprof import only uses DefaultServeMux
	mux.Handle("/debug/pprof/", http.HandlerFunc(pprof.Index))
	mux.Handle("/debug/pprof/cmdline", http.HandlerFunc(pprof.Cmdline))
	mux.Handle("/debug/pprof/profile", http.HandlerFunc(pprof.Profile))
	mux.Handle("/debug/pprof/symbol", http.HandlerFunc(pprof.Symbol))
	mux.Handle("/debug/pprof/trace", http.HandlerFunc(pprof.Trace))

	addr := net.JoinHostPort(hostname, strconv.Itoa(port))
	srv := &http.Server{
		Handler: mux,
		Addr:    addr,
	}

	// nolint:errcheck
	go srv.ListenAndServe()

	return srv
}
proxyd/config.go (new file, 184 lines)
@@ -0,0 +1,184 @@
package proxyd

import (
	"fmt"
	"math/big"
	"os"
	"strings"
	"time"
)

type ServerConfig struct {
	RPCHost           string `toml:"rpc_host"`
	RPCPort           int    `toml:"rpc_port"`
	WSHost            string `toml:"ws_host"`
	WSPort            int    `toml:"ws_port"`
	MaxBodySizeBytes  int64  `toml:"max_body_size_bytes"`
	MaxConcurrentRPCs int64  `toml:"max_concurrent_rpcs"`
	LogLevel          string `toml:"log_level"`

	// TimeoutSeconds specifies the maximum time spent serving an HTTP request.
	// Note that it isn't used for websocket connections.
	TimeoutSeconds int `toml:"timeout_seconds"`

	MaxUpstreamBatchSize int `toml:"max_upstream_batch_size"`

	EnableRequestLog      bool `toml:"enable_request_log"`
	MaxRequestBodyLogLen  int  `toml:"max_request_body_log_len"`
	EnablePprof           bool `toml:"enable_pprof"`
	EnableXServedByHeader bool `toml:"enable_served_by_header"`
	AllowAllOrigins       bool `toml:"allow_all_origins"`
}

type CacheConfig struct {
	Enabled bool         `toml:"enabled"`
	TTL     TOMLDuration `toml:"ttl"`
}

type RedisConfig struct {
	URL       string `toml:"url"`
	Namespace string `toml:"namespace"`
}

type MetricsConfig struct {
	Enabled bool   `toml:"enabled"`
	Host    string `toml:"host"`
	Port    int    `toml:"port"`
}

type RateLimitConfig struct {
	UseRedis         bool                                `toml:"use_redis"`
	BaseRate         int                                 `toml:"base_rate"`
	BaseInterval     TOMLDuration                        `toml:"base_interval"`
	ExemptOrigins    []string                            `toml:"exempt_origins"`
	ExemptUserAgents []string                            `toml:"exempt_user_agents"`
	ErrorMessage     string                              `toml:"error_message"`
	MethodOverrides  map[string]*RateLimitMethodOverride `toml:"method_overrides"`
	IPHeaderOverride string                              `toml:"ip_header_override"`
}

type RateLimitMethodOverride struct {
	Limit    int          `toml:"limit"`
	Interval TOMLDuration `toml:"interval"`
	Global   bool         `toml:"global"`
}

type TOMLDuration time.Duration

func (t *TOMLDuration) UnmarshalText(b []byte) error {
	d, err := time.ParseDuration(string(b))
	if err != nil {
		return err
	}

	*t = TOMLDuration(d)
	return nil
}

type BackendOptions struct {
	ResponseTimeoutSeconds      int          `toml:"response_timeout_seconds"`
	MaxResponseSizeBytes        int64        `toml:"max_response_size_bytes"`
	MaxRetries                  int          `toml:"max_retries"`
	OutOfServiceSeconds         int          `toml:"out_of_service_seconds"`
	MaxDegradedLatencyThreshold TOMLDuration `toml:"max_degraded_latency_threshold"`
	MaxLatencyThreshold         TOMLDuration `toml:"max_latency_threshold"`
	MaxErrorRateThreshold       float64      `toml:"max_error_rate_threshold"`
}

type BackendConfig struct {
	Username         string            `toml:"username"`
	Password         string            `toml:"password"`
	RPCURL           string            `toml:"rpc_url"`
	WSURL            string            `toml:"ws_url"`
	WSPort           int               `toml:"ws_port"`
	MaxRPS           int               `toml:"max_rps"`
	MaxWSConns       int               `toml:"max_ws_conns"`
	CAFile           string            `toml:"ca_file"`
	ClientCertFile   string            `toml:"client_cert_file"`
	ClientKeyFile    string            `toml:"client_key_file"`
	StripTrailingXFF bool              `toml:"strip_trailing_xff"`
	Headers          map[string]string `toml:"headers"`

	Weight int `toml:"weight"`

	ConsensusSkipPeerCountCheck bool   `toml:"consensus_skip_peer_count"`
	ConsensusForcedCandidate    bool   `toml:"consensus_forced_candidate"`
	ConsensusReceiptsTarget     string `toml:"consensus_receipts_target"`
}

type BackendsConfig map[string]*BackendConfig

type BackendGroupConfig struct {
	Backends []string `toml:"backends"`

	WeightedRouting bool `toml:"weighted_routing"`

	ConsensusAware          bool         `toml:"consensus_aware"`
	ConsensusAsyncHandler   string       `toml:"consensus_handler"`
	ConsensusPollerInterval TOMLDuration `toml:"consensus_poller_interval"`

	ConsensusBanPeriod          TOMLDuration `toml:"consensus_ban_period"`
	ConsensusMaxUpdateThreshold TOMLDuration `toml:"consensus_max_update_threshold"`
	ConsensusMaxBlockLag        uint64       `toml:"consensus_max_block_lag"`
	ConsensusMaxBlockRange      uint64       `toml:"consensus_max_block_range"`
	ConsensusMinPeerCount       int          `toml:"consensus_min_peer_count"`

	ConsensusHA                  bool         `toml:"consensus_ha"`
	ConsensusHAHeartbeatInterval TOMLDuration `toml:"consensus_ha_heartbeat_interval"`
	ConsensusHALockPeriod        TOMLDuration `toml:"consensus_ha_lock_period"`
	ConsensusHARedis             RedisConfig  `toml:"consensus_ha_redis"`

	Fallbacks []string `toml:"fallbacks"`
}

type BackendGroupsConfig map[string]*BackendGroupConfig

type MethodMappingsConfig map[string]string

type BatchConfig struct {
	MaxSize      int    `toml:"max_size"`
	ErrorMessage string `toml:"error_message"`
}

// SenderRateLimitConfig configures the sender-based rate limiter
// for eth_sendRawTransaction requests.
// To enable pre-EIP-155 transactions, add '0' to allowed_chain_ids.
type SenderRateLimitConfig struct {
	Enabled         bool
	Interval        TOMLDuration
	Limit           int
	AllowedChainIds []*big.Int `toml:"allowed_chain_ids"`
}

type Config struct {
	WSBackendGroup        string                `toml:"ws_backend_group"`
	Server                ServerConfig          `toml:"server"`
	Cache                 CacheConfig           `toml:"cache"`
	Redis                 RedisConfig           `toml:"redis"`
	Metrics               MetricsConfig         `toml:"metrics"`
	RateLimit             RateLimitConfig       `toml:"rate_limit"`
	BackendOptions        BackendOptions        `toml:"backend"`
	Backends              BackendsConfig        `toml:"backends"`
	BatchConfig           BatchConfig           `toml:"batch"`
	Authentication        map[string]string     `toml:"authentication"`
	BackendGroups         BackendGroupsConfig   `toml:"backend_groups"`
	RPCMethodMappings     map[string]string     `toml:"rpc_method_mappings"`
	WSMethodWhitelist     []string              `toml:"ws_method_whitelist"`
	WhitelistErrorMessage string                `toml:"whitelist_error_message"`
	SenderRateLimit       SenderRateLimitConfig `toml:"sender_rate_limit"`
}

func ReadFromEnvOrConfig(value string) (string, error) {
	if strings.HasPrefix(value, "$") {
		envValue := os.Getenv(strings.TrimPrefix(value, "$"))
		if envValue == "" {
			return "", fmt.Errorf("config env var %s not found", value)
		}
		return envValue, nil
	}

	if strings.HasPrefix(value, "\\") {
		return strings.TrimPrefix(value, "\\"), nil
	}

	return value, nil
}
proxyd/consensus_poller.go (new file, 746 lines; diff truncated below)
@@ -0,0 +1,746 @@
|
||||
package proxyd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultPollerInterval = 1 * time.Second
|
||||
)
|
||||
|
||||
type OnConsensusBroken func()
|
||||
|
||||
// ConsensusPoller checks the consensus state for each member of a BackendGroup
|
||||
// resolves the highest common block for multiple nodes, and reconciles the consensus
|
||||
// in case of block hash divergence to minimize re-orgs
|
||||
type ConsensusPoller struct {
|
||||
ctx context.Context
|
||||
cancelFunc context.CancelFunc
|
||||
listeners []OnConsensusBroken
|
||||
|
||||
backendGroup *BackendGroup
|
||||
backendState map[*Backend]*backendState
|
||||
consensusGroupMux sync.Mutex
|
||||
consensusGroup []*Backend
|
||||
|
||||
tracker ConsensusTracker
|
||||
asyncHandler ConsensusAsyncHandler
|
||||
|
||||
minPeerCount uint64
|
||||
banPeriod time.Duration
|
||||
maxUpdateThreshold time.Duration
|
||||
maxBlockLag uint64
|
||||
maxBlockRange uint64
|
||||
interval time.Duration
|
||||
}
|
||||
|
||||
type backendState struct {
|
||||
backendStateMux sync.Mutex
|
||||
|
||||
latestBlockNumber hexutil.Uint64
|
||||
latestBlockHash string
|
||||
safeBlockNumber hexutil.Uint64
|
||||
finalizedBlockNumber hexutil.Uint64
|
||||
|
||||
peerCount uint64
|
||||
inSync bool
|
||||
|
||||
lastUpdate time.Time
|
||||
|
||||
bannedUntil time.Time
|
||||
}
|
||||
|
||||
func (bs *backendState) IsBanned() bool {
|
||||
return time.Now().Before(bs.bannedUntil)
|
||||
}
|
||||
|
||||
// GetConsensusGroup returns the backend members that are agreeing in a consensus
|
||||
func (cp *ConsensusPoller) GetConsensusGroup() []*Backend {
|
||||
defer cp.consensusGroupMux.Unlock()
|
||||
cp.consensusGroupMux.Lock()
|
||||
|
||||
g := make([]*Backend, len(cp.consensusGroup))
|
||||
copy(g, cp.consensusGroup)
|
||||
|
||||
return g
|
||||
}
|
||||
|
||||
// GetLatestBlockNumber returns the `latest` agreed block number in a consensus
|
||||
func (ct *ConsensusPoller) GetLatestBlockNumber() hexutil.Uint64 {
|
||||
return ct.tracker.GetLatestBlockNumber()
|
||||
}
|
||||
|
||||
// GetSafeBlockNumber returns the `safe` agreed block number in a consensus
|
||||
func (ct *ConsensusPoller) GetSafeBlockNumber() hexutil.Uint64 {
|
||||
return ct.tracker.GetSafeBlockNumber()
|
||||
}
|
||||
|
||||
// GetFinalizedBlockNumber returns the `finalized` agreed block number in a consensus
|
||||
func (ct *ConsensusPoller) GetFinalizedBlockNumber() hexutil.Uint64 {
|
||||
return ct.tracker.GetFinalizedBlockNumber()
|
||||
}
|
||||
|
||||
func (cp *ConsensusPoller) Shutdown() {
|
||||
cp.asyncHandler.Shutdown()
|
||||
}
|
||||
|
||||
// ConsensusAsyncHandler controls the asynchronous polling mechanism, interval and shutdown
|
||||
type ConsensusAsyncHandler interface {
|
||||
Init()
|
||||
Shutdown()
|
||||
}
|
||||
|
||||
// NoopAsyncHandler allows fine control updating the consensus
|
||||
type NoopAsyncHandler struct{}
|
||||
|
||||
func NewNoopAsyncHandler() ConsensusAsyncHandler {
|
||||
log.Warn("using NewNoopAsyncHandler")
|
||||
return &NoopAsyncHandler{}
|
||||
}
|
||||
func (ah *NoopAsyncHandler) Init() {}
|
||||
func (ah *NoopAsyncHandler) Shutdown() {}

// PollerAsyncHandler asynchronously updates each individual backend and the group consensus
type PollerAsyncHandler struct {
    ctx context.Context
    cp  *ConsensusPoller
}

func NewPollerAsyncHandler(ctx context.Context, cp *ConsensusPoller) ConsensusAsyncHandler {
    return &PollerAsyncHandler{
        ctx: ctx,
        cp:  cp,
    }
}
func (ah *PollerAsyncHandler) Init() {
    // create the individual backend pollers.
    log.Info("total number of primary candidates", "primaries", len(ah.cp.backendGroup.Primaries()))
    log.Info("total number of fallback candidates", "fallbacks", len(ah.cp.backendGroup.Fallbacks()))

    for _, be := range ah.cp.backendGroup.Primaries() {
        go func(be *Backend) {
            for {
                timer := time.NewTimer(ah.cp.interval)
                ah.cp.UpdateBackend(ah.ctx, be)
                select {
                case <-timer.C:
                case <-ah.ctx.Done():
                    timer.Stop()
                    return
                }
            }
        }(be)
    }

    for _, be := range ah.cp.backendGroup.Fallbacks() {
        go func(be *Backend) {
            for {
                timer := time.NewTimer(ah.cp.interval)

                healthyCandidates := ah.cp.FilterCandidates(ah.cp.backendGroup.Primaries())

                log.Info("number of healthy primary candidates", "healthy_candidates", len(healthyCandidates))
                if len(healthyCandidates) == 0 {
                    log.Debug("zero healthy candidates, querying fallback backend",
                        "backend_name", be.Name)
                    ah.cp.UpdateBackend(ah.ctx, be)
                }

                select {
                case <-timer.C:
                case <-ah.ctx.Done():
                    timer.Stop()
                    return
                }
            }
        }(be)
    }

    // create the group consensus poller
    go func() {
        for {
            timer := time.NewTimer(ah.cp.interval)
            log.Info("updating backend group consensus")
            ah.cp.UpdateBackendGroupConsensus(ah.ctx)

            select {
            case <-timer.C:
            case <-ah.ctx.Done():
                timer.Stop()
                return
            }
        }
    }()
}
func (ah *PollerAsyncHandler) Shutdown() {
    ah.cp.cancelFunc()
}

type ConsensusOpt func(cp *ConsensusPoller)

func WithTracker(tracker ConsensusTracker) ConsensusOpt {
    return func(cp *ConsensusPoller) {
        cp.tracker = tracker
    }
}

func WithAsyncHandler(asyncHandler ConsensusAsyncHandler) ConsensusOpt {
    return func(cp *ConsensusPoller) {
        cp.asyncHandler = asyncHandler
    }
}

func WithListener(listener OnConsensusBroken) ConsensusOpt {
    return func(cp *ConsensusPoller) {
        cp.AddListener(listener)
    }
}

func (cp *ConsensusPoller) AddListener(listener OnConsensusBroken) {
    cp.listeners = append(cp.listeners, listener)
}

func (cp *ConsensusPoller) ClearListeners() {
    cp.listeners = []OnConsensusBroken{}
}

func WithBanPeriod(banPeriod time.Duration) ConsensusOpt {
    return func(cp *ConsensusPoller) {
        cp.banPeriod = banPeriod
    }
}

func WithMaxUpdateThreshold(maxUpdateThreshold time.Duration) ConsensusOpt {
    return func(cp *ConsensusPoller) {
        cp.maxUpdateThreshold = maxUpdateThreshold
    }
}

func WithMaxBlockLag(maxBlockLag uint64) ConsensusOpt {
    return func(cp *ConsensusPoller) {
        cp.maxBlockLag = maxBlockLag
    }
}

func WithMaxBlockRange(maxBlockRange uint64) ConsensusOpt {
    return func(cp *ConsensusPoller) {
        cp.maxBlockRange = maxBlockRange
    }
}

func WithMinPeerCount(minPeerCount uint64) ConsensusOpt {
    return func(cp *ConsensusPoller) {
        cp.minPeerCount = minPeerCount
    }
}

func WithPollerInterval(interval time.Duration) ConsensusOpt {
    return func(cp *ConsensusPoller) {
        cp.interval = interval
    }
}

func NewConsensusPoller(bg *BackendGroup, opts ...ConsensusOpt) *ConsensusPoller {
    ctx, cancelFunc := context.WithCancel(context.Background())

    state := make(map[*Backend]*backendState, len(bg.Backends))

    cp := &ConsensusPoller{
        ctx:          ctx,
        cancelFunc:   cancelFunc,
        backendGroup: bg,
        backendState: state,

        banPeriod:          5 * time.Minute,
        maxUpdateThreshold: 30 * time.Second,
        maxBlockLag:        8, // 8*12 seconds = 96 seconds ~ 1.6 minutes
        minPeerCount:       3,
        interval:           DefaultPollerInterval,
    }

    for _, opt := range opts {
        opt(cp)
    }

    if cp.tracker == nil {
        cp.tracker = NewInMemoryConsensusTracker()
    }

    if cp.asyncHandler == nil {
        cp.asyncHandler = NewPollerAsyncHandler(ctx, cp)
    }

    cp.Reset()
    cp.asyncHandler.Init()

    return cp
}
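
A minimal construction sketch; the overridden values are hypothetical, and any option left unset keeps the defaults above:

    cp := NewConsensusPoller(bg, // bg is a *BackendGroup built elsewhere
        WithBanPeriod(2*time.Minute),
        WithMinPeerCount(4),
        WithPollerInterval(5*time.Second),
    )
    defer cp.Shutdown() // stops the poller goroutines via cancelFunc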

// UpdateBackend refreshes the consensus state of a single backend
func (cp *ConsensusPoller) UpdateBackend(ctx context.Context, be *Backend) {
    bs := cp.getBackendState(be)
    RecordConsensusBackendBanned(be, bs.IsBanned())

    if bs.IsBanned() {
        log.Debug("skipping backend - banned", "backend", be.Name)
        return
    }

    // if the backend is not in a healthy state, we'll only resume checking it after the ban expires
    if !be.IsHealthy() && !be.forcedCandidate {
        log.Warn("backend banned - not healthy", "backend", be.Name)
        cp.Ban(be)
        return
    }

    inSync, err := cp.isInSync(ctx, be)
    RecordConsensusBackendInSync(be, err == nil && inSync)
    if err != nil {
        log.Warn("error updating backend sync state", "name", be.Name, "err", err)
    }

    var peerCount uint64
    if !be.skipPeerCountCheck {
        peerCount, err = cp.getPeerCount(ctx, be)
        if err != nil {
            log.Warn("error updating backend peer count", "name", be.Name, "err", err)
        }
        RecordConsensusBackendPeerCount(be, peerCount)
    }

    latestBlockNumber, latestBlockHash, err := cp.fetchBlock(ctx, be, "latest")
    if err != nil {
        log.Warn("error updating backend - latest block", "name", be.Name, "err", err)
    }

    safeBlockNumber, _, err := cp.fetchBlock(ctx, be, "safe")
    if err != nil {
        log.Warn("error updating backend - safe block", "name", be.Name, "err", err)
    }

    finalizedBlockNumber, _, err := cp.fetchBlock(ctx, be, "finalized")
    if err != nil {
        log.Warn("error updating backend - finalized block", "name", be.Name, "err", err)
    }

    RecordConsensusBackendUpdateDelay(be, bs.lastUpdate)

    changed := cp.setBackendState(be, peerCount, inSync,
        latestBlockNumber, latestBlockHash,
        safeBlockNumber, finalizedBlockNumber)

    RecordBackendLatestBlock(be, latestBlockNumber)
    RecordBackendSafeBlock(be, safeBlockNumber)
    RecordBackendFinalizedBlock(be, finalizedBlockNumber)

    if changed {
        log.Debug("backend state updated",
            "name", be.Name,
            "peerCount", peerCount,
            "inSync", inSync,
            "latestBlockNumber", latestBlockNumber,
            "latestBlockHash", latestBlockHash,
            "safeBlockNumber", safeBlockNumber,
            "finalizedBlockNumber", finalizedBlockNumber,
            "lastUpdate", bs.lastUpdate)
    }

    // sanity check for latest, safe and finalized block tags
    expectedBlockTags := cp.checkExpectedBlockTags(
        latestBlockNumber,
        bs.safeBlockNumber, safeBlockNumber,
        bs.finalizedBlockNumber, finalizedBlockNumber)

    RecordBackendUnexpectedBlockTags(be, !expectedBlockTags)

    if !expectedBlockTags && !be.forcedCandidate {
        log.Warn("backend banned - unexpected block tags",
            "backend", be.Name,
            "oldFinalized", bs.finalizedBlockNumber,
            "finalizedBlockNumber", finalizedBlockNumber,
            "oldSafe", bs.safeBlockNumber,
            "safeBlockNumber", safeBlockNumber,
            "latestBlockNumber", latestBlockNumber,
        )
        cp.Ban(be)
    }
}

// checkExpectedBlockTags checks for unexpected conditions on block tags:
// - finalized block number should never decrease
// - safe block number should never decrease
// - finalized block should be <= safe block <= latest block
func (cp *ConsensusPoller) checkExpectedBlockTags(
    currentLatest hexutil.Uint64,
    oldSafe hexutil.Uint64, currentSafe hexutil.Uint64,
    oldFinalized hexutil.Uint64, currentFinalized hexutil.Uint64) bool {
    return currentFinalized >= oldFinalized &&
        currentSafe >= oldSafe &&
        currentFinalized <= currentSafe &&
        currentSafe <= currentLatest
}
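
To make the invariants concrete, a pair of hypothetical calls (argument order: currentLatest, oldSafe, currentSafe, oldFinalized, currentFinalized):

    cp.checkExpectedBlockTags(100, 95, 95, 90, 90) // true: 90 <= 95 <= 100 and nothing decreased
    cp.checkExpectedBlockTags(100, 95, 95, 90, 85) // false: finalized regressed from 90 to 85, so the backend gets banned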

// UpdateBackendGroupConsensus resolves the current group consensus based on the state of the backends
func (cp *ConsensusPoller) UpdateBackendGroupConsensus(ctx context.Context) {
    // get the latest block number from the tracker
    currentConsensusBlockNumber := cp.GetLatestBlockNumber()

    // get the candidates for the consensus group
    candidates := cp.getConsensusCandidates()

    // track the lowest latest block number and hash,
    // the lowest safe block number,
    // and the lowest finalized block number
    var lowestLatestBlock hexutil.Uint64
    var lowestLatestBlockHash string
    var lowestFinalizedBlock hexutil.Uint64
    var lowestSafeBlock hexutil.Uint64
    for _, bs := range candidates {
        if lowestLatestBlock == 0 || bs.latestBlockNumber < lowestLatestBlock {
            lowestLatestBlock = bs.latestBlockNumber
            lowestLatestBlockHash = bs.latestBlockHash
        }
        if lowestFinalizedBlock == 0 || bs.finalizedBlockNumber < lowestFinalizedBlock {
            lowestFinalizedBlock = bs.finalizedBlockNumber
        }
        if lowestSafeBlock == 0 || bs.safeBlockNumber < lowestSafeBlock {
            lowestSafeBlock = bs.safeBlockNumber
        }
    }

    // find the proposed block among the candidates;
    // the proposed block needs to have the same hash in the entire consensus group
    proposedBlock := lowestLatestBlock
    proposedBlockHash := lowestLatestBlockHash
    hasConsensus := false
    broken := false

    if lowestLatestBlock > currentConsensusBlockNumber {
        log.Debug("validating consensus on block", "lowestLatestBlock", lowestLatestBlock)
    }

    // if there is a block to propose, check if it is the same in all backends
    if proposedBlock > 0 {
        for !hasConsensus {
            allAgreed := true
            for be := range candidates {
                actualBlockNumber, actualBlockHash, err := cp.fetchBlock(ctx, be, proposedBlock.String())
                if err != nil {
                    log.Warn("error updating backend", "name", be.Name, "err", err)
                    continue
                }
                if proposedBlockHash == "" {
                    proposedBlockHash = actualBlockHash
                }
                blocksDontMatch := (actualBlockNumber != proposedBlock) || (actualBlockHash != proposedBlockHash)
                if blocksDontMatch {
                    if currentConsensusBlockNumber >= actualBlockNumber {
                        log.Warn("backend broke consensus",
                            "name", be.Name,
                            "actualBlockNumber", actualBlockNumber,
                            "actualBlockHash", actualBlockHash,
                            "proposedBlock", proposedBlock,
                            "proposedBlockHash", proposedBlockHash)
                        broken = true
                    }
                    allAgreed = false
                    break
                }
            }
            if allAgreed {
                hasConsensus = true
            } else {
                // walk one block behind and try again
                proposedBlock -= 1
                proposedBlockHash = ""
                log.Debug("no consensus, now trying", "block:", proposedBlock)
            }
        }
    }

    if broken {
        // propagate event to other interested parts, such as cache invalidator
        for _, l := range cp.listeners {
            l()
        }
        log.Info("consensus broken",
            "currentConsensusBlockNumber", currentConsensusBlockNumber,
            "proposedBlock", proposedBlock,
            "proposedBlockHash", proposedBlockHash)
    }

    // update tracker
    cp.tracker.SetLatestBlockNumber(proposedBlock)
    cp.tracker.SetSafeBlockNumber(lowestSafeBlock)
    cp.tracker.SetFinalizedBlockNumber(lowestFinalizedBlock)

    // update consensus group
    group := make([]*Backend, 0, len(candidates))
    consensusBackendsNames := make([]string, 0, len(candidates))
    filteredBackendsNames := make([]string, 0, len(cp.backendGroup.Backends))
    for _, be := range cp.backendGroup.Backends {
        _, exist := candidates[be]
        if exist {
            group = append(group, be)
            consensusBackendsNames = append(consensusBackendsNames, be.Name)
        } else {
            filteredBackendsNames = append(filteredBackendsNames, be.Name)
        }
    }

    cp.consensusGroupMux.Lock()
    cp.consensusGroup = group
    cp.consensusGroupMux.Unlock()

    RecordGroupConsensusLatestBlock(cp.backendGroup, proposedBlock)
    RecordGroupConsensusSafeBlock(cp.backendGroup, lowestSafeBlock)
    RecordGroupConsensusFinalizedBlock(cp.backendGroup, lowestFinalizedBlock)

    RecordGroupConsensusCount(cp.backendGroup, len(group))
    RecordGroupConsensusFilteredCount(cp.backendGroup, len(filteredBackendsNames))
    RecordGroupTotalCount(cp.backendGroup, len(cp.backendGroup.Backends))

    log.Debug("group state",
        "proposedBlock", proposedBlock,
        "consensusBackends", strings.Join(consensusBackendsNames, ", "),
        "filteredBackends", strings.Join(filteredBackendsNames, ", "))
}

// IsBanned checks if a specific backend is banned
func (cp *ConsensusPoller) IsBanned(be *Backend) bool {
    bs := cp.backendState[be]
    defer bs.backendStateMux.Unlock()
    bs.backendStateMux.Lock()
    return bs.IsBanned()
}

// Ban bans a specific backend
func (cp *ConsensusPoller) Ban(be *Backend) {
    if be.forcedCandidate {
        return
    }

    bs := cp.backendState[be]
    defer bs.backendStateMux.Unlock()
    bs.backendStateMux.Lock()
    bs.bannedUntil = time.Now().Add(cp.banPeriod)

    // when we ban a node, we give it the chance to start from any block when it is back
    bs.latestBlockNumber = 0
    bs.safeBlockNumber = 0
    bs.finalizedBlockNumber = 0
}

// Unban removes any ban from the given backend
func (cp *ConsensusPoller) Unban(be *Backend) {
    bs := cp.backendState[be]
    defer bs.backendStateMux.Unlock()
    bs.backendStateMux.Lock()
    bs.bannedUntil = time.Now().Add(-10 * time.Hour)
}

// Reset resets all backend states
func (cp *ConsensusPoller) Reset() {
    for _, be := range cp.backendGroup.Backends {
        cp.backendState[be] = &backendState{}
    }
}

// fetchBlock is a convenient wrapper to make a request to get a block directly from the backend
func (cp *ConsensusPoller) fetchBlock(ctx context.Context, be *Backend, block string) (blockNumber hexutil.Uint64, blockHash string, err error) {
    var rpcRes RPCRes
    err = be.ForwardRPC(ctx, &rpcRes, "67", "eth_getBlockByNumber", block, false)
    if err != nil {
        return 0, "", err
    }

    jsonMap, ok := rpcRes.Result.(map[string]interface{})
    if !ok {
        return 0, "", fmt.Errorf("unexpected response to eth_getBlockByNumber on backend %s", be.Name)
    }
    blockNumber = hexutil.Uint64(hexutil.MustDecodeUint64(jsonMap["number"].(string)))
    blockHash = jsonMap["hash"].(string)

    return
}
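
For reference, the JSON-RPC exchange behind the call above looks roughly like this (values hypothetical; only the `number` and `hash` fields of the result are read):

    // -> {"jsonrpc":"2.0","id":"67","method":"eth_getBlockByNumber","params":["latest",false]}
    // <- {"jsonrpc":"2.0","id":"67","result":{"number":"0x10d4f","hash":"0xabc...", ...}}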

// getPeerCount is a convenient wrapper to retrieve the current peer count from the backend
func (cp *ConsensusPoller) getPeerCount(ctx context.Context, be *Backend) (count uint64, err error) {
    var rpcRes RPCRes
    err = be.ForwardRPC(ctx, &rpcRes, "67", "net_peerCount")
    if err != nil {
        return 0, err
    }

    jsonMap, ok := rpcRes.Result.(string)
    if !ok {
        return 0, fmt.Errorf("unexpected response to net_peerCount on backend %s", be.Name)
    }

    count = hexutil.MustDecodeUint64(jsonMap)

    return count, nil
}

// isInSync is a convenient wrapper to check if the backend is in sync with the network
func (cp *ConsensusPoller) isInSync(ctx context.Context, be *Backend) (result bool, err error) {
    var rpcRes RPCRes
    err = be.ForwardRPC(ctx, &rpcRes, "67", "eth_syncing")
    if err != nil {
        return false, err
    }

    var res bool
    switch typed := rpcRes.Result.(type) {
    case bool:
        syncing := typed
        res = !syncing
    case string:
        syncing, err := strconv.ParseBool(typed)
        if err != nil {
            return false, err
        }
        res = !syncing
    default:
        // result is a json object when the node is not in sync
        res = false
    }

    return res, nil
}
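
The type switch mirrors the shapes eth_syncing can return: `false` once a node has caught up, and a progress object while it is still syncing, e.g. (hypothetical values):

    // synced:  result = false                                                  -> res = true
    // syncing: result = {"startingBlock":"0x0","currentBlock":"0x1b4","highestBlock":"0x9cd"}
    //          decoded as a JSON map                                            -> default branch, res = false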

// getBackendState creates a copy of backend state so that the caller can use it without locking
func (cp *ConsensusPoller) getBackendState(be *Backend) *backendState {
    bs := cp.backendState[be]
    defer bs.backendStateMux.Unlock()
    bs.backendStateMux.Lock()

    return &backendState{
        latestBlockNumber:    bs.latestBlockNumber,
        latestBlockHash:      bs.latestBlockHash,
        safeBlockNumber:      bs.safeBlockNumber,
        finalizedBlockNumber: bs.finalizedBlockNumber,
        peerCount:            bs.peerCount,
        inSync:               bs.inSync,
        lastUpdate:           bs.lastUpdate,
        bannedUntil:          bs.bannedUntil,
    }
}

func (cp *ConsensusPoller) GetLastUpdate(be *Backend) time.Time {
    bs := cp.backendState[be]
    defer bs.backendStateMux.Unlock()
    bs.backendStateMux.Lock()
    return bs.lastUpdate
}

func (cp *ConsensusPoller) setBackendState(be *Backend, peerCount uint64, inSync bool,
    latestBlockNumber hexutil.Uint64, latestBlockHash string,
    safeBlockNumber hexutil.Uint64,
    finalizedBlockNumber hexutil.Uint64) bool {
    bs := cp.backendState[be]
    bs.backendStateMux.Lock()
    changed := bs.latestBlockHash != latestBlockHash
    bs.peerCount = peerCount
    bs.inSync = inSync
    bs.latestBlockNumber = latestBlockNumber
    bs.latestBlockHash = latestBlockHash
    bs.finalizedBlockNumber = finalizedBlockNumber
    bs.safeBlockNumber = safeBlockNumber
    bs.lastUpdate = time.Now()
    bs.backendStateMux.Unlock()
    return changed
}

// getConsensusCandidates will search for candidates in the primary group;
// if there are none, it will search for candidates in the fallback group
func (cp *ConsensusPoller) getConsensusCandidates() map[*Backend]*backendState {
    healthyPrimaries := cp.FilterCandidates(cp.backendGroup.Primaries())

    RecordHealthyCandidates(cp.backendGroup, len(healthyPrimaries))
    if len(healthyPrimaries) > 0 {
        return healthyPrimaries
    }

    return cp.FilterCandidates(cp.backendGroup.Fallbacks())
}

// FilterCandidates finds out which backends are candidates to be in the consensus group
// and creates a copy of their current state
//
// a candidate is a serving node that meets the following conditions:
// - not banned
// - healthy (network latency and error rate)
// - with minimum peer count
// - in sync
// - updated recently
// - not lagging the latest block
func (cp *ConsensusPoller) FilterCandidates(backends []*Backend) map[*Backend]*backendState {
    candidates := make(map[*Backend]*backendState, len(cp.backendGroup.Backends))

    for _, be := range backends {
        bs := cp.getBackendState(be)
        if be.forcedCandidate {
            candidates[be] = bs
            continue
        }
        if bs.IsBanned() {
            continue
        }
        if !be.IsHealthy() {
            continue
        }
        if !be.skipPeerCountCheck && bs.peerCount < cp.minPeerCount {
            log.Debug("backend peer count too low for inclusion in consensus",
                "backend_name", be.Name,
                "peer_count", bs.peerCount,
                "min_peer_count", cp.minPeerCount,
            )
            continue
        }
        if !bs.inSync {
            continue
        }
        if bs.lastUpdate.Add(cp.maxUpdateThreshold).Before(time.Now()) {
            continue
        }

        candidates[be] = bs
    }

    // find the highest block, in order to use it when defining the highest non-lagging ancestor block
    var highestLatestBlock hexutil.Uint64
    for _, bs := range candidates {
        if bs.latestBlockNumber > highestLatestBlock {
            highestLatestBlock = bs.latestBlockNumber
        }
    }

    // find the highest common ancestor block
    lagging := make([]*Backend, 0, len(candidates))
    for be, bs := range candidates {
        // check if the backend is lagging behind the highest block
        if uint64(highestLatestBlock-bs.latestBlockNumber) > cp.maxBlockLag {
            lagging = append(lagging, be)
        }
    }

    // remove lagging backends from the candidates
    for _, be := range lagging {
        delete(candidates, be)
    }

    return candidates
}
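
A worked example of the lag filter, with hypothetical numbers:

    // highestLatestBlock = 1000, cp.maxBlockLag = 8:
    // a candidate at block 992 has lag 8 (not > 8) and stays;
    // a candidate at block 991 has lag 9 and is deleted from the map.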

356
proxyd/consensus_tracker.go
Normal file
@ -0,0 +1,356 @@

package proxyd

import (
    "context"
    "encoding/json"
    "fmt"
    "os"
    "sync"
    "time"

    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/ethereum/go-ethereum/log"
    "github.com/go-redsync/redsync/v4"
    "github.com/go-redsync/redsync/v4/redis/goredis/v9"
    "github.com/redis/go-redis/v9"
)

// ConsensusTracker abstracts how we store and retrieve the current consensus,
// allowing it to be stored locally in-memory or in a shared Redis cluster
type ConsensusTracker interface {
    GetLatestBlockNumber() hexutil.Uint64
    SetLatestBlockNumber(blockNumber hexutil.Uint64)
    GetSafeBlockNumber() hexutil.Uint64
    SetSafeBlockNumber(blockNumber hexutil.Uint64)
    GetFinalizedBlockNumber() hexutil.Uint64
    SetFinalizedBlockNumber(blockNumber hexutil.Uint64)
}

// DTO to hold the current consensus state
type ConsensusTrackerState struct {
    Latest    hexutil.Uint64 `json:"latest"`
    Safe      hexutil.Uint64 `json:"safe"`
    Finalized hexutil.Uint64 `json:"finalized"`
}

func (ct *InMemoryConsensusTracker) update(o *ConsensusTrackerState) {
    ct.mutex.Lock()
    defer ct.mutex.Unlock()

    ct.state.Latest = o.Latest
    ct.state.Safe = o.Safe
    ct.state.Finalized = o.Finalized
}

// InMemoryConsensusTracker stores and retrieves in memory, async-safe
type InMemoryConsensusTracker struct {
    mutex sync.Mutex
    state *ConsensusTrackerState
}

func NewInMemoryConsensusTracker() ConsensusTracker {
    return &InMemoryConsensusTracker{
        mutex: sync.Mutex{},
        state: &ConsensusTrackerState{},
    }
}

func (ct *InMemoryConsensusTracker) Valid() bool {
    return ct.GetLatestBlockNumber() > 0 &&
        ct.GetSafeBlockNumber() > 0 &&
        ct.GetFinalizedBlockNumber() > 0
}

func (ct *InMemoryConsensusTracker) Behind(other *InMemoryConsensusTracker) bool {
    return ct.GetLatestBlockNumber() < other.GetLatestBlockNumber() ||
        ct.GetSafeBlockNumber() < other.GetSafeBlockNumber() ||
        ct.GetFinalizedBlockNumber() < other.GetFinalizedBlockNumber()
}

func (ct *InMemoryConsensusTracker) GetLatestBlockNumber() hexutil.Uint64 {
    defer ct.mutex.Unlock()
    ct.mutex.Lock()

    return ct.state.Latest
}

func (ct *InMemoryConsensusTracker) SetLatestBlockNumber(blockNumber hexutil.Uint64) {
    defer ct.mutex.Unlock()
    ct.mutex.Lock()

    ct.state.Latest = blockNumber
}

func (ct *InMemoryConsensusTracker) GetSafeBlockNumber() hexutil.Uint64 {
    defer ct.mutex.Unlock()
    ct.mutex.Lock()

    return ct.state.Safe
}

func (ct *InMemoryConsensusTracker) SetSafeBlockNumber(blockNumber hexutil.Uint64) {
    defer ct.mutex.Unlock()
    ct.mutex.Lock()

    ct.state.Safe = blockNumber
}

func (ct *InMemoryConsensusTracker) GetFinalizedBlockNumber() hexutil.Uint64 {
    defer ct.mutex.Unlock()
    ct.mutex.Lock()

    return ct.state.Finalized
}

func (ct *InMemoryConsensusTracker) SetFinalizedBlockNumber(blockNumber hexutil.Uint64) {
    defer ct.mutex.Unlock()
    ct.mutex.Lock()

    ct.state.Finalized = blockNumber
}

// RedisConsensusTracker stores and retrieves in a shared Redis cluster, with leader election
type RedisConsensusTracker struct {
    ctx          context.Context
    client       *redis.Client
    namespace    string
    backendGroup *BackendGroup

    redlock           *redsync.Mutex
    lockPeriod        time.Duration
    heartbeatInterval time.Duration

    leader     bool
    leaderName string

    // holds the state collected by local pollers
    local *InMemoryConsensusTracker

    // holds a copy of the remote shared state;
    // when leader, updates the remote with the local state
    remote *InMemoryConsensusTracker
}

type RedisConsensusTrackerOpt func(cp *RedisConsensusTracker)

func WithLockPeriod(lockPeriod time.Duration) RedisConsensusTrackerOpt {
    return func(ct *RedisConsensusTracker) {
        ct.lockPeriod = lockPeriod
    }
}

func WithHeartbeatInterval(heartbeatInterval time.Duration) RedisConsensusTrackerOpt {
    return func(ct *RedisConsensusTracker) {
        ct.heartbeatInterval = heartbeatInterval
    }
}
func NewRedisConsensusTracker(ctx context.Context,
    redisClient *redis.Client,
    bg *BackendGroup,
    namespace string,
    opts ...RedisConsensusTrackerOpt) ConsensusTracker {

    tracker := &RedisConsensusTracker{
        ctx:          ctx,
        client:       redisClient,
        backendGroup: bg,
        namespace:    namespace,

        lockPeriod:        30 * time.Second,
        heartbeatInterval: 2 * time.Second,
        local:             NewInMemoryConsensusTracker().(*InMemoryConsensusTracker),
        remote:            NewInMemoryConsensusTracker().(*InMemoryConsensusTracker),
    }

    for _, opt := range opts {
        opt(tracker)
    }

    return tracker
}

func (ct *RedisConsensusTracker) Init() {
    go func() {
        for {
            timer := time.NewTimer(ct.heartbeatInterval)
            ct.stateHeartbeat()

            select {
            case <-timer.C:
                continue
            case <-ct.ctx.Done():
                timer.Stop()
                return
            }
        }
    }()
}

func (ct *RedisConsensusTracker) stateHeartbeat() {
    pool := goredis.NewPool(ct.client)
    rs := redsync.New(pool)
    key := ct.key("mutex")

    val, err := ct.client.Get(ct.ctx, key).Result()
    if err != nil && err != redis.Nil {
        log.Error("failed to read the lock", "err", err)
        RecordGroupConsensusError(ct.backendGroup, "read_lock", err)
        if ct.leader {
            ok, err := ct.redlock.Unlock()
            if err != nil || !ok {
                log.Error("failed to release the lock after error", "err", err)
                RecordGroupConsensusError(ct.backendGroup, "leader_release_lock", err)
                return
            }
            ct.leader = false
        }
        return
    }
    if val != "" {
        if ct.leader {
            log.Debug("extending lock")
            ok, err := ct.redlock.Extend()
            if err != nil || !ok {
                log.Error("failed to extend lock", "err", err, "mutex", ct.redlock.Name(), "val", ct.redlock.Value())
                RecordGroupConsensusError(ct.backendGroup, "leader_extend_lock", err)
                ok, err := ct.redlock.Unlock()
                if err != nil || !ok {
                    log.Error("failed to release the lock after error", "err", err)
                    RecordGroupConsensusError(ct.backendGroup, "leader_release_lock", err)
                    return
                }
                ct.leader = false
                return
            }
            ct.postPayload(val)
        } else {
            // retrieve current leader
            leaderName, err := ct.client.Get(ct.ctx, ct.key(fmt.Sprintf("leader:%s", val))).Result()
            if err != nil && err != redis.Nil {
                log.Error("failed to read the remote leader", "err", err)
                RecordGroupConsensusError(ct.backendGroup, "read_leader", err)
                return
            }
            ct.leaderName = leaderName
            log.Debug("following", "val", val, "leader", leaderName)
            // retrieve payload
            val, err := ct.client.Get(ct.ctx, ct.key(fmt.Sprintf("state:%s", val))).Result()
            if err != nil && err != redis.Nil {
                log.Error("failed to read the remote state", "err", err)
                RecordGroupConsensusError(ct.backendGroup, "read_state", err)
                return
            }
            if val == "" {
                log.Error("remote state is missing (recent leader election maybe?)")
                RecordGroupConsensusError(ct.backendGroup, "read_state_missing", err)
                return
            }
            state := &ConsensusTrackerState{}
            err = json.Unmarshal([]byte(val), state)
            if err != nil {
                log.Error("failed to unmarshal the remote state", "err", err)
                RecordGroupConsensusError(ct.backendGroup, "read_unmarshal_state", err)
                return
            }

            ct.remote.update(state)
            log.Debug("updated state from remote", "state", val, "leader", leaderName)

            RecordGroupConsensusHALatestBlock(ct.backendGroup, leaderName, ct.remote.state.Latest)
            RecordGroupConsensusHASafeBlock(ct.backendGroup, leaderName, ct.remote.state.Safe)
            RecordGroupConsensusHAFinalizedBlock(ct.backendGroup, leaderName, ct.remote.state.Finalized)
        }
    } else {
        if !ct.local.Valid() {
            log.Warn("local state is not valid, skipping")
            return
        }
        if ct.remote.Valid() && ct.local.Behind(ct.remote) {
            log.Warn("local state is behind remote, skipping")
            return
        }

        log.Info("lock not found, creating a new one")

        mutex := rs.NewMutex(key,
            redsync.WithExpiry(ct.lockPeriod),
            redsync.WithFailFast(true),
            redsync.WithTries(1))

        // nosemgrep: missing-unlock-before-return
        // this lock is held indefinitely, and it is extended until the leader dies
        if err := mutex.Lock(); err != nil {
            log.Debug("failed to obtain lock", "err", err)
            ct.leader = false
            return
        }

        log.Info("lock acquired", "mutex", mutex.Name(), "val", mutex.Value())
        ct.redlock = mutex
        ct.leader = true
        ct.postPayload(mutex.Value())
    }
}

func (ct *RedisConsensusTracker) key(tag string) string {
    return fmt.Sprintf("consensus:%s:%s", ct.namespace, tag)
}

func (ct *RedisConsensusTracker) GetLatestBlockNumber() hexutil.Uint64 {
    return ct.remote.GetLatestBlockNumber()
}

func (ct *RedisConsensusTracker) SetLatestBlockNumber(blockNumber hexutil.Uint64) {
    ct.local.SetLatestBlockNumber(blockNumber)
}

func (ct *RedisConsensusTracker) GetSafeBlockNumber() hexutil.Uint64 {
    return ct.remote.GetSafeBlockNumber()
}

func (ct *RedisConsensusTracker) SetSafeBlockNumber(blockNumber hexutil.Uint64) {
    ct.local.SetSafeBlockNumber(blockNumber)
}

func (ct *RedisConsensusTracker) GetFinalizedBlockNumber() hexutil.Uint64 {
    return ct.remote.GetFinalizedBlockNumber()
}

func (ct *RedisConsensusTracker) SetFinalizedBlockNumber(blockNumber hexutil.Uint64) {
    ct.local.SetFinalizedBlockNumber(blockNumber)
}

func (ct *RedisConsensusTracker) postPayload(mutexVal string) {
    jsonState, err := json.Marshal(ct.local.state)
    if err != nil {
        log.Error("failed to marshal local", "err", err)
        RecordGroupConsensusError(ct.backendGroup, "leader_marshal_local_state", err)
        ct.leader = false
        return
    }
    err = ct.client.Set(ct.ctx, ct.key(fmt.Sprintf("state:%s", mutexVal)), jsonState, ct.lockPeriod).Err()
    if err != nil {
        log.Error("failed to post the state", "err", err)
        RecordGroupConsensusError(ct.backendGroup, "leader_post_state", err)
        ct.leader = false
        return
    }

    leader, _ := os.LookupEnv("HOSTNAME")
    err = ct.client.Set(ct.ctx, ct.key(fmt.Sprintf("leader:%s", mutexVal)), leader, ct.lockPeriod).Err()
    if err != nil {
        log.Error("failed to post the leader", "err", err)
        RecordGroupConsensusError(ct.backendGroup, "leader_post_leader", err)
        ct.leader = false
        return
    }

    log.Debug("posted state", "state", string(jsonState), "leader", leader)

    ct.leaderName = leader
    ct.remote.update(ct.local.state)

    RecordGroupConsensusHALatestBlock(ct.backendGroup, leader, ct.remote.state.Latest)
    RecordGroupConsensusHASafeBlock(ct.backendGroup, leader, ct.remote.state.Safe)
    RecordGroupConsensusHAFinalizedBlock(ct.backendGroup, leader, ct.remote.state.Finalized)
}
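
A minimal sketch of wiring this tracker into the poller so that several proxyd instances share consensus state (the Redis address and namespace are hypothetical; Init starts the heartbeat loop above):

    redisClient := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
    tracker := NewRedisConsensusTracker(ctx, redisClient, bg, "main",
        WithLockPeriod(30*time.Second),
        WithHeartbeatInterval(2*time.Second),
    )
    tracker.(*RedisConsensusTracker).Init() // start leader election + heartbeat
    cp := NewConsensusPoller(bg, WithTracker(tracker))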

6
proxyd/entrypoint.sh
Normal file
@ -0,0 +1,6 @@

#!/bin/sh

echo "Updating CA certificates."
update-ca-certificates
echo "Running CMD."
exec "$@"

7
proxyd/errors.go
Normal file
@ -0,0 +1,7 @@

package proxyd

import "fmt"

func wrapErr(err error, msg string) error {
    return fmt.Errorf("%s %w", msg, err)
}
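
Given the `%s %w` format, the message is prefixed and the original error remains unwrappable with errors.Is/errors.As. A hypothetical call site:

    if err := readBody(); err != nil { // readBody is hypothetical
        return wrapErr(err, "failed to read body:")
    }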

123
proxyd/example.config.toml
Normal file
@ -0,0 +1,123 @@

# List of WS methods to whitelist.
ws_method_whitelist = [
    "eth_subscribe",
    "eth_call",
    "eth_chainId"
]
# Enable WS on this backend group. There can only be one WS-enabled backend group.
ws_backend_group = "main"

[server]
# Host for the proxyd RPC server to listen on.
rpc_host = "0.0.0.0"
# Port for the above.
rpc_port = 8080
# Host for the proxyd WS server to listen on.
ws_host = "0.0.0.0"
# Port for the above.
# Set the ws_port to 0 to disable WS.
ws_port = 8085
# Maximum client body size, in bytes, that the server will accept.
max_body_size_bytes = 10485760
max_concurrent_rpcs = 1000
# Server log level
log_level = "info"

[redis]
# URL to a Redis instance.
url = "redis://localhost:6379"

[metrics]
# Whether or not to enable Prometheus metrics.
enabled = true
# Host for the Prometheus metrics endpoint to listen on.
host = "0.0.0.0"
# Port for the above.
port = 9761

[backend]
# How long proxyd should wait for a backend response before timing out.
response_timeout_seconds = 5
# Maximum response size, in bytes, that proxyd will accept from a backend.
max_response_size_bytes = 5242880
# Maximum number of times proxyd will try a backend before giving up.
max_retries = 3
# Number of seconds to wait before trying an unhealthy backend again.
out_of_service_seconds = 600
# Maximum latency accepted to serve requests, default 10s
max_latency_threshold = "30s"
# Maximum latency accepted to serve requests before degraded, default 5s
max_degraded_latency_threshold = "10s"
# Maximum error rate accepted to serve requests, default 0.5 (i.e. 50%)
max_error_rate_threshold = 0.3

[backends]
# A map of backends by name.
[backends.infura]
# The URL to contact the backend at. Will be read from the environment
# if an environment variable prefixed with $ is provided.
rpc_url = ""
# The WS URL to contact the backend at. Will be read from the environment
# if an environment variable prefixed with $ is provided.
ws_url = ""
username = ""
# An HTTP Basic password to authenticate with the backend. Will be read from
# the environment if an environment variable prefixed with $ is provided.
password = ""
max_rps = 3
max_ws_conns = 1
# Path to a custom root CA.
ca_file = ""
# Path to a custom client cert file.
client_cert_file = ""
# Path to a custom client key file.
client_key_file = ""
# Allows backends to skip peer count checking, default false
# consensus_skip_peer_count = true
# Specifies the target method to get receipts, default "debug_getRawReceipts"
# See https://github.com/ethereum-optimism/optimism/blob/186e46a47647a51a658e699e9ff047d39444c2de/op-node/sources/receipts.go#L186-L253
consensus_receipts_target = "eth_getBlockReceipts"

[backends.alchemy]
rpc_url = ""
ws_url = ""
username = ""
password = ""
max_rps = 3
max_ws_conns = 1
consensus_receipts_target = "alchemy_getTransactionReceipts"

[backend_groups]
[backend_groups.main]
backends = ["infura"]
# Enable consensus awareness for the backend group, making it act as a load balancer, default false
# consensus_aware = true
# Period in which the backend won't serve requests if banned, default 5m
# consensus_ban_period = "1m"
# Maximum delay for updating the backend, default 30s
# consensus_max_update_threshold = "20s"
# Maximum block lag, default 8
# consensus_max_block_lag = 16
# Maximum block range (for the eth_getLogs method), no default
# consensus_max_block_range = 20000
# Minimum peer count, default 3
# consensus_min_peer_count = 4

[backend_groups.alchemy]
backends = ["alchemy"]

# If the authentication group below is in the config,
# proxyd will only accept authenticated requests.
[authentication]
# Mapping of auth key to alias. The alias is used to provide a human-
# readable name for the auth key in monitoring. The auth key will be
# read from the environment if an environment variable prefixed with $
# is provided. Note that you will need to quote the environment variable
# in order for it to be valid TOML, e.g. "$FOO_AUTH_KEY" = "foo_alias".
secret = "test"

# Mapping of methods to backend groups.
[rpc_method_mappings]
eth_call = "main"
eth_chainId = "main"
eth_blockNumber = "alchemy"

139
proxyd/frontend_rate_limiter.go
Normal file
@ -0,0 +1,139 @@

package proxyd

import (
    "context"
    "fmt"
    "sync"
    "time"

    "github.com/redis/go-redis/v9"
)

type FrontendRateLimiter interface {
    // Take consumes a key, and a maximum number of requests
    // per time interval. It returns a boolean denoting if
    // the limit could be taken, or an error if a failure
    // occurred in the backing rate limit implementation.
    //
    // No error will be returned if the limit could not be taken
    // as a result of the requestor being over the limit.
    Take(ctx context.Context, key string) (bool, error)
}

// limitedKeys is a wrapper around a map that stores a truncated
// timestamp and a mutex. The map is used to keep track of rate
// limit keys, and their used limits.
type limitedKeys struct {
    truncTS int64
    keys    map[string]int
    mtx     sync.Mutex
}

func newLimitedKeys(t int64) *limitedKeys {
    return &limitedKeys{
        truncTS: t,
        keys:    make(map[string]int),
    }
}

func (l *limitedKeys) Take(key string, max int) bool {
    l.mtx.Lock()
    defer l.mtx.Unlock()
    val, ok := l.keys[key]
    if !ok {
        l.keys[key] = 0
        val = 0
    }
    l.keys[key] = val + 1
    return val < max
}

// MemoryFrontendRateLimiter is a rate limiter that stores
// all rate limiting information in local memory. It works
// by storing a limitedKeys struct that references the
// truncated timestamp at which the struct was created. If
// the current truncated timestamp doesn't match what's
// referenced, the limit is reset. Otherwise, values in
// a map are incremented to represent the limit.
type MemoryFrontendRateLimiter struct {
    currGeneration *limitedKeys
    dur            time.Duration
    max            int
    mtx            sync.Mutex
}

func NewMemoryFrontendRateLimit(dur time.Duration, max int) FrontendRateLimiter {
    return &MemoryFrontendRateLimiter{
        dur: dur,
        max: max,
    }
}

func (m *MemoryFrontendRateLimiter) Take(ctx context.Context, key string) (bool, error) {
    m.mtx.Lock()
    // Create truncated timestamp
    truncTS := truncateNow(m.dur)

    // If there is no current rate limit map or the rate limit map references
    // a different timestamp, reset limits.
    if m.currGeneration == nil || m.currGeneration.truncTS != truncTS {
        m.currGeneration = newLimitedKeys(truncTS)
    }

    // Pull out the limiter so we can unlock before incrementing the limit.
    limiter := m.currGeneration

    m.mtx.Unlock()

    return limiter.Take(key, m.max), nil
}

// RedisFrontendRateLimiter is a rate limiter that stores data in Redis.
// It uses the basic rate limiter pattern described on the Redis best
// practices website: https://redis.com/redis-best-practices/basic-rate-limiting/.
type RedisFrontendRateLimiter struct {
    r      *redis.Client
    dur    time.Duration
    max    int
    prefix string
}

func NewRedisFrontendRateLimiter(r *redis.Client, dur time.Duration, max int, prefix string) FrontendRateLimiter {
    return &RedisFrontendRateLimiter{
        r:      r,
        dur:    dur,
        max:    max,
        prefix: prefix,
    }
}

func (r *RedisFrontendRateLimiter) Take(ctx context.Context, key string) (bool, error) {
    var incr *redis.IntCmd
    truncTS := truncateNow(r.dur)
    fullKey := fmt.Sprintf("rate_limit:%s:%s:%d", r.prefix, key, truncTS)
    _, err := r.r.Pipelined(ctx, func(pipe redis.Pipeliner) error {
        incr = pipe.Incr(ctx, fullKey)
        pipe.PExpire(ctx, fullKey, r.dur-time.Millisecond)
        return nil
    })
    if err != nil {
        frontendRateLimitTakeErrors.Inc()
        return false, err
    }

    return incr.Val()-1 < int64(r.max), nil
}
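
The pipelined INCR + PEXPIRE pair is the fixed-window pattern from the linked article: each window gets its own counter key that expires on its own just under the window length. The final comparison keeps the semantics of the in-memory limiter:

    // 1st request in a window: INCR returns 1,     so 1-1 < max     -> allowed
    // request number max+1:    INCR returns max+1, so max < max is false -> limited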

type noopFrontendRateLimiter struct{}

var NoopFrontendRateLimiter = &noopFrontendRateLimiter{}

func (n *noopFrontendRateLimiter) Take(ctx context.Context, key string) (bool, error) {
    return true, nil
}

// truncateNow truncates the current timestamp
// to the specified duration.
func truncateNow(dur time.Duration) int64 {
    return time.Now().Truncate(dur).Unix()
}
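
A minimal usage sketch of the in-memory limiter (the handler wiring and clientKey are hypothetical; only the limiter API comes from this file):

    frl := NewMemoryFrontendRateLimit(time.Second, 100) // 100 requests per second per key
    ok, err := frl.Take(ctx, clientKey)
    if err != nil || !ok {
        // reject the request, e.g. with HTTP 429
    }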

53
proxyd/frontend_rate_limiter_test.go
Normal file
@ -0,0 +1,53 @@

package proxyd

import (
    "context"
    "fmt"
    "testing"
    "time"

    "github.com/alicebob/miniredis"
    "github.com/redis/go-redis/v9"
    "github.com/stretchr/testify/require"
)

func TestFrontendRateLimiter(t *testing.T) {
    redisServer, err := miniredis.Run()
    require.NoError(t, err)
    defer redisServer.Close()

    redisClient := redis.NewClient(&redis.Options{
        Addr: fmt.Sprintf("127.0.0.1:%s", redisServer.Port()),
    })

    max := 2
    lims := []struct {
        name string
        frl  FrontendRateLimiter
    }{
        {"memory", NewMemoryFrontendRateLimit(2*time.Second, max)},
        {"redis", NewRedisFrontendRateLimiter(redisClient, 2*time.Second, max, "")},
    }

    for _, cfg := range lims {
        frl := cfg.frl
        ctx := context.Background()
        t.Run(cfg.name, func(t *testing.T) {
            for i := 0; i < 4; i++ {
                ok, err := frl.Take(ctx, "foo")
                require.NoError(t, err)
                require.Equal(t, i < max, ok)
                ok, err = frl.Take(ctx, "bar")
                require.NoError(t, err)
                require.Equal(t, i < max, ok)
            }
            time.Sleep(2 * time.Second)
            for i := 0; i < 4; i++ {
                ok, _ := frl.Take(ctx, "foo")
                require.Equal(t, i < max, ok)
                ok, _ = frl.Take(ctx, "bar")
                require.Equal(t, i < max, ok)
            }
        })
    }
}

86
proxyd/go.mod
Normal file
@ -0,0 +1,86 @@

module github.com/ethereum-optimism/optimism/proxyd

go 1.21

require (
    github.com/BurntSushi/toml v1.3.2
    github.com/alicebob/miniredis v2.5.0+incompatible
    github.com/emirpasic/gods v1.18.1
    github.com/ethereum/go-ethereum v1.13.15
    github.com/go-redsync/redsync/v4 v4.10.0
    github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb
    github.com/gorilla/mux v1.8.0
    github.com/gorilla/websocket v1.5.0
    github.com/hashicorp/golang-lru v1.0.2
    github.com/pkg/errors v0.9.1
    github.com/prometheus/client_golang v1.17.0
    github.com/redis/go-redis/v9 v9.2.1
    github.com/rs/cors v1.10.1
    github.com/stretchr/testify v1.8.4
    github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7
    github.com/xaionaro-go/weightedshuffle v0.0.0-20211213010739-6a74fbc7d24a
    golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa
    golang.org/x/sync v0.5.0
    gopkg.in/yaml.v3 v3.0.1
)

require (
    github.com/DataDog/zstd v1.5.5 // indirect
    github.com/Microsoft/go-winio v0.6.1 // indirect
    github.com/VictoriaMetrics/fastcache v1.12.1 // indirect
    github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302 // indirect
    github.com/beorn7/perks v1.0.1 // indirect
    github.com/bits-and-blooms/bitset v1.10.0 // indirect
    github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect
    github.com/cespare/xxhash/v2 v2.2.0 // indirect
    github.com/cockroachdb/errors v1.11.1 // indirect
    github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
    github.com/cockroachdb/pebble v0.0.0-20231020221949-babd592d2360 // indirect
    github.com/cockroachdb/redact v1.1.5 // indirect
    github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
    github.com/consensys/bavard v0.1.13 // indirect
    github.com/consensys/gnark-crypto v0.12.1 // indirect
    github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 // indirect
    github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect
    github.com/davecgh/go-spew v1.1.1 // indirect
    github.com/deckarep/golang-set/v2 v2.3.1 // indirect
    github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
    github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
    github.com/ethereum/c-kzg-4844 v0.4.0 // indirect
    github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 // indirect
    github.com/getsentry/sentry-go v0.25.0 // indirect
    github.com/go-ole/go-ole v1.3.0 // indirect
    github.com/gofrs/flock v0.8.1 // indirect
    github.com/gogo/protobuf v1.3.2 // indirect
    github.com/gomodule/redigo v1.8.9 // indirect
    github.com/hashicorp/errwrap v1.1.0 // indirect
    github.com/hashicorp/go-multierror v1.1.1 // indirect
    github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
    github.com/holiman/uint256 v1.2.4 // indirect
    github.com/klauspost/compress v1.17.1 // indirect
    github.com/kr/pretty v0.3.1 // indirect
    github.com/kr/text v0.2.0 // indirect
    github.com/mattn/go-runewidth v0.0.15 // indirect
    github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
    github.com/mmcloughlin/addchain v0.4.0 // indirect
    github.com/olekukonko/tablewriter v0.0.5 // indirect
    github.com/pmezard/go-difflib v1.0.0 // indirect
    github.com/prometheus/client_model v0.5.0 // indirect
    github.com/prometheus/common v0.45.0 // indirect
    github.com/prometheus/procfs v0.12.0 // indirect
    github.com/rivo/uniseg v0.4.4 // indirect
    github.com/rogpeppe/go-internal v1.11.0 // indirect
    github.com/shirou/gopsutil v3.21.11+incompatible // indirect
    github.com/supranational/blst v0.3.11 // indirect
    github.com/tklauser/go-sysconf v0.3.12 // indirect
    github.com/tklauser/numcpus v0.6.1 // indirect
    github.com/yuin/gopher-lua v1.1.0 // indirect
    github.com/yusufpapurcu/wmi v1.2.3 // indirect
    golang.org/x/crypto v0.17.0 // indirect
    golang.org/x/mod v0.14.0 // indirect
    golang.org/x/sys v0.16.0 // indirect
    golang.org/x/text v0.14.0 // indirect
    golang.org/x/tools v0.15.0 // indirect
    google.golang.org/protobuf v1.33.0 // indirect
    rsc.io/tmplfunc v0.0.3 // indirect
)

290
proxyd/go.sum
Normal file
@ -0,0 +1,290 @@

github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ=
github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40=
github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o=
github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302 h1:uvdUDbHQHO85qeSydJtItA4T55Pw6BtAejd0APRJOCE=
github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
github.com/alicebob/miniredis v2.5.0+incompatible h1:yBHoLpsyjupjz3NL3MhKMVkR41j82Yjf3KFv7ApYzUI=
github.com/alicebob/miniredis v2.5.0+incompatible/go.mod h1:8HZjEj4yU0dwhYHky+DxYx+6BMjkBbe5ONFIF1MXffk=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.10.0 h1:ePXTeiPEazB5+opbv5fr8umg2R/1NlzgDsyepwsSr88=
github.com/bits-and-blooms/bitset v1.10.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U=
github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4=
github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
github.com/cockroachdb/errors v1.11.1 h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8=
github.com/cockroachdb/errors v1.11.1/go.mod h1:8MUxA3Gi6b25tYlFEBGLf+D8aISL+M4MIpiWMSNRfxw=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
github.com/cockroachdb/pebble v0.0.0-20231020221949-babd592d2360 h1:x1dzGu9e1FYmkG8mL9emtdWD1EzH/17SijnoLvKvPiM=
github.com/cockroachdb/pebble v0.0.0-20231020221949-babd592d2360/go.mod h1:sEHm5NOXxyiAoKWhoFxT8xMgd/f3RA6qUqQ1BXKrh2E=
github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=
github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ=
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M=
github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY=
github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 h1:d28BXYi+wUpz1KBmiF9bWrjEMacUEREV6MBi2ODnrfQ=
github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233/go.mod h1:geZJZH3SzKCqnz5VT0q/DyIG/tvu/dZk+VIfXicupJs=
github.com/crate-crypto/go-kzg-4844 v0.7.0 h1:C0vgZRk4q4EZ/JgPfzuSoxdCq3C3mOZMBShovmncxvA=
github.com/crate-crypto/go-kzg-4844 v0.7.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set/v2 v2.3.1 h1:vjmkvJt/IV27WXPyYQpAh4bRyWJc5Y435D17XQ9QU5A=
github.com/deckarep/golang-set/v2 v2.3.1/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y=
github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
github.com/ethereum/c-kzg-4844 v0.4.0 h1:3MS1s4JtA868KpJxroZoepdV0ZKBp3u/O5HcZ7R3nlY=
github.com/ethereum/c-kzg-4844 v0.4.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
github.com/ethereum/go-ethereum v1.13.15 h1:U7sSGYGo4SPjP6iNIifNoyIAiNjrmQkz6EwQG+/EZWo=
github.com/ethereum/go-ethereum v1.13.15/go.mod h1:TN8ZiHrdJwSe8Cb6x+p0hs5CxhJZPbqB7hHkaUXcmIU=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 h1:BAIP2GihuqhwdILrV+7GJel5lyPV3u1+PgzrWLc0TkE=
github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46/go.mod h1:QNpY22eby74jVhqH4WhDLDwxc/vqsern6pW+u2kbkpc=
github.com/getsentry/sentry-go v0.25.0 h1:q6Eo+hS+yoJlTO3uu/azhQadsD8V+jQn2D8VvX1eOyI=
github.com/getsentry/sentry-go v0.25.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg=
github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-redis/redis/v7 v7.4.0 h1:7obg6wUoj05T0EpY0o8B59S9w5yeMWql7sw2kwNW1x4=
github.com/go-redis/redis/v7 v7.4.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
github.com/go-redis/redis/v8 v8.11.4 h1:kHoYkfZP6+pe04aFTnhDH6GDROa5yJdHJVNxV3F46Tg=
github.com/go-redis/redis/v8 v8.11.4/go.mod h1:2Z2wHZXdQpCDXEGzqMockDpNyYvi2l4Pxt6RJr792+w=
github.com/go-redsync/redsync/v4 v4.10.0 h1:hTeAak4C73mNBQSTq6KCKDFaiIlfC+z5yTTl8fCJuBs=
github.com/go-redsync/redsync/v4 v4.10.0/go.mod h1:ZfayzutkgeBmEmBlUR3j+rF6kN44UUGtEdfzhBFZTPc=
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/gomodule/redigo v1.8.9 h1:Sl3u+2BI/kk+VEatbj0scLdrFhjPmbxOc1myhDP41ws=
github.com/gomodule/redigo v1.8.9/go.mod h1:7ArFNvsTjH8GMMzB4uy1snslv2BwmginuMs06a1uzZE=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
|
||||
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
|
||||
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
|
||||
github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
|
||||
github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
|
||||
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
|
||||
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.17.1 h1:NE3C767s2ak2bweCZo3+rdP4U/HoyVXLv/X9f2gPS5g=
|
||||
github.com/klauspost/compress v1.17.1/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
|
||||
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
|
||||
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
|
||||
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
|
||||
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
|
||||
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
|
||||
github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
|
||||
github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
|
||||
github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
|
||||
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
|
||||
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
|
||||
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
|
||||
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
|
||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
|
||||
github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
|
||||
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
|
||||
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
|
||||
github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM=
|
||||
github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
|
||||
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
|
||||
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
||||
github.com/redis/go-redis/v9 v9.2.1 h1:WlYJg71ODF0dVspZZCpYmoF1+U1Jjk9Rwd7pq6QmlCg=
|
||||
github.com/redis/go-redis/v9 v9.2.1/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M=
|
||||
github.com/redis/rueidis v1.0.19 h1:s65oWtotzlIFN8eMPhyYwxlwLR1lUdhza2KtWprKYSo=
|
||||
github.com/redis/rueidis v1.0.19/go.mod h1:8B+r5wdnjwK3lTFml5VtxjzGOQAC+5UmujoD12pDrEo=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
|
||||
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
|
||||
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
|
||||
github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo=
|
||||
github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203 h1:QVqDTf3h2WHt08YuiTGPZLls0Wq99X9bWd0Q5ZSBesM=
|
||||
github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203/go.mod h1:oqN97ltKNihBbwlX8dLpwxCl3+HnXKV/R0e+sRLd9C8=
|
||||
github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4=
|
||||
github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
|
||||
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
|
||||
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
|
||||
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
|
||||
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
|
||||
github.com/xaionaro-go/weightedshuffle v0.0.0-20211213010739-6a74fbc7d24a h1:WS5nQycV+82Ndezq0UcMcGVG416PZgcJPqI/bLM824A=
|
||||
github.com/xaionaro-go/weightedshuffle v0.0.0-20211213010739-6a74fbc7d24a/go.mod h1:0KAUfC65le2kMu4fnBxm7Xj3PkQ3MBpJbF5oMmqufBc=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/gopher-lua v1.1.0 h1:BojcDhfyDWgU2f2TOzYK/g5p2gxMrku8oupLDqlnSqE=
|
||||
github.com/yuin/gopher-lua v1.1.0/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw=
|
||||
github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
|
||||
github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
|
||||
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
|
||||
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ=
|
||||
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
|
||||
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg=
|
||||
golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
|
||||
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=
|
||||
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.15.0 h1:zdAyfUGbYmuVokhzVmghFl2ZJh5QhcfebBgmVPFYA+8=
|
||||
golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
|
||||
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU=
|
||||
rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=
|
42
proxyd/integration_tests/batch_timeout_test.go
Normal file
@ -0,0 +1,42 @@
package integration_tests

import (
	"net/http"
	"os"
	"testing"
	"time"

	"github.com/ethereum-optimism/optimism/proxyd"
	"github.com/stretchr/testify/require"
)

const (
	batchTimeoutResponse = `{"error":{"code":-32015,"message":"gateway timeout"},"id":null,"jsonrpc":"2.0"}`
)

func TestBatchTimeout(t *testing.T) {
	slowBackend := NewMockBackend(nil)
	defer slowBackend.Close()

	require.NoError(t, os.Setenv("SLOW_BACKEND_RPC_URL", slowBackend.URL()))

	config := ReadConfig("batch_timeout")
	client := NewProxydClient("http://127.0.0.1:8545")
	_, shutdown, err := proxyd.Start(config)
	require.NoError(t, err)
	defer shutdown()

	slowBackend.SetHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// check the config. The sleep duration should be at least double the server.timeout_seconds config to prevent flakes
		time.Sleep(time.Second * 2)
		BatchedResponseHandler(200, goodResponse)(w, r)
	}))
	res, statusCode, err := client.SendBatchRPC(
		NewRPCReq("1", "eth_chainId", nil),
		NewRPCReq("1", "eth_chainId", nil),
	)
	require.NoError(t, err)
	require.Equal(t, 504, statusCode)
	RequireEqualJSON(t, []byte(batchTimeoutResponse), res)
	require.Equal(t, 1, len(slowBackend.Requests()))
}
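
The 2-second sleep above only trips the batch timeout because the batch_timeout config (not shown in this diff) sets server.timeout_seconds well below it. For comparison, a single-request variant of the same test could look like the sketch below; the "single_timeout" config name and the assumption that a lone request surfaces the same HTTP 504 are illustrative, not part of this PR.

// Sketch only: single-request counterpart of TestBatchTimeout above.
// Assumes a hypothetical "single_timeout" config with a short
// server.timeout_seconds; the 504 expectation mirrors the batch case.
func TestSingleTimeoutSketch(t *testing.T) {
	slowBackend := NewMockBackend(nil)
	defer slowBackend.Close()
	require.NoError(t, os.Setenv("SLOW_BACKEND_RPC_URL", slowBackend.URL()))

	config := ReadConfig("single_timeout") // hypothetical config file
	client := NewProxydClient("http://127.0.0.1:8545")
	_, shutdown, err := proxyd.Start(config)
	require.NoError(t, err)
	defer shutdown()

	slowBackend.SetHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		time.Sleep(time.Second * 2) // sleep past the configured timeout
		SingleResponseHandler(200, goodResponse)(w, r)
	}))

	_, statusCode, err := client.SendRPC("eth_chainId", nil)
	require.NoError(t, err)
	require.Equal(t, 504, statusCode) // the proxy answers with a gateway timeout
}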
188
proxyd/integration_tests/batching_test.go
Normal file
@ -0,0 +1,188 @@
package integration_tests

import (
	"net/http"
	"os"
	"testing"

	"github.com/ethereum-optimism/optimism/proxyd"
	"github.com/stretchr/testify/require"
)

func TestBatching(t *testing.T) {
	config := ReadConfig("batching")

	chainIDResponse1 := `{"jsonrpc": "2.0", "result": "hello1", "id": 1}`
	chainIDResponse2 := `{"jsonrpc": "2.0", "result": "hello2", "id": 2}`
	chainIDResponse3 := `{"jsonrpc": "2.0", "result": "hello3", "id": 3}`
	netVersionResponse1 := `{"jsonrpc": "2.0", "result": "1.0", "id": 1}`
	callResponse1 := `{"jsonrpc": "2.0", "result": "ekans1", "id": 1}`

	ethAccountsResponse2 := `{"jsonrpc": "2.0", "result": [], "id": 2}`

	backendResTooLargeResponse1 := `{"error":{"code":-32020,"message":"backend response too large"},"id":1,"jsonrpc":"2.0"}`
	backendResTooLargeResponse2 := `{"error":{"code":-32020,"message":"backend response too large"},"id":2,"jsonrpc":"2.0"}`

	type mockResult struct {
		method string
		id     string
		result interface{}
	}

	chainIDMock1 := mockResult{"eth_chainId", "1", "hello1"}
	chainIDMock2 := mockResult{"eth_chainId", "2", "hello2"}
	chainIDMock3 := mockResult{"eth_chainId", "3", "hello3"}
	netVersionMock1 := mockResult{"net_version", "1", "1.0"}
	callMock1 := mockResult{"eth_call", "1", "ekans1"}

	tests := []struct {
		name                 string
		handler              http.Handler
		mocks                []mockResult
		reqs                 []*proxyd.RPCReq
		expectedRes          string
		maxUpstreamBatchSize int
		numExpectedForwards  int
		maxResponseSizeBytes int64
	}{
		{
			name:  "backend returns batches out of order",
			mocks: []mockResult{chainIDMock1, chainIDMock2, chainIDMock3},
			reqs: []*proxyd.RPCReq{
				NewRPCReq("1", "eth_chainId", nil),
				NewRPCReq("2", "eth_chainId", nil),
				NewRPCReq("3", "eth_chainId", nil),
			},
			expectedRes:          asArray(chainIDResponse1, chainIDResponse2, chainIDResponse3),
			maxUpstreamBatchSize: 2,
			numExpectedForwards:  2,
		},
		{
			// infura behavior
			name:    "backend returns single RPC response object as error",
			handler: SingleResponseHandler(500, `{"jsonrpc":"2.0","error":{"code":-32001,"message":"internal server error"},"id":1}`),
			reqs: []*proxyd.RPCReq{
				NewRPCReq("1", "eth_chainId", nil),
				NewRPCReq("2", "eth_chainId", nil),
			},
			expectedRes: asArray(
				`{"error":{"code":-32011,"message":"no backends available for method"},"id":1,"jsonrpc":"2.0"}`,
				`{"error":{"code":-32011,"message":"no backends available for method"},"id":2,"jsonrpc":"2.0"}`,
			),
			maxUpstreamBatchSize: 10,
			numExpectedForwards:  1,
		},
		{
			name:    "backend returns single RPC response object for minibatches",
			handler: SingleResponseHandler(500, `{"jsonrpc":"2.0","error":{"code":-32001,"message":"internal server error"},"id":1}`),
			reqs: []*proxyd.RPCReq{
				NewRPCReq("1", "eth_chainId", nil),
				NewRPCReq("2", "eth_chainId", nil),
			},
			expectedRes: asArray(
				`{"error":{"code":-32011,"message":"no backends available for method"},"id":1,"jsonrpc":"2.0"}`,
				`{"error":{"code":-32011,"message":"no backends available for method"},"id":2,"jsonrpc":"2.0"}`,
			),
			maxUpstreamBatchSize: 1,
			numExpectedForwards:  2,
		},
		{
			name: "duplicate request ids are on distinct batches",
			mocks: []mockResult{
				netVersionMock1,
				chainIDMock2,
				chainIDMock1,
				callMock1,
			},
			reqs: []*proxyd.RPCReq{
				NewRPCReq("1", "net_version", nil),
				NewRPCReq("2", "eth_chainId", nil),
				NewRPCReq("1", "eth_chainId", nil),
				NewRPCReq("1", "eth_call", nil),
			},
			expectedRes:          asArray(netVersionResponse1, chainIDResponse2, chainIDResponse1, callResponse1),
			maxUpstreamBatchSize: 2,
			numExpectedForwards:  3,
		},
		{
			name:  "over max size",
			mocks: []mockResult{},
			reqs: []*proxyd.RPCReq{
				NewRPCReq("1", "net_version", nil),
				NewRPCReq("2", "eth_chainId", nil),
				NewRPCReq("3", "eth_chainId", nil),
				NewRPCReq("4", "eth_call", nil),
				NewRPCReq("5", "eth_call", nil),
				NewRPCReq("6", "eth_call", nil),
			},
			expectedRes:          "{\"error\":{\"code\":-32014,\"message\":\"over batch size custom message\"},\"id\":null,\"jsonrpc\":\"2.0\"}",
			maxUpstreamBatchSize: 2,
			numExpectedForwards:  0,
		},
		{
			name: "eth_accounts does not get forwarded",
			mocks: []mockResult{
				callMock1,
			},
			reqs: []*proxyd.RPCReq{
				NewRPCReq("1", "eth_call", nil),
				NewRPCReq("2", "eth_accounts", nil),
			},
			expectedRes:          asArray(callResponse1, ethAccountsResponse2),
			maxUpstreamBatchSize: 2,
			numExpectedForwards:  1,
		},
		{
			name:  "large upstream response gets dropped",
			mocks: []mockResult{chainIDMock1, chainIDMock2},
			reqs: []*proxyd.RPCReq{
				NewRPCReq("1", "eth_chainId", nil),
				NewRPCReq("2", "eth_chainId", nil),
			},
			expectedRes:          asArray(backendResTooLargeResponse1, backendResTooLargeResponse2),
			maxUpstreamBatchSize: 2,
			numExpectedForwards:  1,
			maxResponseSizeBytes: 1,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			config.Server.MaxUpstreamBatchSize = tt.maxUpstreamBatchSize
			config.BackendOptions.MaxResponseSizeBytes = tt.maxResponseSizeBytes

			handler := tt.handler
			if handler == nil {
				router := NewBatchRPCResponseRouter()
				for _, mock := range tt.mocks {
					router.SetRoute(mock.method, mock.id, mock.result)
				}
				handler = router
			}

			goodBackend := NewMockBackend(handler)
			defer goodBackend.Close()
			require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL()))

			client := NewProxydClient("http://127.0.0.1:8545")
			_, shutdown, err := proxyd.Start(config)
			require.NoError(t, err)
			defer shutdown()

			res, statusCode, err := client.SendBatchRPC(tt.reqs...)
			require.NoError(t, err)
			require.Equal(t, http.StatusOK, statusCode)
			RequireEqualJSON(t, []byte(tt.expectedRes), res)

			if tt.numExpectedForwards != 0 {
				require.Equal(t, tt.numExpectedForwards, len(goodBackend.Requests()))
			}

			if handler, ok := handler.(*BatchRPCResponseRouter); ok {
				for i, mock := range tt.mocks {
					require.Equal(t, 1, handler.GetNumCalls(mock.method, mock.id), i)
				}
			}
		})
	}
}
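
Each numExpectedForwards value above is the number of upstream HTTP calls after proxyd strips requests it answers itself (eth_accounts) and splits the rest into minibatches of at most MaxUpstreamBatchSize; duplicate request IDs force an extra split, as the "duplicate request ids" case shows. A small illustrative helper, not part of this PR, for the simple no-duplicates case:

// expectedForwards mirrors the arithmetic behind numExpectedForwards for
// batches without duplicate IDs: ceiling division of the forwardable
// request count by the upstream batch size limit. Illustrative only.
func expectedForwards(forwardable, maxUpstreamBatchSize int) int {
	if forwardable == 0 || maxUpstreamBatchSize <= 0 {
		return 0
	}
	return (forwardable + maxUpstreamBatchSize - 1) / maxUpstreamBatchSize
}

// e.g. expectedForwards(3, 2) == 2 matches the "backend returns batches out
// of order" case, and expectedForwards(1, 2) == 1 matches the eth_accounts
// case, where only one of the two requests is forwardable.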
275
proxyd/integration_tests/caching_test.go
Normal file
@ -0,0 +1,275 @@
package integration_tests

import (
	"bytes"
	"fmt"
	"os"
	"testing"
	"time"

	"github.com/alicebob/miniredis"
	"github.com/ethereum-optimism/optimism/proxyd"
	"github.com/stretchr/testify/require"
)

func TestCaching(t *testing.T) {
	redis, err := miniredis.Run()
	require.NoError(t, err)
	defer redis.Close()

	hdlr := NewBatchRPCResponseRouter()
	/* cacheable */
	hdlr.SetRoute("eth_chainId", "999", "0x420")
	hdlr.SetRoute("net_version", "999", "0x1234")
	hdlr.SetRoute("eth_getBlockTransactionCountByHash", "999", "eth_getBlockTransactionCountByHash")
	hdlr.SetRoute("eth_getBlockByHash", "999", "eth_getBlockByHash")
	hdlr.SetRoute("eth_getTransactionByHash", "999", "eth_getTransactionByHash")
	hdlr.SetRoute("eth_getTransactionByBlockHashAndIndex", "999", "eth_getTransactionByBlockHashAndIndex")
	hdlr.SetRoute("eth_getUncleByBlockHashAndIndex", "999", "eth_getUncleByBlockHashAndIndex")
	hdlr.SetRoute("eth_getTransactionReceipt", "999", "eth_getTransactionReceipt")
	hdlr.SetRoute("debug_getRawReceipts", "999", "debug_getRawReceipts")
	/* not cacheable */
	hdlr.SetRoute("eth_getBlockByNumber", "999", "eth_getBlockByNumber")
	hdlr.SetRoute("eth_blockNumber", "999", "eth_blockNumber")
	hdlr.SetRoute("eth_call", "999", "eth_call")

	backend := NewMockBackend(hdlr)
	defer backend.Close()

	require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", backend.URL()))
	require.NoError(t, os.Setenv("REDIS_URL", fmt.Sprintf("redis://127.0.0.1:%s", redis.Port())))
	config := ReadConfig("caching")
	client := NewProxydClient("http://127.0.0.1:8545")
	_, shutdown, err := proxyd.Start(config)
	require.NoError(t, err)
	defer shutdown()

	// allow time for the block number fetcher to fire
	time.Sleep(1500 * time.Millisecond)

	tests := []struct {
		method       string
		params       []interface{}
		response     string
		backendCalls int
	}{
		/* cacheable */
		{
			"eth_chainId",
			nil,
			"{\"jsonrpc\": \"2.0\", \"result\": \"0x420\", \"id\": 999}",
			1,
		},
		{
			"net_version",
			nil,
			"{\"jsonrpc\": \"2.0\", \"result\": \"0x1234\", \"id\": 999}",
			1,
		},
		{
			"eth_getBlockTransactionCountByHash",
			[]interface{}{"0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238"},
			"{\"jsonrpc\": \"2.0\", \"result\": \"eth_getBlockTransactionCountByHash\", \"id\": 999}",
			1,
		},
		{
			"eth_getBlockByHash",
			[]interface{}{"0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b", "false"},
			"{\"jsonrpc\": \"2.0\", \"result\": \"eth_getBlockByHash\", \"id\": 999}",
			1,
		},
		{
			"eth_getTransactionByBlockHashAndIndex",
			[]interface{}{"0xe670ec64341771606e55d6b4ca35a1a6b75ee3d5145a99d05921026d1527331", "0x55"},
			"{\"jsonrpc\": \"2.0\", \"result\": \"eth_getTransactionByBlockHashAndIndex\", \"id\": 999}",
			1,
		},
		{
			"eth_getUncleByBlockHashAndIndex",
			[]interface{}{"0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238", "0x90"},
			"{\"jsonrpc\": \"2.0\", \"result\": \"eth_getUncleByBlockHashAndIndex\", \"id\": 999}",
			1,
		},
		/* not cacheable */
		{
			"eth_getBlockByNumber",
			[]interface{}{
				"0x1",
				true,
			},
			"{\"jsonrpc\": \"2.0\", \"result\": \"eth_getBlockByNumber\", \"id\": 999}",
			2,
		},
		{
			"eth_getTransactionReceipt",
			[]interface{}{"0x85d995eba9763907fdf35cd2034144dd9d53ce32cbec21349d4b12823c6860c5"},
			"{\"jsonrpc\": \"2.0\", \"result\": \"eth_getTransactionReceipt\", \"id\": 999}",
			2,
		},
		{
			"eth_getTransactionByHash",
			[]interface{}{"0x88df016429689c079f3b2f6ad39fa052532c56795b733da78a91ebe6a713944b"},
			"{\"jsonrpc\": \"2.0\", \"result\": \"eth_getTransactionByHash\", \"id\": 999}",
			2,
		},
		{
			"eth_call",
			[]interface{}{
				struct {
					To string `json:"to"`
				}{
					"0x1234",
				},
				"0x60",
			},
			"{\"jsonrpc\": \"2.0\", \"result\": \"eth_call\", \"id\": 999}",
			2,
		},
		{
			"eth_blockNumber",
			nil,
			"{\"jsonrpc\": \"2.0\", \"result\": \"eth_blockNumber\", \"id\": 999}",
			2,
		},
		{
			"eth_call",
			[]interface{}{
				struct {
					To string `json:"to"`
				}{
					"0x1234",
				},
				"latest",
			},
			"{\"jsonrpc\": \"2.0\", \"result\": \"eth_call\", \"id\": 999}",
			2,
		},
		{
			"eth_call",
			[]interface{}{
				struct {
					To string `json:"to"`
				}{
					"0x1234",
				},
				"pending",
			},
			"{\"jsonrpc\": \"2.0\", \"result\": \"eth_call\", \"id\": 999}",
			2,
		},
	}
	for _, tt := range tests {
		t.Run(tt.method, func(t *testing.T) {
			resRaw, _, err := client.SendRPC(tt.method, tt.params)
			require.NoError(t, err)
			resCache, _, err := client.SendRPC(tt.method, tt.params)
			require.NoError(t, err)
			RequireEqualJSON(t, []byte(tt.response), resCache)
			RequireEqualJSON(t, resRaw, resCache)
			require.Equal(t, tt.backendCalls, countRequests(backend, tt.method))
			backend.Reset()
		})
	}

	t.Run("nil responses should not be cached", func(t *testing.T) {
		hdlr.SetRoute("eth_getBlockByHash", "999", nil)
		resRaw, _, err := client.SendRPC("eth_getBlockByHash", []interface{}{"0x123"})
		require.NoError(t, err)
		resCache, _, err := client.SendRPC("eth_getBlockByHash", []interface{}{"0x123"})
		require.NoError(t, err)
		RequireEqualJSON(t, []byte("{\"id\":999,\"jsonrpc\":\"2.0\",\"result\":null}"), resRaw)
		RequireEqualJSON(t, resRaw, resCache)
		require.Equal(t, 2, countRequests(backend, "eth_getBlockByHash"))
	})

	t.Run("debug_getRawReceipts with 0 receipts should not be cached", func(t *testing.T) {
		backend.Reset()
		hdlr.SetRoute("debug_getRawReceipts", "999", []string{})
		resRaw, _, err := client.SendRPC("debug_getRawReceipts", []interface{}{"0x88420081ab9c6d50dc57af36b541c6b8a7b3e9c0d837b0414512c4c5883560ff"})
		require.NoError(t, err)
		resCache, _, err := client.SendRPC("debug_getRawReceipts", []interface{}{"0x88420081ab9c6d50dc57af36b541c6b8a7b3e9c0d837b0414512c4c5883560ff"})
		require.NoError(t, err)
		RequireEqualJSON(t, []byte("{\"id\":999,\"jsonrpc\":\"2.0\",\"result\":[]}"), resRaw)
		RequireEqualJSON(t, resRaw, resCache)
		require.Equal(t, 2, countRequests(backend, "debug_getRawReceipts"))
	})

	t.Run("debug_getRawReceipts with more than 0 receipts should be cached", func(t *testing.T) {
		backend.Reset()
		hdlr.SetRoute("debug_getRawReceipts", "999", []string{"a"})
		resRaw, _, err := client.SendRPC("debug_getRawReceipts", []interface{}{"0x88420081ab9c6d50dc57af36b541c6b8a7b3e9c0d837b0414512c4c5883560bb"})
		require.NoError(t, err)
		resCache, _, err := client.SendRPC("debug_getRawReceipts", []interface{}{"0x88420081ab9c6d50dc57af36b541c6b8a7b3e9c0d837b0414512c4c5883560bb"})
		require.NoError(t, err)
		RequireEqualJSON(t, []byte("{\"id\":999,\"jsonrpc\":\"2.0\",\"result\":[\"a\"]}"), resRaw)
		RequireEqualJSON(t, resRaw, resCache)
		require.Equal(t, 1, countRequests(backend, "debug_getRawReceipts"))
	})
}

func TestBatchCaching(t *testing.T) {
	redis, err := miniredis.Run()
	require.NoError(t, err)
	defer redis.Close()

	hdlr := NewBatchRPCResponseRouter()
	hdlr.SetRoute("eth_chainId", "1", "0x420")
	hdlr.SetRoute("net_version", "1", "0x1234")
	hdlr.SetRoute("eth_call", "1", "dummy_call")
	hdlr.SetRoute("eth_getBlockByHash", "1", "eth_getBlockByHash")

	backend := NewMockBackend(hdlr)
	defer backend.Close()

	require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", backend.URL()))
	require.NoError(t, os.Setenv("REDIS_URL", fmt.Sprintf("redis://127.0.0.1:%s", redis.Port())))

	config := ReadConfig("caching")
	client := NewProxydClient("http://127.0.0.1:8545")
	_, shutdown, err := proxyd.Start(config)
	require.NoError(t, err)
	defer shutdown()

	// allow time for the block number fetcher to fire
	time.Sleep(1500 * time.Millisecond)

	goodChainIdResponse := "{\"jsonrpc\": \"2.0\", \"result\": \"0x420\", \"id\": 1}"
	goodNetVersionResponse := "{\"jsonrpc\": \"2.0\", \"result\": \"0x1234\", \"id\": 1}"
	goodEthCallResponse := "{\"jsonrpc\": \"2.0\", \"result\": \"dummy_call\", \"id\": 1}"
	goodEthGetBlockByHash := "{\"jsonrpc\": \"2.0\", \"result\": \"eth_getBlockByHash\", \"id\": 1}"

	res, _, err := client.SendBatchRPC(
		NewRPCReq("1", "eth_chainId", nil),
		NewRPCReq("1", "net_version", nil),
		NewRPCReq("1", "eth_getBlockByHash", []interface{}{"0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b", "false"}),
	)
	require.NoError(t, err)
	RequireEqualJSON(t, []byte(asArray(goodChainIdResponse, goodNetVersionResponse, goodEthGetBlockByHash)), res)
	require.Equal(t, 1, countRequests(backend, "eth_chainId"))
	require.Equal(t, 1, countRequests(backend, "net_version"))
	require.Equal(t, 1, countRequests(backend, "eth_getBlockByHash"))

	backend.Reset()
	res, _, err = client.SendBatchRPC(
		NewRPCReq("1", "eth_chainId", nil),
		NewRPCReq("1", "eth_call", []interface{}{`{"to":"0x1234"}`, "pending"}),
		NewRPCReq("1", "net_version", nil),
		NewRPCReq("1", "eth_getBlockByHash", []interface{}{"0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b", "false"}),
	)
	require.NoError(t, err)
	RequireEqualJSON(t, []byte(asArray(goodChainIdResponse, goodEthCallResponse, goodNetVersionResponse, goodEthGetBlockByHash)), res)
	require.Equal(t, 0, countRequests(backend, "eth_chainId"))
	require.Equal(t, 0, countRequests(backend, "net_version"))
	require.Equal(t, 0, countRequests(backend, "eth_getBlockByHash"))
	require.Equal(t, 1, countRequests(backend, "eth_call"))
}

func countRequests(backend *MockBackend, name string) int {
	var count int
	for _, req := range backend.Requests() {
		if bytes.Contains(req.Body, []byte(name)) {
			count++
		}
	}
	return count
}
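
The cacheable/not-cacheable split in TestCaching follows the backendCalls column: lookups keyed by an immutable block or transaction hash are served from the cache on the second request (1 backend call), while anything keyed by a block number or tag such as latest or pending is re-fetched every time (2 backend calls). A hypothetical classifier capturing that rule, derived from the table above rather than from proxyd's actual cache code:

// isCacheableMethod is a hypothetical sketch of the rule the table above
// exercises: hash-keyed reads are immutable and safe to cache; number- or
// tag-keyed reads (and receipt/transaction lookups, which can change until
// finality) are not. proxyd's real cache logic lives elsewhere in the tree.
func isCacheableMethod(method string) bool {
	switch method {
	case "eth_chainId", "net_version",
		"eth_getBlockTransactionCountByHash",
		"eth_getBlockByHash",
		"eth_getTransactionByBlockHashAndIndex",
		"eth_getUncleByBlockHashAndIndex":
		return true
	default:
		return false
	}
}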
1005
proxyd/integration_tests/consensus_test.go
Normal file
File diff suppressed because it is too large
288
proxyd/integration_tests/failover_test.go
Normal file
@ -0,0 +1,288 @@
package integration_tests

import (
	"fmt"
	"net/http"
	"os"
	"sync/atomic"
	"testing"
	"time"

	"github.com/alicebob/miniredis"
	"github.com/ethereum-optimism/optimism/proxyd"
	"github.com/stretchr/testify/require"
)

const (
	goodResponse       = `{"jsonrpc": "2.0", "result": "hello", "id": 999}`
	noBackendsResponse = `{"error":{"code":-32011,"message":"no backends available for method"},"id":999,"jsonrpc":"2.0"}`
	unexpectedResponse = `{"error":{"code":-32011,"message":"some error"},"id":999,"jsonrpc":"2.0"}`
)

func TestFailover(t *testing.T) {
	goodBackend := NewMockBackend(BatchedResponseHandler(200, goodResponse))
	defer goodBackend.Close()
	badBackend := NewMockBackend(nil)
	defer badBackend.Close()

	require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL()))
	require.NoError(t, os.Setenv("BAD_BACKEND_RPC_URL", badBackend.URL()))

	config := ReadConfig("failover")
	client := NewProxydClient("http://127.0.0.1:8545")
	_, shutdown, err := proxyd.Start(config)
	require.NoError(t, err)
	defer shutdown()

	tests := []struct {
		name    string
		handler http.Handler
	}{
		{
			"backend responds 200 with non-JSON response",
			http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(200)
				_, _ = w.Write([]byte("this data is not JSON!"))
			}),
		},
		{
			"backend responds with no body",
			http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(200)
			}),
		},
	}
	codes := []int{
		300,
		301,
		302,
		401,
		403,
		429,
		500,
		503,
	}
	for _, code := range codes {
		tests = append(tests, struct {
			name    string
			handler http.Handler
		}{
			fmt.Sprintf("backend %d", code),
			http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(code)
			}),
		})
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			badBackend.SetHandler(tt.handler)
			res, statusCode, err := client.SendRPC("eth_chainId", nil)
			require.NoError(t, err)
			require.Equal(t, 200, statusCode)
			RequireEqualJSON(t, []byte(goodResponse), res)
			require.Equal(t, 1, len(badBackend.Requests()))
			require.Equal(t, 1, len(goodBackend.Requests()))
			badBackend.Reset()
			goodBackend.Reset()
		})
	}

	t.Run("backend times out and falls back to another", func(t *testing.T) {
		badBackend.SetHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			time.Sleep(2 * time.Second)
			_, _ = w.Write([]byte("[{}]"))
		}))
		res, statusCode, err := client.SendRPC("eth_chainId", nil)
		require.NoError(t, err)
		require.Equal(t, 200, statusCode)
		RequireEqualJSON(t, []byte(goodResponse), res)
		require.Equal(t, 1, len(badBackend.Requests()))
		require.Equal(t, 1, len(goodBackend.Requests()))
		goodBackend.Reset()
		badBackend.Reset()
	})

	t.Run("works with a batch request", func(t *testing.T) {
		goodBackend.SetHandler(BatchedResponseHandler(200, goodResponse, goodResponse))
		badBackend.SetHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(500)
		}))
		res, statusCode, err := client.SendBatchRPC(
			NewRPCReq("1", "eth_chainId", nil),
			NewRPCReq("2", "eth_chainId", nil),
		)
		require.NoError(t, err)
		require.Equal(t, 200, statusCode)
		RequireEqualJSON(t, []byte(asArray(goodResponse, goodResponse)), res)
		require.Equal(t, 1, len(badBackend.Requests()))
		require.Equal(t, 1, len(goodBackend.Requests()))
		goodBackend.Reset()
		badBackend.Reset()
	})
}

func TestRetries(t *testing.T) {
	backend := NewMockBackend(BatchedResponseHandler(200, goodResponse))
	defer backend.Close()

	require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", backend.URL()))
	config := ReadConfig("retries")
	client := NewProxydClient("http://127.0.0.1:8545")
	_, shutdown, err := proxyd.Start(config)
	require.NoError(t, err)
	defer shutdown()

	attempts := int32(0)
	backend.SetHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		incremented := atomic.AddInt32(&attempts, 1)
		if incremented != 2 {
			w.WriteHeader(500)
			return
		}
		BatchedResponseHandler(200, goodResponse)(w, r)
	}))

	// test case where request eventually succeeds
	res, statusCode, err := client.SendRPC("eth_chainId", nil)
	require.NoError(t, err)
	require.Equal(t, 200, statusCode)
	RequireEqualJSON(t, []byte(goodResponse), res)
	require.Equal(t, 2, len(backend.Requests()))

	// test case where it does not
	backend.Reset()
	attempts = -10
	res, statusCode, err = client.SendRPC("eth_chainId", nil)
	require.NoError(t, err)
	require.Equal(t, 503, statusCode)
	RequireEqualJSON(t, []byte(noBackendsResponse), res)
	require.Equal(t, 4, len(backend.Requests()))
}
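
TestRetries above sees 2 upstream requests when the second attempt succeeds and 4 when every attempt fails, which implies the "retries" config (not shown in this diff) allows three re-sends after the initial attempt. For reference, a generic bounded-retry loop in the same spirit; this is an illustrative sketch, not proxyd's actual retry implementation:

// retryRPC sketches bounded retries: one initial attempt plus up to
// maxRetries re-sends while the upstream keeps failing. Illustrative only.
func retryRPC(maxRetries int, send func() (int, error)) (int, error) {
	var (
		status int
		err    error
	)
	for attempt := 0; attempt <= maxRetries; attempt++ {
		status, err = send()
		if err == nil && status < 500 {
			return status, nil // success, or a non-retryable client response
		}
	}
	// exhausted: the caller maps this to 503 / "no backends available"
	return status, err
}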
func TestOutOfServiceInterval(t *testing.T) {
	okHandler := BatchedResponseHandler(200, goodResponse)
	goodBackend := NewMockBackend(okHandler)
	defer goodBackend.Close()
	badBackend := NewMockBackend(nil)
	defer badBackend.Close()

	require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL()))
	require.NoError(t, os.Setenv("BAD_BACKEND_RPC_URL", badBackend.URL()))

	config := ReadConfig("out_of_service_interval")
	client := NewProxydClient("http://127.0.0.1:8545")
	_, shutdown, err := proxyd.Start(config)
	require.NoError(t, err)
	defer shutdown()

	badBackend.SetHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(503)
	}))

	res, statusCode, err := client.SendRPC("eth_chainId", nil)
	require.NoError(t, err)
	require.Equal(t, 200, statusCode)
	RequireEqualJSON(t, []byte(goodResponse), res)
	require.Equal(t, 2, len(badBackend.Requests()))
	require.Equal(t, 1, len(goodBackend.Requests()))

	res, statusCode, err = client.SendRPC("eth_chainId", nil)
	require.NoError(t, err)
	require.Equal(t, 200, statusCode)
	RequireEqualJSON(t, []byte(goodResponse), res)
	require.Equal(t, 4, len(badBackend.Requests()))
	require.Equal(t, 2, len(goodBackend.Requests()))

	_, statusCode, err = client.SendBatchRPC(
		NewRPCReq("1", "eth_chainId", nil),
		NewRPCReq("1", "eth_chainId", nil),
	)
	require.NoError(t, err)
	require.Equal(t, 200, statusCode)
	require.Equal(t, 8, len(badBackend.Requests()))
	require.Equal(t, 4, len(goodBackend.Requests()))

	time.Sleep(time.Second)
	badBackend.SetHandler(okHandler)

	res, statusCode, err = client.SendRPC("eth_chainId", nil)
	require.NoError(t, err)
	require.Equal(t, 200, statusCode)
	RequireEqualJSON(t, []byte(goodResponse), res)
	require.Equal(t, 9, len(badBackend.Requests()))
	require.Equal(t, 4, len(goodBackend.Requests()))
}

func TestBatchWithPartialFailover(t *testing.T) {
	config := ReadConfig("failover")
	config.Server.MaxUpstreamBatchSize = 2

	goodBackend := NewMockBackend(BatchedResponseHandler(200, goodResponse, goodResponse))
	defer goodBackend.Close()
	badBackend := NewMockBackend(SingleResponseHandler(200, "this data is not JSON!"))
	defer badBackend.Close()

	require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL()))
	require.NoError(t, os.Setenv("BAD_BACKEND_RPC_URL", badBackend.URL()))

	client := NewProxydClient("http://127.0.0.1:8545")
	_, shutdown, err := proxyd.Start(config)
	require.NoError(t, err)
	defer shutdown()

	res, statusCode, err := client.SendBatchRPC(
		NewRPCReq("1", "eth_chainId", nil),
		NewRPCReq("2", "eth_chainId", nil),
		NewRPCReq("3", "eth_chainId", nil),
		NewRPCReq("4", "eth_chainId", nil),
	)
	require.NoError(t, err)
	require.Equal(t, 200, statusCode)
	RequireEqualJSON(t, []byte(asArray(goodResponse, goodResponse, goodResponse, goodResponse)), res)
	require.Equal(t, 2, len(badBackend.Requests()))
	require.Equal(t, 2, len(goodBackend.Requests()))
}

func TestInfuraFailoverOnUnexpectedResponse(t *testing.T) {
	InitLogger()
	// Scenario:
	// 1. Send batch to BAD_BACKEND (Infura)
	// 2. Infura fails completely due to a partially erroneous batch request (one of N+1 request objects is invalid)
	// 3. Assert that the request batch is re-routed to the failover provider
	// 4. Assert that BAD_BACKEND is NOT labeled offline
	// 5. Assert that BAD_BACKEND is NOT retried

	redis, err := miniredis.Run()
	require.NoError(t, err)
	defer redis.Close()

	config := ReadConfig("failover")
	config.Server.MaxUpstreamBatchSize = 2
	config.BackendOptions.MaxRetries = 2
	// Setup redis to detect offline backends
	config.Redis.URL = fmt.Sprintf("redis://127.0.0.1:%s", redis.Port())
	require.NoError(t, err)

	goodBackend := NewMockBackend(BatchedResponseHandler(200, goodResponse, goodResponse))
	defer goodBackend.Close()
	badBackend := NewMockBackend(SingleResponseHandler(200, unexpectedResponse))
	defer badBackend.Close()

	require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL()))
	require.NoError(t, os.Setenv("BAD_BACKEND_RPC_URL", badBackend.URL()))

	client := NewProxydClient("http://127.0.0.1:8545")
	_, shutdown, err := proxyd.Start(config)
	require.NoError(t, err)
	defer shutdown()

	res, statusCode, err := client.SendBatchRPC(
		NewRPCReq("1", "eth_chainId", nil),
		NewRPCReq("2", "eth_chainId", nil),
	)
	require.NoError(t, err)
	require.Equal(t, 200, statusCode)
	RequireEqualJSON(t, []byte(asArray(goodResponse, goodResponse)), res)
	require.Equal(t, 1, len(badBackend.Requests()))
	require.Equal(t, 1, len(goodBackend.Requests()))
}
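
TestInfuraFailoverOnUnexpectedResponse hinges on response shape: a JSON-RPC batch must come back as a JSON array, so the single error object Infura returns for a partially invalid batch is malformed from the proxy's point of view and triggers immediate failover without marking the backend offline. A tiny illustrative shape check (standalone, using only the standard bytes package), not proxyd's actual parsing code:

// isWellFormedBatchResponse is an illustrative sketch: a batch request must
// be answered with a JSON array, so a lone JSON object fails the check.
func isWellFormedBatchResponse(body []byte) bool {
	trimmed := bytes.TrimSpace(body)
	return len(trimmed) > 0 && trimmed[0] == '['
}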
374
proxyd/integration_tests/fallback_test.go
Normal file
@ -0,0 +1,374 @@
package integration_tests

import (
	"context"
	"fmt"
	"net/http"
	"os"
	"path"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common/hexutil"

	"github.com/ethereum-optimism/optimism/proxyd"
	ms "github.com/ethereum-optimism/optimism/proxyd/tools/mockserver/handler"
	"github.com/stretchr/testify/require"
)

func setup_failover(t *testing.T) (map[string]nodeContext, *proxyd.BackendGroup, *ProxydHTTPClient, func(), []time.Time, []time.Time) {
	// setup mock servers
	node1 := NewMockBackend(nil)
	node2 := NewMockBackend(nil)

	dir, err := os.Getwd()
	require.NoError(t, err)

	responses := path.Join(dir, "testdata/consensus_responses.yml")

	h1 := ms.MockedHandler{
		Overrides:    []*ms.MethodTemplate{},
		Autoload:     true,
		AutoloadFile: responses,
	}
	h2 := ms.MockedHandler{
		Overrides:    []*ms.MethodTemplate{},
		Autoload:     true,
		AutoloadFile: responses,
	}

	require.NoError(t, os.Setenv("NODE1_URL", node1.URL()))
	require.NoError(t, os.Setenv("NODE2_URL", node2.URL()))

	node1.SetHandler(http.HandlerFunc(h1.Handler))
	node2.SetHandler(http.HandlerFunc(h2.Handler))

	// setup proxyd
	config := ReadConfig("fallback")
	svr, shutdown, err := proxyd.Start(config)
	require.NoError(t, err)

	// expose the proxyd client
	client := NewProxydClient("http://127.0.0.1:8545")

	// expose the backend group
	bg := svr.BackendGroups["node"]
	require.NotNil(t, bg)
	require.NotNil(t, bg.Consensus)
	require.Equal(t, 2, len(bg.Backends)) // should match config

	// convenient mapping to access the nodes by name
	nodes := map[string]nodeContext{
		"normal": {
			mockBackend: node1,
			backend:     bg.Backends[0],
			handler:     &h1,
		},
		"fallback": {
			mockBackend: node2,
			backend:     bg.Backends[1],
			handler:     &h2,
		},
	}
	normalTimestamps := []time.Time{}
	fallbackTimestamps := []time.Time{}

	return nodes, bg, client, shutdown, normalTimestamps, fallbackTimestamps
}
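
setup_failover returns a map of nodeContext values, but the type's definition does not appear in this part of the diff (it likely lives in the suppressed consensus_test.go). From its usage in this file, the shape is plausibly the following; the field types are inferred, not copied from the source:

// nodeContext as inferred from usage in this file; illustrative only.
type nodeContext struct {
	mockBackend *MockBackend      // the HTTP test server standing in for a node
	backend     *proxyd.Backend   // proxyd's handle for that node
	handler     *ms.MockedHandler // scripted responses served by the mock
}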
func TestFallback(t *testing.T) {
    nodes, bg, client, shutdown, normalTimestamps, fallbackTimestamps := setup_failover(t)
    defer nodes["normal"].mockBackend.Close()
    defer nodes["fallback"].mockBackend.Close()
    defer shutdown()

    ctx := context.Background()

    // use update() to advance the candidate iteration
    update := func() {
        for _, be := range bg.Primaries() {
            bg.Consensus.UpdateBackend(ctx, be)
        }

        for _, be := range bg.Fallbacks() {
            healthyCandidates := bg.Consensus.FilterCandidates(bg.Primaries())
            if len(healthyCandidates) == 0 {
                bg.Consensus.UpdateBackend(ctx, be)
            }
        }

        bg.Consensus.UpdateBackendGroupConsensus(ctx)
    }

    override := func(node string, method string, block string, response string) {
        if _, ok := nodes[node]; !ok {
            t.Fatalf("node %s does not exist in the nodes map", node)
        }
        nodes[node].handler.AddOverride(&ms.MethodTemplate{
            Method:   method,
            Block:    block,
            Response: response,
        })
    }

    overrideBlock := func(node string, blockRequest string, blockResponse string) {
        override(node,
            "eth_getBlockByNumber",
            blockRequest,
            buildResponse(map[string]string{
                "number": blockResponse,
                "hash":   "hash_" + blockResponse,
            }))
    }

    overrideBlockHash := func(node string, blockRequest string, number string, hash string) {
        override(node,
            "eth_getBlockByNumber",
            blockRequest,
            buildResponse(map[string]string{
                "number": number,
                "hash":   hash,
            }))
    }

    overridePeerCount := func(node string, count int) {
        override(node, "net_peerCount", "", buildResponse(hexutil.Uint64(count).String()))
    }

    overrideNotInSync := func(node string) {
        override(node, "eth_syncing", "", buildResponse(map[string]string{
            "startingblock": "0x0",
            "currentblock":  "0x0",
            "highestblock":  "0x100",
        }))
    }

    containsNode := func(backends []*proxyd.Backend, name string) bool {
        for _, be := range backends {
            // Note: currently matches on the backend name; it would be better to expose fallback status directly
            if be.Name == name {
                return true
            }
        }
        return false
    }

    // TODO: improvement: instead of a simple array, ensure the normal and
    // fallback timestamps are returned in strict order
    recordLastUpdates := func(backends []*proxyd.Backend) []time.Time {
        lastUpdated := []time.Time{}
        for _, be := range backends {
            lastUpdated = append(lastUpdated, bg.Consensus.GetLastUpdate(be))
        }
        return lastUpdated
    }

    // convenient methods to manipulate state and mock responses
    reset := func() {
        for _, node := range nodes {
            node.handler.ResetOverrides()
            node.mockBackend.Reset()
        }
        bg.Consensus.ClearListeners()
        bg.Consensus.Reset()

        normalTimestamps = []time.Time{}
        fallbackTimestamps = []time.Time{}
    }

    /*
        triggerFirstNormalFailure: triggers the consensus group into fallback mode.
        The old consensus group is returned one more time and the fallback group is
        enabled; the fallback is returned on the subsequent update.
    */
    triggerFirstNormalFailure := func() {
        overridePeerCount("normal", 0)
        update()
        require.True(t, containsNode(bg.Consensus.GetConsensusGroup(), "fallback"))
        require.False(t, containsNode(bg.Consensus.GetConsensusGroup(), "normal"))
        require.Equal(t, 1, len(bg.Consensus.GetConsensusGroup()))
        nodes["fallback"].mockBackend.Reset()
    }

    t.Run("Test fallback mode will not be exited unless state changes", func(t *testing.T) {
        reset()
        triggerFirstNormalFailure()
        for i := 0; i < 10; i++ {
            update()
            require.False(t, containsNode(bg.Consensus.GetConsensusGroup(), "normal"))
            require.True(t, containsNode(bg.Consensus.GetConsensusGroup(), "fallback"))
            require.Equal(t, 1, len(bg.Consensus.GetConsensusGroup()))
        }
    })

    t.Run("Test healthy mode will not be exited unless state changes", func(t *testing.T) {
        reset()
        for i := 0; i < 10; i++ {
            update()
            require.Equal(t, 1, len(bg.Consensus.GetConsensusGroup()))
            require.False(t, containsNode(bg.Consensus.GetConsensusGroup(), "fallback"))
            require.True(t, containsNode(bg.Consensus.GetConsensusGroup(), "normal"))

            _, statusCode, err := client.SendRPC("eth_getBlockByNumber", []interface{}{"0x101", false})

            require.Equal(t, 200, statusCode)
            require.Nil(t, err, "error not nil")
            require.Equal(t, "0x101", bg.Consensus.GetLatestBlockNumber().String())
            require.Equal(t, "0xe1", bg.Consensus.GetSafeBlockNumber().String())
            require.Equal(t, "0xc1", bg.Consensus.GetFinalizedBlockNumber().String())
        }
        // TODO: remove these; they are only here so the compiler doesn't complain
        overrideNotInSync("normal")
        overrideBlock("normal", "safe", "0xb1")
        overrideBlockHash("fallback", "0x102", "0x102", "wrong_hash")
    })

    t.Run("trigger normal failure, subsequent update returns fallback in consensus group, and fallback mode enabled", func(t *testing.T) {
        reset()
        triggerFirstNormalFailure()
        update()
        require.Equal(t, 1, len(bg.Consensus.GetConsensusGroup()))
        require.True(t, containsNode(bg.Consensus.GetConsensusGroup(), "fallback"))
        require.False(t, containsNode(bg.Consensus.GetConsensusGroup(), "normal"))
    })

    t.Run("trigger healthy -> fallback, update -> healthy", func(t *testing.T) {
        reset()
        update()
        require.Equal(t, 1, len(bg.Consensus.GetConsensusGroup()))
        require.True(t, containsNode(bg.Consensus.GetConsensusGroup(), "normal"))
        require.False(t, containsNode(bg.Consensus.GetConsensusGroup(), "fallback"))

        triggerFirstNormalFailure()
        update()
        require.Equal(t, 1, len(bg.Consensus.GetConsensusGroup()))
        require.True(t, containsNode(bg.Consensus.GetConsensusGroup(), "fallback"))
        require.False(t, containsNode(bg.Consensus.GetConsensusGroup(), "normal"))

        overridePeerCount("normal", 5)
        update()
        require.Equal(t, 1, len(bg.Consensus.GetConsensusGroup()))
        require.True(t, containsNode(bg.Consensus.GetConsensusGroup(), "normal"))
        require.False(t, containsNode(bg.Consensus.GetConsensusGroup(), "fallback"))
    })

    t.Run("Ensure fallback is not updated when in normal mode", func(t *testing.T) {
        reset()
        for i := 0; i < 10; i++ {
            update()
            ts := recordLastUpdates(bg.Backends)
            normalTimestamps = append(normalTimestamps, ts[0])
            fallbackTimestamps = append(fallbackTimestamps, ts[1])

            require.False(t, normalTimestamps[i].IsZero())
            require.True(t, fallbackTimestamps[i].IsZero())

            require.True(t, containsNode(bg.Consensus.GetConsensusGroup(), "normal"))
            require.False(t, containsNode(bg.Consensus.GetConsensusGroup(), "fallback"))

            // consensus at block 0x101
            require.Equal(t, "0x101", bg.Consensus.GetLatestBlockNumber().String())
            require.Equal(t, "0xe1", bg.Consensus.GetSafeBlockNumber().String())
            require.Equal(t, "0xc1", bg.Consensus.GetFinalizedBlockNumber().String())
        }
    })

    /*
        Set the normal backend to fail -> both backends should be updated
    */
    t.Run("Ensure both nodes are queried in fallback mode", func(t *testing.T) {
        reset()
        triggerFirstNormalFailure()
        for i := 0; i < 10; i++ {
            update()
            ts := recordLastUpdates(bg.Backends)
            normalTimestamps = append(normalTimestamps, ts[0])
            fallbackTimestamps = append(fallbackTimestamps, ts[1])

            // both nodes should be updated again
            require.False(t, normalTimestamps[i].IsZero())
            require.False(t, fallbackTimestamps[i].IsZero(),
                fmt.Sprintf("Error: fallback timestamp %v was not queried on iteration %d", fallbackTimestamps[i], i),
            )
            if i > 0 {
                require.Greater(t, normalTimestamps[i], normalTimestamps[i-1])
                require.Greater(t, fallbackTimestamps[i], fallbackTimestamps[i-1])
            }
        }
    })
t.Run("Healthy -> Fallback -> Healthy with timestamps", func(t *testing.T) {
|
||||
reset()
|
||||
for i := 0; i < 10; i++ {
|
||||
update()
|
||||
ts := recordLastUpdates(bg.Backends)
|
||||
normalTimestamps = append(normalTimestamps, ts[0])
|
||||
fallbackTimestamps = append(fallbackTimestamps, ts[1])
|
||||
|
||||
// Normal is queried, fallback is not
|
||||
require.False(t, normalTimestamps[i].IsZero())
|
||||
require.True(t, fallbackTimestamps[i].IsZero(),
|
||||
fmt.Sprintf("Error: Fallback timestamp: %v was not queried on iteratio %d", fallbackTimestamps[i], i),
|
||||
)
|
||||
if i > 0 {
|
||||
require.Greater(t, normalTimestamps[i], normalTimestamps[i-1])
|
||||
// Fallbacks should be zeros
|
||||
require.Equal(t, fallbackTimestamps[i], fallbackTimestamps[i-1])
|
||||
}
|
||||
}
|
||||
|
||||
offset := 10
|
||||
triggerFirstNormalFailure()
|
||||
for i := 0; i < 10; i++ {
|
||||
update()
|
||||
ts := recordLastUpdates(bg.Backends)
|
||||
normalTimestamps = append(normalTimestamps, ts[0])
|
||||
fallbackTimestamps = append(fallbackTimestamps, ts[1])
|
||||
|
||||
// Both Nodes should be updated again
|
||||
require.False(t, normalTimestamps[i+offset].IsZero())
|
||||
require.False(t, fallbackTimestamps[i+offset].IsZero())
|
||||
|
||||
require.Greater(t, normalTimestamps[i+offset], normalTimestamps[i+offset-1])
|
||||
require.Greater(t, fallbackTimestamps[i+offset], fallbackTimestamps[i+offset-1])
|
||||
}
|
||||
|
||||
overridePeerCount("normal", 5)
|
||||
offset = 20
|
||||
for i := 0; i < 10; i++ {
|
||||
update()
|
||||
ts := recordLastUpdates(bg.Backends)
|
||||
normalTimestamps = append(normalTimestamps, ts[0])
|
||||
fallbackTimestamps = append(fallbackTimestamps, ts[1])
|
||||
|
||||
// Normal Node will be updated
|
||||
require.False(t, normalTimestamps[i+offset].IsZero())
|
||||
require.Greater(t, normalTimestamps[i+offset], normalTimestamps[i+offset-1])
|
||||
|
||||
// fallback should not be updating
|
||||
if offset+i > 21 {
|
||||
require.Equal(t, fallbackTimestamps[i+offset], fallbackTimestamps[i+offset-1])
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
79
proxyd/integration_tests/max_rpc_conns_test.go
Normal file
@ -0,0 +1,79 @@
package integration_tests

import (
    "net/http"
    "net/http/httptest"
    "os"
    "sync"
    "testing"
    "time"

    "github.com/ethereum-optimism/optimism/proxyd"
    "github.com/stretchr/testify/require"
)

func TestMaxConcurrentRPCs(t *testing.T) {
    var (
        mu                sync.Mutex
        concurrentRPCs    int
        maxConcurrentRPCs int
    )
    handler := func(w http.ResponseWriter, r *http.Request) {
        mu.Lock()
        concurrentRPCs++
        if maxConcurrentRPCs < concurrentRPCs {
            maxConcurrentRPCs = concurrentRPCs
        }
        mu.Unlock()

        time.Sleep(time.Second * 2)
        BatchedResponseHandler(200, goodResponse)(w, r)

        mu.Lock()
        concurrentRPCs--
        mu.Unlock()
    }
    // We don't use the MockBackend because it serializes requests to the handler
    slowBackend := httptest.NewServer(http.HandlerFunc(handler))
    defer slowBackend.Close()

    require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", slowBackend.URL))

    config := ReadConfig("max_rpc_conns")
    client := NewProxydClient("http://127.0.0.1:8545")
    _, shutdown, err := proxyd.Start(config)
    require.NoError(t, err)
    defer shutdown()

    type resWithCodeErr struct {
        res  []byte
        code int
        err  error
    }
    resCh := make(chan *resWithCodeErr)
    for i := 0; i < 3; i++ {
        go func() {
            res, code, err := client.SendRPC("eth_chainId", nil)
            resCh <- &resWithCodeErr{
                res:  res,
                code: code,
                err:  err,
            }
        }()
    }
    res1 := <-resCh
    res2 := <-resCh
    res3 := <-resCh

    require.NoError(t, res1.err)
    require.NoError(t, res2.err)
    require.NoError(t, res3.err)
    require.Equal(t, 200, res1.code)
    require.Equal(t, 200, res2.code)
    require.Equal(t, 200, res3.code)
    RequireEqualJSON(t, []byte(goodResponse), res1.res)
    RequireEqualJSON(t, []byte(goodResponse), res2.res)
    RequireEqualJSON(t, []byte(goodResponse), res3.res)

    require.EqualValues(t, 2, maxConcurrentRPCs)
}
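Editor's note: the test above exercises proxyd's `max_concurrent_rpcs` server option (set to 2 in `max_rpc_conns.toml`), which caps in-flight requests. A minimal sketch of that kind of cap as a generic `net/http` middleware; the names here are illustrative, not proxyd's actual implementation:

package main

import "net/http"

// limitConcurrency caps simultaneous requests using a buffered channel as a
// counting semaphore; requests beyond the cap block until a slot frees up
// (a production server would pair this with a request timeout).
func limitConcurrency(max int, next http.Handler) http.Handler {
    sem := make(chan struct{}, max)
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        sem <- struct{}{}        // acquire a slot
        defer func() { <-sem }() // release it when done
        next.ServeHTTP(w, r)
    })
}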
327
proxyd/integration_tests/mock_backend_test.go
Normal file
@ -0,0 +1,327 @@
package integration_tests

import (
    "bytes"
    "context"
    "encoding/json"
    "io"
    "net/http"
    "net/http/httptest"
    "strings"
    "sync"

    "github.com/ethereum-optimism/optimism/proxyd"
    "github.com/gorilla/websocket"
)

type RecordedRequest struct {
    Method  string
    Headers http.Header
    Body    []byte
}

type MockBackend struct {
    handler  http.Handler
    server   *httptest.Server
    mtx      sync.RWMutex
    requests []*RecordedRequest
}

func SingleResponseHandler(code int, response string) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        w.WriteHeader(code)
        _, _ = w.Write([]byte(response))
    }
}

func BatchedResponseHandler(code int, responses ...string) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        if len(responses) == 1 {
            SingleResponseHandler(code, responses[0])(w, r)
            return
        }

        var body string
        body += "["
        for i, response := range responses {
            body += response
            if i+1 < len(responses) {
                body += ","
            }
        }
        body += "]"
        SingleResponseHandler(code, body)(w, r)
    }
}

type responseMapping struct {
    result interface{}
    calls  int
}
type BatchRPCResponseRouter struct {
    m        map[string]map[string]*responseMapping
    fallback map[string]interface{}
    mtx      sync.Mutex
}

func NewBatchRPCResponseRouter() *BatchRPCResponseRouter {
    return &BatchRPCResponseRouter{
        m:        make(map[string]map[string]*responseMapping),
        fallback: make(map[string]interface{}),
    }
}

func (h *BatchRPCResponseRouter) SetRoute(method string, id string, result interface{}) {
    h.mtx.Lock()
    defer h.mtx.Unlock()

    switch result.(type) {
    case string:
    case []string:
    case nil:
        break
    default:
        panic("invalid result type")
    }

    m := h.m[method]
    if m == nil {
        m = make(map[string]*responseMapping)
    }
    m[id] = &responseMapping{result: result}
    h.m[method] = m
}

func (h *BatchRPCResponseRouter) SetFallbackRoute(method string, result interface{}) {
    h.mtx.Lock()
    defer h.mtx.Unlock()

    switch result.(type) {
    case string:
    case nil:
        break
    default:
        panic("invalid result type")
    }

    h.fallback[method] = result
}

func (h *BatchRPCResponseRouter) GetNumCalls(method string, id string) int {
    h.mtx.Lock()
    defer h.mtx.Unlock()

    if m := h.m[method]; m != nil {
        if rm := m[id]; rm != nil {
            return rm.calls
        }
    }
    return 0
}

func (h *BatchRPCResponseRouter) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    h.mtx.Lock()
    defer h.mtx.Unlock()

    body, err := io.ReadAll(r.Body)
    if err != nil {
        panic(err)
    }

    if proxyd.IsBatch(body) {
        batch, err := proxyd.ParseBatchRPCReq(body)
        if err != nil {
            panic(err)
        }
        out := make([]*proxyd.RPCRes, len(batch))
        for i := range batch {
            req, err := proxyd.ParseRPCReq(batch[i])
            if err != nil {
                panic(err)
            }

            var result interface{}
            var resultHasValue bool

            if mappings, exists := h.m[req.Method]; exists {
                if rm := mappings[string(req.ID)]; rm != nil {
                    result = rm.result
                    resultHasValue = true
                    rm.calls++
                }
            }
            if !resultHasValue {
                result, resultHasValue = h.fallback[req.Method]
            }
            if !resultHasValue {
                w.WriteHeader(400)
                return
            }

            out[i] = &proxyd.RPCRes{
                JSONRPC: proxyd.JSONRPCVersion,
                Result:  result,
                ID:      req.ID,
            }
        }
        if err := json.NewEncoder(w).Encode(out); err != nil {
            panic(err)
        }
        return
    }

    req, err := proxyd.ParseRPCReq(body)
    if err != nil {
        panic(err)
    }

    var result interface{}
    var resultHasValue bool

    if mappings, exists := h.m[req.Method]; exists {
        if rm := mappings[string(req.ID)]; rm != nil {
            result = rm.result
            resultHasValue = true
            rm.calls++
        }
    }
    if !resultHasValue {
        result, resultHasValue = h.fallback[req.Method]
    }
    if !resultHasValue {
        w.WriteHeader(400)
        return
    }

    out := &proxyd.RPCRes{
        JSONRPC: proxyd.JSONRPCVersion,
        Result:  result,
        ID:      req.ID,
    }
    enc := json.NewEncoder(w)
    if err := enc.Encode(out); err != nil {
        panic(err)
    }
}

func NewMockBackend(handler http.Handler) *MockBackend {
    mb := &MockBackend{
        handler: handler,
    }
    mb.server = httptest.NewServer(http.HandlerFunc(mb.wrappedHandler))
    return mb
}

func (m *MockBackend) URL() string {
    return m.server.URL
}

func (m *MockBackend) Close() {
    m.server.Close()
}

func (m *MockBackend) SetHandler(handler http.Handler) {
    m.mtx.Lock()
    m.handler = handler
    m.mtx.Unlock()
}

func (m *MockBackend) Reset() {
    m.mtx.Lock()
    m.requests = nil
    m.mtx.Unlock()
}

func (m *MockBackend) Requests() []*RecordedRequest {
    m.mtx.RLock()
    defer m.mtx.RUnlock()
    out := make([]*RecordedRequest, len(m.requests))
    copy(out, m.requests)
    return out
}

func (m *MockBackend) wrappedHandler(w http.ResponseWriter, r *http.Request) {
    m.mtx.Lock()
    body, err := io.ReadAll(r.Body)
    if err != nil {
        panic(err)
    }
    clone := r.Clone(context.Background())
    clone.Body = io.NopCloser(bytes.NewReader(body))
    m.requests = append(m.requests, &RecordedRequest{
        Method:  r.Method,
        Headers: r.Header.Clone(),
        Body:    body,
    })
    m.handler.ServeHTTP(w, clone)
    m.mtx.Unlock()
}

type MockWSBackend struct {
    connCB   MockWSBackendOnConnect
    msgCB    MockWSBackendOnMessage
    closeCB  MockWSBackendOnClose
    server   *httptest.Server
    upgrader websocket.Upgrader
    conns    []*websocket.Conn
    connsMu  sync.Mutex
}

type MockWSBackendOnConnect func(conn *websocket.Conn)
type MockWSBackendOnMessage func(conn *websocket.Conn, msgType int, data []byte)
type MockWSBackendOnClose func(conn *websocket.Conn, err error)

func NewMockWSBackend(
    connCB MockWSBackendOnConnect,
    msgCB MockWSBackendOnMessage,
    closeCB MockWSBackendOnClose,
) *MockWSBackend {
    mb := &MockWSBackend{
        connCB:  connCB,
        msgCB:   msgCB,
        closeCB: closeCB,
    }
    mb.server = httptest.NewServer(mb)
    return mb
}

func (m *MockWSBackend) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    conn, err := m.upgrader.Upgrade(w, r, nil)
    if err != nil {
        panic(err)
    }
    if m.connCB != nil {
        m.connCB(conn)
    }
    go func() {
        for {
            mType, msg, err := conn.ReadMessage()
            if err != nil {
                if m.closeCB != nil {
                    m.closeCB(conn, err)
                }
                return
            }
            if m.msgCB != nil {
                m.msgCB(conn, mType, msg)
            }
        }
    }()
    m.connsMu.Lock()
    m.conns = append(m.conns, conn)
    m.connsMu.Unlock()
}

func (m *MockWSBackend) URL() string {
    return strings.Replace(m.server.URL, "http://", "ws://", 1)
}

func (m *MockWSBackend) Close() {
    m.server.Close()

    m.connsMu.Lock()
    for _, conn := range m.conns {
        conn.Close()
    }
    m.connsMu.Unlock()
}
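Editor's note: a quick sketch of how this router is typically wired into a test; the method, ID, and result values below are illustrative:

router := NewBatchRPCResponseRouter()
router.SetRoute("eth_getBlockByNumber", "1", "0x101") // match on exact (method, id)
router.SetFallbackRoute("eth_chainId", "0xa")         // match on method for any id
backend := NewMockBackend(router)
defer backend.Close()
// Point proxyd at backend.URL(), send requests, then
// router.GetNumCalls("eth_getBlockByNumber", "1") reports how often the route was hit.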
170
proxyd/integration_tests/rate_limit_test.go
Normal file
@ -0,0 +1,170 @@
package integration_tests

import (
    "encoding/json"
    "net/http"
    "os"
    "testing"
    "time"

    "github.com/ethereum-optimism/optimism/proxyd"
    "github.com/stretchr/testify/require"
)

type resWithCode struct {
    code int
    res  []byte
}

const frontendOverLimitResponse = `{"error":{"code":-32016,"message":"over rate limit with special message"},"id":null,"jsonrpc":"2.0"}`
const frontendOverLimitResponseWithID = `{"error":{"code":-32016,"message":"over rate limit with special message"},"id":999,"jsonrpc":"2.0"}`

var ethChainID = "eth_chainId"

func TestFrontendMaxRPSLimit(t *testing.T) {
    goodBackend := NewMockBackend(BatchedResponseHandler(200, goodResponse))
    defer goodBackend.Close()

    require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL()))

    config := ReadConfig("frontend_rate_limit")
    _, shutdown, err := proxyd.Start(config)
    require.NoError(t, err)
    defer shutdown()

    t.Run("non-exempt over limit", func(t *testing.T) {
        client := NewProxydClient("http://127.0.0.1:8545")
        limitedRes, codes := spamReqs(t, client, ethChainID, 429, 3)
        require.Equal(t, 1, codes[429])
        require.Equal(t, 2, codes[200])
        RequireEqualJSON(t, []byte(frontendOverLimitResponse), limitedRes)
    })

    t.Run("exempt user agent over limit", func(t *testing.T) {
        h := make(http.Header)
        h.Set("User-Agent", "exempt_agent")
        client := NewProxydClientWithHeaders("http://127.0.0.1:8545", h)
        _, codes := spamReqs(t, client, ethChainID, 429, 3)
        require.Equal(t, 3, codes[200])
    })

    t.Run("exempt origin over limit", func(t *testing.T) {
        h := make(http.Header)
        h.Set("Origin", "exempt_origin")
        client := NewProxydClientWithHeaders("http://127.0.0.1:8545", h)
        _, codes := spamReqs(t, client, ethChainID, 429, 3)
        require.Equal(t, 3, codes[200])
    })

    t.Run("multiple xff", func(t *testing.T) {
        h1 := make(http.Header)
        h1.Set("X-Forwarded-For", "0.0.0.0")
        h2 := make(http.Header)
        h2.Set("X-Forwarded-For", "1.1.1.1")
        client1 := NewProxydClientWithHeaders("http://127.0.0.1:8545", h1)
        client2 := NewProxydClientWithHeaders("http://127.0.0.1:8545", h2)
        _, codes := spamReqs(t, client1, ethChainID, 429, 3)
        require.Equal(t, 1, codes[429])
        require.Equal(t, 2, codes[200])
        _, code, err := client2.SendRPC(ethChainID, nil)
        require.Equal(t, 200, code)
        require.NoError(t, err)
        time.Sleep(time.Second)
        _, code, err = client2.SendRPC(ethChainID, nil)
        require.Equal(t, 200, code)
        require.NoError(t, err)
    })

    time.Sleep(time.Second)

    t.Run("RPC override", func(t *testing.T) {
        client := NewProxydClient("http://127.0.0.1:8545")
        limitedRes, codes := spamReqs(t, client, "eth_foobar", 429, 2)
        // expect 1 limited and 1 successful response since the limit for eth_foobar is 1
        require.Equal(t, 1, codes[429])
        require.Equal(t, 1, codes[200])
        RequireEqualJSON(t, []byte(frontendOverLimitResponseWithID), limitedRes)
    })

    time.Sleep(time.Second)

    t.Run("RPC override in batch", func(t *testing.T) {
        client := NewProxydClient("http://127.0.0.1:8545")
        req := NewRPCReq("123", "eth_foobar", nil)
        out, code, err := client.SendBatchRPC(req, req, req)
        require.NoError(t, err)
        var res []proxyd.RPCRes
        require.NoError(t, json.Unmarshal(out, &res))

        expCode := proxyd.ErrOverRateLimit.Code
        require.Equal(t, 200, code)
        require.Equal(t, 3, len(res))
        require.Nil(t, res[0].Error)
        require.Equal(t, expCode, res[1].Error.Code)
        require.Equal(t, expCode, res[2].Error.Code)
    })

    time.Sleep(time.Second)

    t.Run("RPC override in batch exempt", func(t *testing.T) {
        h := make(http.Header)
        h.Set("User-Agent", "exempt_agent")
        client := NewProxydClientWithHeaders("http://127.0.0.1:8545", h)
        req := NewRPCReq("123", "eth_foobar", nil)
        out, code, err := client.SendBatchRPC(req, req, req)
        require.NoError(t, err)
        var res []proxyd.RPCRes
        require.NoError(t, json.Unmarshal(out, &res))

        require.Equal(t, 200, code)
        require.Equal(t, 3, len(res))
        require.Nil(t, res[0].Error)
        require.Nil(t, res[1].Error)
        require.Nil(t, res[2].Error)
    })

    time.Sleep(time.Second)

    t.Run("global RPC override", func(t *testing.T) {
        h := make(http.Header)
        h.Set("User-Agent", "exempt_agent")
        client := NewProxydClientWithHeaders("http://127.0.0.1:8545", h)
        limitedRes, codes := spamReqs(t, client, "eth_baz", 429, 2)
        // expect 1 and 1 since the limit for eth_baz is 1 and applies globally,
        // even to exempt agents
        require.Equal(t, 1, codes[429])
        require.Equal(t, 1, codes[200])
        RequireEqualJSON(t, []byte(frontendOverLimitResponseWithID), limitedRes)
    })
}

func spamReqs(t *testing.T, client *ProxydHTTPClient, method string, limCode int, n int) ([]byte, map[int]int) {
    resCh := make(chan *resWithCode)
    for i := 0; i < n; i++ {
        go func() {
            res, code, err := client.SendRPC(method, nil)
            require.NoError(t, err)
            resCh <- &resWithCode{
                code: code,
                res:  res,
            }
        }()
    }

    codes := make(map[int]int)
    var limitedRes []byte
    for i := 0; i < n; i++ {
        res := <-resCh
        code := res.code
        if codes[code] == 0 {
            codes[code] = 1
        } else {
            codes[code] += 1
        }

        if code == limCode {
            limitedRes = res.res
        }
    }

    return limitedRes, codes
}
126
proxyd/integration_tests/sender_rate_limit_test.go
Normal file
@ -0,0 +1,126 @@
package integration_tests

import (
    "bufio"
    "fmt"
    "math"
    "os"
    "strings"
    "testing"
    "time"

    "github.com/ethereum-optimism/optimism/proxyd"
    "github.com/stretchr/testify/require"
)

const txHex1 = "0x02f8b28201a406849502f931849502f931830147f9948f3ddd0fbf3e78ca1d6c" +
    "d17379ed88e261249b5280b84447e7ef2400000000000000000000000089c8b1" +
    "b2774201bac50f627403eac1b732459cf7000000000000000000000000000000" +
    "0000000000000000056bc75e2d63100000c080a0473c95566026c312c9664cd6" +
    "1145d2f3e759d49209fe96011ac012884ec5b017a0763b58f6fa6096e6ba28ee" +
    "08bfac58f58fb3b8bcef5af98578bdeaddf40bde42"

const txHex2 = "0x02f8758201a48217fd84773594008504a817c80082520894be53e587975603" +
    "a13d0923d0aa6d37c5233dd750865af3107a400080c080a04aefbd5819c35729" +
    "138fe26b6ae1783ebf08d249b356c2f920345db97877f3f7a008d5ae92560a3c" +
    "65f723439887205713af7ce7d7f6b24fba198f2afa03435867"

const dummyRes = `{"id": 123, "jsonrpc": "2.0", "result": "dummy"}`

const limRes = `{"error":{"code":-32017,"message":"sender is over rate limit"},"id":1,"jsonrpc":"2.0"}`

func TestSenderRateLimitValidation(t *testing.T) {
    goodBackend := NewMockBackend(SingleResponseHandler(200, dummyRes))
    defer goodBackend.Close()

    require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL()))

    config := ReadConfig("sender_rate_limit")

    // Don't perform rate limiting in this test since we're only testing
    // validation.
    config.SenderRateLimit.Limit = math.MaxInt
    client := NewProxydClient("http://127.0.0.1:8545")
    _, shutdown, err := proxyd.Start(config)
    require.NoError(t, err)
    defer shutdown()

    f, err := os.Open("testdata/testdata.txt")
    require.NoError(t, err)
    defer f.Close()

    scanner := bufio.NewScanner(f)
    scanner.Scan() // skip header
    for scanner.Scan() {
        record := strings.Split(scanner.Text(), "|")
        name, body, expResponseBody := record[0], record[1], record[2]
        require.NoError(t, err)
        t.Run(name, func(t *testing.T) {
            res, _, err := client.SendRequest([]byte(body))
            require.NoError(t, err)
            RequireEqualJSON(t, []byte(expResponseBody), res)
        })
    }
}

func TestSenderRateLimitLimiting(t *testing.T) {
    goodBackend := NewMockBackend(SingleResponseHandler(200, dummyRes))
    defer goodBackend.Close()

    require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL()))

    config := ReadConfig("sender_rate_limit")
    client := NewProxydClient("http://127.0.0.1:8545")
    _, shutdown, err := proxyd.Start(config)
    require.NoError(t, err)
    defer shutdown()

    // Two separate requests from the same sender
    // should be rate limited.
    res1, code1, err := client.SendRequest(makeSendRawTransaction(txHex1))
    require.NoError(t, err)
    RequireEqualJSON(t, []byte(dummyRes), res1)
    require.Equal(t, 200, code1)
    res2, code2, err := client.SendRequest(makeSendRawTransaction(txHex1))
    require.NoError(t, err)
    RequireEqualJSON(t, []byte(limRes), res2)
    require.Equal(t, 429, code2)

    // Clear the limiter.
    time.Sleep(1100 * time.Millisecond)

    // Two separate requests from different senders
    // should not be rate limited.
    res1, code1, err = client.SendRequest(makeSendRawTransaction(txHex1))
    require.NoError(t, err)
    res2, code2, err = client.SendRequest(makeSendRawTransaction(txHex2))
    require.NoError(t, err)
    RequireEqualJSON(t, []byte(dummyRes), res1)
    require.Equal(t, 200, code1)
    RequireEqualJSON(t, []byte(dummyRes), res2)
    require.Equal(t, 200, code2)

    // Clear the limiter.
    time.Sleep(1100 * time.Millisecond)

    // A batch request should rate limit within the batch itself.
    batch := []byte(fmt.Sprintf(
        `[%s, %s, %s]`,
        makeSendRawTransaction(txHex1),
        makeSendRawTransaction(txHex1),
        makeSendRawTransaction(txHex2),
    ))
    res, code, err := client.SendRequest(batch)
    require.NoError(t, err)
    require.Equal(t, 200, code)
    RequireEqualJSON(t, []byte(fmt.Sprintf(
        `[%s, %s, %s]`,
        dummyRes,
        limRes,
        dummyRes,
    )), res)
}

func makeSendRawTransaction(dataHex string) []byte {
    return []byte(`{"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":["` + dataHex + `"],"id":1}`)
}
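Editor's note: sender-based limiting has to recover the signer from the raw transaction bytes before it can pick a rate-limit bucket. A minimal sketch of that recovery using go-ethereum; this illustrates the idea and is not necessarily proxyd's exact code path:

package main

import (
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/ethereum/go-ethereum/core/types"
)

// senderOf decodes a raw transaction and recovers its signer, which a
// sender rate limiter can use as the rate-limit key.
func senderOf(rawTxHex string) (common.Address, error) {
    b, err := hexutil.Decode(rawTxHex)
    if err != nil {
        return common.Address{}, err
    }
    tx := new(types.Transaction)
    if err := tx.UnmarshalBinary(b); err != nil {
        return common.Address{}, err
    }
    return types.Sender(types.LatestSignerForChainID(tx.ChainId()), tx)
}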
51
proxyd/integration_tests/smoke_test.go
Normal file
@ -0,0 +1,51 @@
package integration_tests

import (
    "fmt"
    "io"
    "os"
    "strings"
    "testing"

    "github.com/ethereum-optimism/optimism/proxyd"
    "github.com/ethereum/go-ethereum/log"
    "github.com/stretchr/testify/require"
)

func TestInitProxyd(t *testing.T) {
    goodBackend := NewMockBackend(BatchedResponseHandler(200, goodResponse))
    defer goodBackend.Close()

    require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL()))

    config := ReadConfig("smoke")

    sysStdOut := os.Stdout
    r, w, err := os.Pipe()
    require.NoError(t, err)
    os.Stdout = w

    proxyd.SetLogLevel(log.LevelInfo)

    defer func() {
        w.Close()
        out, _ := io.ReadAll(r)
        require.True(t, strings.Contains(string(out), "started proxyd"))
        require.True(t, strings.Contains(string(out), "shutting down proxyd"))
        fmt.Println(string(out))
        os.Stdout = sysStdOut
    }()

    _, shutdown, err := proxyd.Start(config)
    require.NoError(t, err)
    defer shutdown()

    t.Run("initialization", func(t *testing.T) {
        client := NewProxydClient("http://127.0.0.1:8545")
        res, code, err := client.SendRPC(ethChainID, nil)
        require.NoError(t, err)
        require.Equal(t, 200, code)
        require.NotNil(t, res)
    })
}
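Editor's note: the stdout pipe-swap in this test is a standard way to assert on process logs. The same technique as a reusable helper, sketched here for illustration (it assumes the package's existing `os` and `io` imports and is not part of the test suite):

// captureStdout redirects os.Stdout into a pipe while fn runs and returns
// whatever was written. os.Stdout is process-global, so this must not run
// concurrently with other output-producing code.
func captureStdout(fn func()) string {
    orig := os.Stdout
    r, w, _ := os.Pipe()
    os.Stdout = w
    defer func() { os.Stdout = orig }()

    fn()

    w.Close()
    out, _ := io.ReadAll(r)
    return string(out)
}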
20
proxyd/integration_tests/testdata/batch_timeout.toml
vendored
Normal file
@ -0,0 +1,20 @@
[server]
rpc_port = 8545
timeout_seconds = 1
max_upstream_batch_size = 1

[backend]
response_timeout_seconds = 1
max_retries = 3

[backends]
[backends.slow]
rpc_url = "$SLOW_BACKEND_RPC_URL"
ws_url = "$SLOW_BACKEND_RPC_URL"

[backend_groups]
[backend_groups.main]
backends = ["slow"]

[rpc_method_mappings]
eth_chainId = "main"
23
proxyd/integration_tests/testdata/batching.toml
vendored
Normal file
@ -0,0 +1,23 @@
[server]
rpc_port = 8545

[backend]
response_timeout_seconds = 1

[backends]
[backends.good]
rpc_url = "$GOOD_BACKEND_RPC_URL"
ws_url = "$GOOD_BACKEND_RPC_URL"

[backend_groups]
[backend_groups.main]
backends = ["good"]

[rpc_method_mappings]
eth_chainId = "main"
net_version = "main"
eth_call = "main"

[batch]
error_message = "over batch size custom message"
max_size = 5
36
proxyd/integration_tests/testdata/caching.toml
vendored
Normal file
@ -0,0 +1,36 @@
[server]
rpc_port = 8545

[backend]
response_timeout_seconds = 1

[redis]
url = "$REDIS_URL"
namespace = "proxyd"

[cache]
enabled = true

[backends]
[backends.good]
rpc_url = "$GOOD_BACKEND_RPC_URL"
ws_url = "$GOOD_BACKEND_RPC_URL"

[backend_groups]
[backend_groups.main]
backends = ["good"]

[rpc_method_mappings]
eth_chainId = "main"
net_version = "main"
eth_getBlockByNumber = "main"
eth_blockNumber = "main"
eth_call = "main"
eth_getBlockTransactionCountByHash = "main"
eth_getUncleCountByBlockHash = "main"
eth_getBlockByHash = "main"
eth_getTransactionByHash = "main"
eth_getTransactionByBlockHashAndIndex = "main"
eth_getUncleByBlockHashAndIndex = "main"
eth_getTransactionReceipt = "main"
debug_getRawReceipts = "main"
30
proxyd/integration_tests/testdata/consensus.toml
vendored
Normal file
@ -0,0 +1,30 @@
[server]
rpc_port = 8545

[backend]
response_timeout_seconds = 1
max_degraded_latency_threshold = "30ms"

[backends]
[backends.node1]
rpc_url = "$NODE1_URL"

[backends.node2]
rpc_url = "$NODE2_URL"

[backend_groups]
[backend_groups.node]
backends = ["node1", "node2"]
consensus_aware = true
consensus_handler = "noop" # allow more control over the consensus poller for tests
consensus_ban_period = "1m"
consensus_max_update_threshold = "2m"
consensus_max_block_lag = 8
consensus_min_peer_count = 4

[rpc_method_mappings]
eth_call = "node"
eth_chainId = "node"
eth_blockNumber = "node"
eth_getBlockByNumber = "node"
consensus_getReceipts = "node"
234
proxyd/integration_tests/testdata/consensus_responses.yml
vendored
Normal file
@ -0,0 +1,234 @@
- method: eth_chainId
  response: >
    {
      "jsonrpc": "2.0",
      "id": 67,
      "result": "hello",
    }
- method: net_peerCount
  response: >
    {
      "jsonrpc": "2.0",
      "id": 67,
      "result": "0x10"
    }
- method: eth_syncing
  response: >
    {
      "jsonrpc": "2.0",
      "id": 67,
      "result": false
    }
- method: eth_getBlockByNumber
  block: latest
  response: >
    {
      "jsonrpc": "2.0",
      "id": 67,
      "result": {
        "hash": "hash_0x101",
        "number": "0x101"
      }
    }
- method: eth_getBlockByNumber
  block: 0x101
  response: >
    {
      "jsonrpc": "2.0",
      "id": 67,
      "result": {
        "hash": "hash_0x101",
        "number": "0x101"
      }
    }
- method: eth_getBlockByNumber
  block: 0x102
  response: >
    {
      "jsonrpc": "2.0",
      "id": 67,
      "result": {
        "hash": "hash_0x102",
        "number": "0x102"
      }
    }
- method: eth_getBlockByNumber
  block: 0x103
  response: >
    {
      "jsonrpc": "2.0",
      "id": 67,
      "result": {
        "hash": "hash_0x103",
        "number": "0x103"
      }
    }
- method: eth_getBlockByNumber
  block: 0x10a
  response: >
    {
      "jsonrpc": "2.0",
      "id": 67,
      "result": {
        "hash": "hash_0x10a",
        "number": "0x10a"
      }
    }
- method: eth_getBlockByNumber
  block: 0x132
  response: >
    {
      "jsonrpc": "2.0",
      "id": 67,
      "result": {
        "hash": "hash_0x132",
        "number": "0x132"
      }
    }
- method: eth_getBlockByNumber
  block: 0x133
  response: >
    {
      "jsonrpc": "2.0",
      "id": 67,
      "result": {
        "hash": "hash_0x133",
        "number": "0x133"
      }
    }
- method: eth_getBlockByNumber
  block: 0x134
  response: >
    {
      "jsonrpc": "2.0",
      "id": 67,
      "result": {
        "hash": "hash_0x134",
        "number": "0x134"
      }
    }
- method: eth_getBlockByNumber
  block: 0x200
  response: >
    {
      "jsonrpc": "2.0",
      "id": 67,
      "result": {
        "hash": "hash_0x200",
        "number": "0x200"
      }
    }
- method: eth_getBlockByNumber
  block: 0x91
  response: >
    {
      "jsonrpc": "2.0",
      "id": 67,
      "result": {
        "hash": "hash_0x91",
        "number": "0x91"
      }
    }
- method: eth_getBlockByNumber
  block: safe
  response: >
    {
      "jsonrpc": "2.0",
      "id": 67,
      "result": {
        "hash": "hash_0xe1",
        "number": "0xe1"
      }
    }
- method: eth_getBlockByNumber
  block: 0xe1
  response: >
    {
      "jsonrpc": "2.0",
      "id": 67,
      "result": {
        "hash": "hash_0xe1",
        "number": "0xe1"
      }
    }
- method: eth_getBlockByNumber
  block: finalized
  response: >
    {
      "jsonrpc": "2.0",
      "id": 67,
      "result": {
        "hash": "hash_0xc1",
        "number": "0xc1"
      }
    }
- method: eth_getBlockByNumber
  block: 0xc1
  response: >
    {
      "jsonrpc": "2.0",
      "id": 67,
      "result": {
        "hash": "hash_0xc1",
        "number": "0xc1"
      }
    }
- method: eth_getBlockByNumber
  block: 0xd1
  response: >
    {
      "jsonrpc": "2.0",
      "id": 67,
      "result": {
        "hash": "hash_0xd1",
        "number": "0xd1"
      }
    }
- method: debug_getRawReceipts
  block: 0x55
  response: >
    {
      "jsonrpc": "2.0",
      "id": 67,
      "result": {
        "_": "debug_getRawReceipts"
      }
    }
- method: debug_getRawReceipts
  block: 0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b
  response: >
    {
      "jsonrpc": "2.0",
      "id": 67,
      "result": {
        "_": "debug_getRawReceipts"
      }
    }
- method: debug_getRawReceipts
  block: 0x101
  response: >
    {
      "jsonrpc": "2.0",
      "id": 67,
      "result": {
        "_": "debug_getRawReceipts"
      }
    }
- method: eth_getTransactionReceipt
  response: >
    {
      "jsonrpc": "2.0",
      "id": 67,
      "result": {
        "_": "eth_getTransactionReceipt"
      }
    }
- method: alchemy_getTransactionReceipts
  response: >
    {
      "jsonrpc": "2.0",
      "id": 67,
      "result": {
        "_": "alchemy_getTransactionReceipts"
      }
    }
20
proxyd/integration_tests/testdata/failover.toml
vendored
Normal file
@ -0,0 +1,20 @@
[server]
rpc_port = 8545

[backend]
response_timeout_seconds = 1

[backends]
[backends.good]
rpc_url = "$GOOD_BACKEND_RPC_URL"
ws_url = "$GOOD_BACKEND_RPC_URL"
[backends.bad]
rpc_url = "$BAD_BACKEND_RPC_URL"
ws_url = "$BAD_BACKEND_RPC_URL"

[backend_groups]
[backend_groups.main]
backends = ["bad", "good"]

[rpc_method_mappings]
eth_chainId = "main"
31
proxyd/integration_tests/testdata/fallback.toml
vendored
Normal file
@ -0,0 +1,31 @@
[server]
rpc_port = 8545

[backend]
response_timeout_seconds = 1
max_degraded_latency_threshold = "30ms"

[backends]
[backends.normal]
rpc_url = "$NODE1_URL"

[backends.fallback]
rpc_url = "$NODE2_URL"

[backend_groups]
[backend_groups.node]
backends = ["normal", "fallback"]
consensus_aware = true
consensus_handler = "noop" # allow more control over the consensus poller for tests
consensus_ban_period = "1m"
consensus_max_update_threshold = "2m"
consensus_max_block_lag = 8
consensus_min_peer_count = 4
fallbacks = ["fallback"]

[rpc_method_mappings]
eth_call = "node"
eth_chainId = "node"
eth_blockNumber = "node"
eth_getBlockByNumber = "node"
consensus_getReceipts = "node"
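Editor's note: values like `$NODE1_URL` above are filled in from the environment when the config is loaded, which is why the tests only need `os.Setenv` before `proxyd.Start`. A minimal sketch of that substitution, assuming plain `$VAR` expansion (illustrative; proxyd's loader may differ in detail):

package main

import "os"

// expandEnvVars replaces $VAR references in a raw config string with
// values from the process environment; unknown variables expand to "".
func expandEnvVars(rawConfig string) string {
    return os.Expand(rawConfig, os.Getenv)
}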
35
proxyd/integration_tests/testdata/frontend_rate_limit.toml
vendored
Normal file
@ -0,0 +1,35 @@
[server]
rpc_port = 8545

[backend]
response_timeout_seconds = 1

[backends]
[backends.good]
rpc_url = "$GOOD_BACKEND_RPC_URL"
ws_url = "$GOOD_BACKEND_RPC_URL"

[backend_groups]
[backend_groups.main]
backends = ["good"]

[rpc_method_mappings]
eth_chainId = "main"
eth_foobar = "main"
eth_baz = "main"

[rate_limit]
base_rate = 2
base_interval = "1s"
exempt_origins = ["exempt_origin"]
exempt_user_agents = ["exempt_agent"]
error_message = "over rate limit with special message"

[rate_limit.method_overrides.eth_foobar]
limit = 1
interval = "1s"

[rate_limit.method_overrides.eth_baz]
limit = 1
interval = "1s"
global = true
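Editor's note: for context on `base_rate`/`base_interval` and the per-method overrides above, the server admits at most `limit` requests per `interval` per key (the client, identified e.g. via X-Forwarded-For, or a single shared key when `global = true`). A minimal fixed-window sketch of that behavior; the names and structure are illustrative, not proxyd's implementation:

package main

import (
    "sync"
    "time"
)

// fixedWindowLimiter admits at most limit requests per interval per key.
type fixedWindowLimiter struct {
    mu       sync.Mutex
    limit    int
    interval time.Duration
    windows  map[string]*window
}

type window struct {
    start time.Time
    count int
}

func newFixedWindowLimiter(limit int, interval time.Duration) *fixedWindowLimiter {
    return &fixedWindowLimiter{limit: limit, interval: interval, windows: make(map[string]*window)}
}

// Allow reports whether a request under key may proceed at time now,
// resetting the key's window once the interval has elapsed.
func (l *fixedWindowLimiter) Allow(key string, now time.Time) bool {
    l.mu.Lock()
    defer l.mu.Unlock()
    w := l.windows[key]
    if w == nil || now.Sub(w.start) >= l.interval {
        l.windows[key] = &window{start: now, count: 1}
        return true
    }
    if w.count >= l.limit {
        return false
    }
    w.count++
    return true
}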
19
proxyd/integration_tests/testdata/max_rpc_conns.toml
vendored
Normal file
@ -0,0 +1,19 @@
[server]
rpc_port = 8545
max_concurrent_rpcs = 2

[backend]
# this should cover blocked requests due to max_concurrent_rpcs
response_timeout_seconds = 12

[backends]
[backends.good]
rpc_url = "$GOOD_BACKEND_RPC_URL"
ws_url = "$GOOD_BACKEND_RPC_URL"

[backend_groups]
[backend_groups.main]
backends = ["good"]

[rpc_method_mappings]
eth_chainId = "main"
22
proxyd/integration_tests/testdata/out_of_service_interval.toml
vendored
Normal file
@ -0,0 +1,22 @@
[server]
rpc_port = 8545

[backend]
response_timeout_seconds = 1
max_retries = 1
out_of_service_seconds = 1

[backends]
[backends.good]
rpc_url = "$GOOD_BACKEND_RPC_URL"
ws_url = "$GOOD_BACKEND_RPC_URL"
[backends.bad]
rpc_url = "$BAD_BACKEND_RPC_URL"
ws_url = "$BAD_BACKEND_RPC_URL"

[backend_groups]
[backend_groups.main]
backends = ["bad", "good"]

[rpc_method_mappings]
eth_chainId = "main"
18
proxyd/integration_tests/testdata/retries.toml
vendored
Normal file
@ -0,0 +1,18 @@
[server]
rpc_port = 8545

[backend]
response_timeout_seconds = 1
max_retries = 3

[backends]
[backends.good]
rpc_url = "$GOOD_BACKEND_RPC_URL"
ws_url = "$GOOD_BACKEND_RPC_URL"

[backend_groups]
[backend_groups.main]
backends = ["good"]

[rpc_method_mappings]
eth_chainId = "main"
24
proxyd/integration_tests/testdata/sender_rate_limit.toml
vendored
Normal file
@ -0,0 +1,24 @@
[server]
rpc_port = 8545

[backend]
response_timeout_seconds = 1

[backends]
[backends.good]
rpc_url = "$GOOD_BACKEND_RPC_URL"
ws_url = "$GOOD_BACKEND_RPC_URL"

[backend_groups]
[backend_groups.main]
backends = ["good"]

[rpc_method_mappings]
eth_chainId = "main"
eth_sendRawTransaction = "main"

[sender_rate_limit]
allowed_chain_ids = [0, 420] # adding 0 allows pre-EIP-155 transactions
enabled = true
interval = "1s"
limit = 1
21
proxyd/integration_tests/testdata/size_limits.toml
vendored
Normal file
@ -0,0 +1,21 @@
whitelist_error_message = "rpc method is not whitelisted custom message"

[server]
rpc_port = 8545
max_request_body_size_bytes = 150

[backend]
response_timeout_seconds = 1
max_response_size_bytes = 1

[backends]
[backends.good]
rpc_url = "$GOOD_BACKEND_RPC_URL"
ws_url = "$GOOD_BACKEND_RPC_URL"

[backend_groups]
[backend_groups.main]
backends = ["good"]

[rpc_method_mappings]
eth_chainId = "main"
18
proxyd/integration_tests/testdata/smoke.toml
vendored
Normal file
@ -0,0 +1,18 @@
[server]
rpc_port = 8545

[backend]
response_timeout_seconds = 1

[backends]
[backends.good]
rpc_url = "$GOOD_BACKEND_RPC_URL"
ws_url = "$GOOD_BACKEND_RPC_URL"

[backend_groups]
[backend_groups.main]
backends = ["good"]

[rpc_method_mappings]
eth_chainId = "main"
14
proxyd/integration_tests/testdata/testdata.txt
vendored
Normal file
@ -0,0 +1,14 @@
name|body|responseBody
not json|not json|{"jsonrpc":"2.0","error":{"code":-32700,"message":"parse error"},"id":null}
not json-rpc|{"foo":"bar"}|{"jsonrpc":"2.0","error":{"code":-32600,"message":"invalid JSON-RPC version"},"id":null}
missing fields json-rpc|{"jsonrpc":"2.0"}|{"jsonrpc":"2.0","error":{"code":-32600,"message":"no method specified"},"id":null}
bad method json-rpc|{"jsonrpc":"2.0","method":"eth_notSendRawTransaction","id":1}|{"jsonrpc":"2.0","error":{"code":-32601,"message":"rpc method is not whitelisted"},"id":1}
no transaction data|{"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":[],"id":1}|{"jsonrpc":"2.0","error":{"code":-32602,"message":"missing value for required argument 0"},"id":1}
invalid transaction data|{"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":["0xf6806872fcc650ad4e77e0629206426cd183d751e9ddcc8d5e77"],"id":1}|{"jsonrpc":"2.0","error":{"code":-32602,"message":"rlp: value size exceeds available input length"},"id":1}
invalid transaction data|{"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":["0x1234"],"id":1}|{"jsonrpc":"2.0","error":{"code":-32602,"message":"transaction type not supported"},"id":1}
valid transaction data - simple send|{"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":["0x02f8748201a415843b9aca31843b9aca3182520894f80267194936da1e98db10bce06f3147d580a62e880de0b6b3a764000080c001a0b50ee053102360ff5fedf0933b912b7e140c90fe57fa07a0cebe70dbd72339dda072974cb7bfe5c3dc54dde110e2b049408ccab8a879949c3b4d42a3a7555a618b"],"id":1}|{"id": 123, "jsonrpc": "2.0", "result": "dummy"}
valid transaction data - contract call|{"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":["0x02f8b28201a406849502f931849502f931830147f9948f3ddd0fbf3e78ca1d6cd17379ed88e261249b5280b84447e7ef2400000000000000000000000089c8b1b2774201bac50f627403eac1b732459cf70000000000000000000000000000000000000000000000056bc75e2d63100000c080a0473c95566026c312c9664cd61145d2f3e759d49209fe96011ac012884ec5b017a0763b58f6fa6096e6ba28ee08bfac58f58fb3b8bcef5af98578bdeaddf40bde42"],"id":1}|{"id": 123, "jsonrpc": "2.0", "result": "dummy"}
valid chain id - simple send|{"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":["0x02f8748201a415843b9aca31843b9aca3182520894f80267194936da1e98db10bce06f3147d580a62e880de0b6b3a764000080c001a0b50ee053102360ff5fedf0933b912b7e140c90fe57fa07a0cebe70dbd72339dda072974cb7bfe5c3dc54dde110e2b049408ccab8a879949c3b4d42a3a7555a618b"],"id":1}|{"id": 123, "jsonrpc": "2.0", "result": "dummy"}
invalid chain id - simple send|{"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":["0x02f87683ab41308217af84773594008504a817c80082520894be53e587975603a13d0923d0aa6d37c5233dd750865af3107a400080c001a04ae265f17e882b922d39f0f0cb058a6378df1dc89da8b8165ab6bc53851b426aa0682079486be2aa23bc7514477473362cc7d63afa12c99f7d8fb15e68d69d9a48"],"id":1}|{"jsonrpc":"2.0","error":{"code":-32000,"message":"invalid sender"},"id":1}
no chain id (pre eip-155) - simple send|{"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":["0xf865808609184e72a00082271094000000000000000000000000000000000000000001001ba0d937ddb66e7788f917864b8e6974cac376b091154db1c25ff8429a6e61016e74a054ced39349e7658b7efceccfabc461e02418eb510124377949cfae8ccf1831af"],"id":1}|{"id": 123, "jsonrpc": "2.0", "result": "dummy"}
batch with mixed results|[{"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":["0x02f87683ab41308217af84773594008504a817c80082520894be53e587975603a13d0923d0aa6d37c5233dd750865af3107a400080c001a04ae265f17e882b922d39f0f0cb058a6378df1dc89da8b8165ab6bc53851b426aa0682079486be2aa23bc7514477473362cc7d63afa12c99f7d8fb15e68d69d9a48"],"id":1},{"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":["0x02f8748201a415843b9aca31843b9aca3182520894f80267194936da1e98db10bce06f3147d580a62e880de0b6b3a764000080c001a0b50ee053102360ff5fedf0933b912b7e140c90fe57fa07a0cebe70dbd72339dda072974cb7bfe5c3dc54dde110e2b049408ccab8a879949c3b4d42a3a7555a618b"],"id":1},{"bad":"json"},{"jsonrpc":"2.0","method":"eth_fooTheBar","params":[],"id":123}]|[{"jsonrpc":"2.0","error":{"code":-32000,"message":"invalid sender"},"id":1},{"id": 123, "jsonrpc": "2.0", "result": "dummy"},{"jsonrpc":"2.0","error":{"code":-32600,"message":"invalid JSON-RPC version"},"id":null},{"jsonrpc":"2.0","error":{"code":-32601,"message":"rpc method is not whitelisted"},"id":123}]
19
proxyd/integration_tests/testdata/whitelist.toml
vendored
Normal file
@ -0,0 +1,19 @@
whitelist_error_message = "rpc method is not whitelisted custom message"

[server]
rpc_port = 8545

[backend]
response_timeout_seconds = 1

[backends]
[backends.good]
rpc_url = "$GOOD_BACKEND_RPC_URL"
ws_url = "$GOOD_BACKEND_RPC_URL"

[backend_groups]
[backend_groups.main]
backends = ["good"]

[rpc_method_mappings]
eth_chainId = "main"
28
proxyd/integration_tests/testdata/ws.toml
vendored
Normal file
@ -0,0 +1,28 @@
whitelist_error_message = "rpc method is not whitelisted"

ws_backend_group = "main"

ws_method_whitelist = [
  "eth_subscribe",
  "eth_accounts"
]

[server]
rpc_port = 8545
ws_port = 8546

[backend]
response_timeout_seconds = 1

[backends]
[backends.good]
rpc_url = "$GOOD_BACKEND_RPC_URL"
ws_url = "$GOOD_BACKEND_RPC_URL"
max_ws_conns = 1

[backend_groups]
[backend_groups.main]
backends = ["good"]

[rpc_method_mappings]
eth_chainId = "main"
191
proxyd/integration_tests/util_test.go
Normal file
@ -0,0 +1,191 @@
package integration_tests

import (
    "bytes"
    "encoding/json"
    "fmt"
    "io"
    "net/http"
    "os"
    "testing"
    "time"

    "github.com/BurntSushi/toml"
    "github.com/gorilla/websocket"
    "github.com/stretchr/testify/require"
    "golang.org/x/exp/slog"

    "github.com/ethereum/go-ethereum/log"

    "github.com/ethereum-optimism/optimism/proxyd"
)

type ProxydHTTPClient struct {
    url     string
    headers http.Header
}

func NewProxydClient(url string) *ProxydHTTPClient {
    return NewProxydClientWithHeaders(url, make(http.Header))
}

func NewProxydClientWithHeaders(url string, headers http.Header) *ProxydHTTPClient {
    clonedHeaders := headers.Clone()
    clonedHeaders.Set("Content-Type", "application/json")
    return &ProxydHTTPClient{
        url:     url,
        headers: clonedHeaders,
    }
}

func (p *ProxydHTTPClient) SendRPC(method string, params []interface{}) ([]byte, int, error) {
    rpcReq := NewRPCReq("999", method, params)
    body, err := json.Marshal(rpcReq)
    if err != nil {
        panic(err)
    }
    return p.SendRequest(body)
}

func (p *ProxydHTTPClient) SendBatchRPC(reqs ...*proxyd.RPCReq) ([]byte, int, error) {
    body, err := json.Marshal(reqs)
    if err != nil {
        panic(err)
    }
    return p.SendRequest(body)
}

func (p *ProxydHTTPClient) SendRequest(body []byte) ([]byte, int, error) {
    req, err := http.NewRequest("POST", p.url, bytes.NewReader(body))
    if err != nil {
        panic(err)
    }
    req.Header = p.headers

    res, err := http.DefaultClient.Do(req)
    if err != nil {
        return nil, -1, err
    }
    defer res.Body.Close()
    code := res.StatusCode
    resBody, err := io.ReadAll(res.Body)
    if err != nil {
        panic(err)
    }
    return resBody, code, nil
}

func RequireEqualJSON(t *testing.T, expected []byte, actual []byte) {
    expJSON := canonicalizeJSON(t, expected)
    actJSON := canonicalizeJSON(t, actual)
    require.Equal(t, string(expJSON), string(actJSON))
}

func canonicalizeJSON(t *testing.T, in []byte) []byte {
    var any interface{}
    if in[0] == '[' {
        any = make([]interface{}, 0)
    } else {
        any = make(map[string]interface{})
    }

    err := json.Unmarshal(in, &any)
    require.NoError(t, err)
    out, err := json.Marshal(any)
    require.NoError(t, err)
    return out
}

func ReadConfig(name string) *proxyd.Config {
    config := new(proxyd.Config)
    _, err := toml.DecodeFile(fmt.Sprintf("testdata/%s.toml", name), config)
    if err != nil {
        panic(err)
    }
    return config
}

func NewRPCReq(id string, method string, params []interface{}) *proxyd.RPCReq {
    jsonParams, err := json.Marshal(params)
    if err != nil {
        panic(err)
    }

    return &proxyd.RPCReq{
        JSONRPC: proxyd.JSONRPCVersion,
        Method:  method,
        Params:  jsonParams,
        ID:      []byte(id),
    }
}

type ProxydWSClient struct {
    conn    *websocket.Conn
    msgCB   ProxydWSClientOnMessage
    closeCB ProxydWSClientOnClose
}

type WSMessage struct {
    Type int
    Body []byte
}

type (
    ProxydWSClientOnMessage func(msgType int, data []byte)
    ProxydWSClientOnClose   func(err error)
)

func NewProxydWSClient(
    url string,
    msgCB ProxydWSClientOnMessage,
    closeCB ProxydWSClientOnClose,
) (*ProxydWSClient, error) {
    conn, _, err := websocket.DefaultDialer.Dial(url, nil) // nolint:bodyclose
    if err != nil {
        return nil, err
    }

    c := &ProxydWSClient{
        conn:    conn,
        msgCB:   msgCB,
        closeCB: closeCB,
    }
    go c.readPump()
    return c, nil
}

func (h *ProxydWSClient) readPump() {
    for {
        mType, msg, err := h.conn.ReadMessage()
        if err != nil {
            if h.closeCB != nil {
                h.closeCB(err)
            }
            return
        }
        if h.msgCB != nil {
            h.msgCB(mType, msg)
        }
    }
}

func (h *ProxydWSClient) HardClose() {
    h.conn.Close()
}

func (h *ProxydWSClient) SoftClose() error {
    return h.WriteMessage(websocket.CloseMessage, nil)
}

func (h *ProxydWSClient) WriteMessage(msgType int, msg []byte) error {
    return h.conn.WriteMessage(msgType, msg)
}

func (h *ProxydWSClient) WriteControlMessage(msgType int, msg []byte) error {
    return h.conn.WriteControl(msgType, msg, time.Now().Add(time.Minute))
}

func InitLogger() {
    log.SetDefault(log.NewLogger(slog.NewJSONHandler(
        os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug})))
}
|
258 proxyd/integration_tests/validation_test.go Normal file
@ -0,0 +1,258 @@
package integration_tests

import (
    "fmt"
    "os"
    "strings"
    "testing"

    "github.com/ethereum-optimism/optimism/proxyd"
    "github.com/stretchr/testify/require"
)

const (
    notWhitelistedResponse        = `{"jsonrpc":"2.0","error":{"code":-32601,"message":"rpc method is not whitelisted custom message"},"id":999}`
    parseErrResponse              = `{"jsonrpc":"2.0","error":{"code":-32700,"message":"parse error"},"id":null}`
    invalidJSONRPCVersionResponse = `{"error":{"code":-32600,"message":"invalid JSON-RPC version"},"id":null,"jsonrpc":"2.0"}`
    invalidIDResponse             = `{"error":{"code":-32600,"message":"invalid ID"},"id":null,"jsonrpc":"2.0"}`
    invalidMethodResponse         = `{"error":{"code":-32600,"message":"no method specified"},"id":null,"jsonrpc":"2.0"}`
    invalidBatchLenResponse       = `{"error":{"code":-32600,"message":"must specify at least one batch call"},"id":null,"jsonrpc":"2.0"}`
)

func TestSingleRPCValidation(t *testing.T) {
    goodBackend := NewMockBackend(BatchedResponseHandler(200, goodResponse))
    defer goodBackend.Close()

    require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL()))

    config := ReadConfig("whitelist")
    client := NewProxydClient("http://127.0.0.1:8545")
    _, shutdown, err := proxyd.Start(config)
    require.NoError(t, err)
    defer shutdown()

    tests := []struct {
        name string
        body string
        res  string
        code int
    }{
        {
            "body not JSON",
            "this ain't an RPC call",
            parseErrResponse,
            400,
        },
        {
            "body not RPC",
            "{\"not\": \"rpc\"}",
            invalidJSONRPCVersionResponse,
            400,
        },
        {
            "body missing RPC ID",
            "{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23]}",
            invalidIDResponse,
            400,
        },
        {
            "body has array ID",
            "{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": []}",
            invalidIDResponse,
            400,
        },
        {
            "body has object ID",
            "{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": {}}",
            invalidIDResponse,
            400,
        },
        {
            "bad method",
            "{\"jsonrpc\": \"2.0\", \"method\": 7, \"params\": [42, 23], \"id\": 1}",
            parseErrResponse,
            400,
        },
        {
            "bad JSON-RPC",
            "{\"jsonrpc\": \"1.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": 1}",
            invalidJSONRPCVersionResponse,
            400,
        },
        {
            "omitted method",
            "{\"jsonrpc\": \"2.0\", \"params\": [42, 23], \"id\": 1}",
            invalidMethodResponse,
            400,
        },
        {
            "not whitelisted method",
            "{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": 999}",
            notWhitelistedResponse,
            403,
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            res, code, err := client.SendRequest([]byte(tt.body))
            require.NoError(t, err)
            RequireEqualJSON(t, []byte(tt.res), res)
            require.Equal(t, tt.code, code)
            require.Equal(t, 0, len(goodBackend.Requests()))
        })
    }
}

func TestBatchRPCValidation(t *testing.T) {
    goodBackend := NewMockBackend(BatchedResponseHandler(200, goodResponse))
    defer goodBackend.Close()

    require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL()))

    config := ReadConfig("whitelist")
    client := NewProxydClient("http://127.0.0.1:8545")
    _, shutdown, err := proxyd.Start(config)
    require.NoError(t, err)
    defer shutdown()

    tests := []struct {
        name     string
        body     string
        res      string
        code     int
        reqCount int
    }{
        {
            "empty batch",
            "[]",
            invalidBatchLenResponse,
            400,
            0,
        },
        {
            "bad json",
            "[{,]",
            parseErrResponse,
            400,
            0,
        },
        {
            "not object in batch",
            "[123]",
            asArray(parseErrResponse),
            200,
            0,
        },
        {
            "body not RPC",
            "[{\"not\": \"rpc\"}]",
            asArray(invalidJSONRPCVersionResponse),
            200,
            0,
        },
        {
            "body missing RPC ID",
            "[{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23]}]",
            asArray(invalidIDResponse),
            200,
            0,
        },
        {
            "body has array ID",
            "[{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": []}]",
            asArray(invalidIDResponse),
            200,
            0,
        },
        {
            "body has object ID",
            "[{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": {}}]",
            asArray(invalidIDResponse),
            200,
            0,
        },
        // this happens because we can't deserialize the method into a non
        // string value, and it blows up the parsing for the whole request.
        {
            "bad method",
            "[{\"error\":{\"code\":-32600,\"message\":\"invalid request\"},\"id\":null,\"jsonrpc\":\"2.0\"}]",
            asArray(invalidMethodResponse),
            200,
            0,
        },
        {
            "bad JSON-RPC",
            "[{\"jsonrpc\": \"1.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": 1}]",
            asArray(invalidJSONRPCVersionResponse),
            200,
            0,
        },
        {
            "omitted method",
            "[{\"jsonrpc\": \"2.0\", \"params\": [42, 23], \"id\": 1}]",
            asArray(invalidMethodResponse),
            200,
            0,
        },
        {
            "not whitelisted method",
            "[{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": 999}]",
            asArray(notWhitelistedResponse),
            200,
            0,
        },
        {
            "mixed",
            asArray(
                "{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": 999}",
                "{\"jsonrpc\": \"2.0\", \"method\": \"eth_chainId\", \"params\": [], \"id\": 123}",
                "123",
            ),
            asArray(
                notWhitelistedResponse,
                goodResponse,
                parseErrResponse,
            ),
            200,
            1,
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            res, code, err := client.SendRequest([]byte(tt.body))
            require.NoError(t, err)
            RequireEqualJSON(t, []byte(tt.res), res)
            require.Equal(t, tt.code, code)
            require.Equal(t, tt.reqCount, len(goodBackend.Requests()))
        })
    }
}

func TestSizeLimits(t *testing.T) {
    goodBackend := NewMockBackend(BatchedResponseHandler(200, goodResponse))
    defer goodBackend.Close()

    require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL()))

    config := ReadConfig("size_limits")
    client := NewProxydClient("http://127.0.0.1:8545")
    _, shutdown, err := proxyd.Start(config)
    require.NoError(t, err)
    defer shutdown()

    payload := strings.Repeat("barf", 1024*1024)
    out, code, err := client.SendRequest([]byte(fmt.Sprintf(`{"jsonrpc": "2.0", "method": "eth_chainId", "params": [%s], "id": 1}`, payload)))
    require.NoError(t, err)
    require.Equal(t, `{"jsonrpc":"2.0","error":{"code":-32021,"message":"request body too large"},"id":null}`, strings.TrimSpace(string(out)))
    require.Equal(t, 413, code)

    // The default response is already over the size limit in size_limits.toml.
    out, code, err = client.SendRequest([]byte(`{"jsonrpc": "2.0", "method": "eth_chainId", "params": [], "id": 1}`))
    require.NoError(t, err)
    require.Equal(t, `{"jsonrpc":"2.0","error":{"code":-32020,"message":"backend response too large"},"id":1}`, strings.TrimSpace(string(out)))
    require.Equal(t, 500, code)
}

func asArray(in ...string) string {
    return "[" + strings.Join(in, ",") + "]"
}
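The "bad method" comment in the batch test above is worth unpacking: json.Unmarshal decodes the whole batch array in one pass, so a single element whose method field is not a string fails decoding for the entire request, not just that element. A minimal standalone sketch of that failure mode (the rpcReq struct below is a stand-in for illustration, not proxyd's actual type):

package main

import (
    "encoding/json"
    "fmt"
)

type rpcReq struct {
    Method string `json:"method"`
}

func main() {
    var batch []rpcReq
    // The numeric method can't unmarshal into a string field, so the
    // whole array fails to decode, poisoning the batch.
    err := json.Unmarshal([]byte(`[{"jsonrpc": "2.0", "method": 7, "id": 1}]`), &batch)
    fmt.Println(err) // json: cannot unmarshal number into Go struct field rpcReq.method of type string
}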
241 proxyd/integration_tests/ws_test.go Normal file
@ -0,0 +1,241 @@
package integration_tests

import (
    "os"
    "strings"
    "sync/atomic"
    "testing"
    "time"

    "github.com/ethereum-optimism/optimism/proxyd"
    "github.com/gorilla/websocket"
    "github.com/stretchr/testify/require"
)

type backendHandler struct {
    msgCB   atomic.Value
    closeCB atomic.Value
}

func (b *backendHandler) MsgCB(conn *websocket.Conn, msgType int, data []byte) {
    cb := b.msgCB.Load()
    if cb == nil {
        return
    }
    cb.(MockWSBackendOnMessage)(conn, msgType, data)
}

func (b *backendHandler) SetMsgCB(cb MockWSBackendOnMessage) {
    b.msgCB.Store(cb)
}

func (b *backendHandler) CloseCB(conn *websocket.Conn, err error) {
    cb := b.closeCB.Load()
    if cb == nil {
        return
    }
    cb.(MockWSBackendOnClose)(conn, err)
}

func (b *backendHandler) SetCloseCB(cb MockWSBackendOnClose) {
    b.closeCB.Store(cb)
}

type clientHandler struct {
    msgCB atomic.Value
}

func (c *clientHandler) MsgCB(msgType int, data []byte) {
    cb := c.msgCB.Load().(ProxydWSClientOnMessage)
    if cb == nil {
        return
    }
    cb(msgType, data)
}

func (c *clientHandler) SetMsgCB(cb ProxydWSClientOnMessage) {
    c.msgCB.Store(cb)
}

func TestWS(t *testing.T) {
    backendHdlr := new(backendHandler)
    clientHdlr := new(clientHandler)

    backend := NewMockWSBackend(nil, func(conn *websocket.Conn, msgType int, data []byte) {
        backendHdlr.MsgCB(conn, msgType, data)
    }, func(conn *websocket.Conn, err error) {
        backendHdlr.CloseCB(conn, err)
    })
    defer backend.Close()

    require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", backend.URL()))

    config := ReadConfig("ws")
    _, shutdown, err := proxyd.Start(config)
    require.NoError(t, err)
    client, err := NewProxydWSClient("ws://127.0.0.1:8546", func(msgType int, data []byte) {
        clientHdlr.MsgCB(msgType, data)
    }, nil)
    defer client.HardClose()
    require.NoError(t, err)
    defer shutdown()

    tests := []struct {
        name       string
        backendRes string
        expRes     string
        clientReq  string
    }{
        {
            "ok response",
            "{\"jsonrpc\":\"2.0\",\"id\":1,\"result\":\"0xcd0c3e8af590364c09d0fa6a1210faf5\"}",
            "{\"jsonrpc\":\"2.0\",\"id\":1,\"result\":\"0xcd0c3e8af590364c09d0fa6a1210faf5\"}",
            "{\"id\": 1, \"method\": \"eth_subscribe\", \"params\": [\"newHeads\"]}",
        },
        {
            "garbage backend response",
            "gibblegabble",
            "{\"jsonrpc\":\"2.0\",\"error\":{\"code\":-32013,\"message\":\"backend returned an invalid response\"},\"id\":null}",
            "{\"id\": 1, \"method\": \"eth_subscribe\", \"params\": [\"newHeads\"]}",
        },
        {
            "blacklisted RPC",
            "}",
            "{\"jsonrpc\":\"2.0\",\"error\":{\"code\":-32601,\"message\":\"rpc method is not whitelisted\"},\"id\":1}",
            "{\"id\": 1, \"method\": \"eth_whatever\", \"params\": []}",
        },
        {
            "garbage client request",
            "{}",
            "{\"jsonrpc\":\"2.0\",\"error\":{\"code\":-32700,\"message\":\"parse error\"},\"id\":null}",
            "barf",
        },
        {
            "invalid client request",
            "{}",
            "{\"jsonrpc\":\"2.0\",\"error\":{\"code\":-32700,\"message\":\"parse error\"},\"id\":null}",
            "{\"jsonrpc\": \"2.0\", \"method\": true}",
        },
        {
            "eth_accounts",
            "{}",
            "{\"jsonrpc\":\"2.0\",\"result\":[],\"id\":1}",
            "{\"jsonrpc\": \"2.0\", \"method\": \"eth_accounts\", \"id\": 1}",
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            timeout := time.NewTicker(10 * time.Second)
            doneCh := make(chan struct{}, 1)
            backendHdlr.SetMsgCB(func(conn *websocket.Conn, msgType int, data []byte) {
                require.NoError(t, conn.WriteMessage(websocket.TextMessage, []byte(tt.backendRes)))
            })
            clientHdlr.SetMsgCB(func(msgType int, data []byte) {
                require.Equal(t, tt.expRes, string(data))
                doneCh <- struct{}{}
            })
            require.NoError(t, client.WriteMessage(
                websocket.TextMessage,
                []byte(tt.clientReq),
            ))
            select {
            case <-timeout.C:
                t.Fatalf("timed out")
            case <-doneCh:
                return
            }
        })
    }
}

func TestWSClientClosure(t *testing.T) {
    backendHdlr := new(backendHandler)
    clientHdlr := new(clientHandler)

    backend := NewMockWSBackend(nil, func(conn *websocket.Conn, msgType int, data []byte) {
        backendHdlr.MsgCB(conn, msgType, data)
    }, func(conn *websocket.Conn, err error) {
        backendHdlr.CloseCB(conn, err)
    })
    defer backend.Close()

    require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", backend.URL()))

    config := ReadConfig("ws")
    _, shutdown, err := proxyd.Start(config)
    require.NoError(t, err)
    defer shutdown()

    for _, closeType := range []string{"soft", "hard"} {
        t.Run(closeType, func(t *testing.T) {
            client, err := NewProxydWSClient("ws://127.0.0.1:8546", func(msgType int, data []byte) {
                clientHdlr.MsgCB(msgType, data)
            }, nil)
            require.NoError(t, err)

            timeout := time.NewTicker(30 * time.Second)
            doneCh := make(chan struct{}, 1)
            backendHdlr.SetCloseCB(func(conn *websocket.Conn, err error) {
                doneCh <- struct{}{}
            })

            if closeType == "soft" {
                require.NoError(t, client.SoftClose())
            } else {
                client.HardClose()
            }

            select {
            case <-timeout.C:
                t.Fatalf("timed out")
            case <-doneCh:
                return
            }
        })
    }
}

func TestWSClientExceedReadLimit(t *testing.T) {
    backendHdlr := new(backendHandler)
    clientHdlr := new(clientHandler)

    backend := NewMockWSBackend(nil, func(conn *websocket.Conn, msgType int, data []byte) {
        backendHdlr.MsgCB(conn, msgType, data)
    }, func(conn *websocket.Conn, err error) {
        backendHdlr.CloseCB(conn, err)
    })
    defer backend.Close()

    require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", backend.URL()))

    config := ReadConfig("ws")
    _, shutdown, err := proxyd.Start(config)
    require.NoError(t, err)
    defer shutdown()

    client, err := NewProxydWSClient("ws://127.0.0.1:8546", func(msgType int, data []byte) {
        clientHdlr.MsgCB(msgType, data)
    }, nil)
    require.NoError(t, err)

    closed := false
    originalHandler := client.conn.CloseHandler()
    client.conn.SetCloseHandler(func(code int, text string) error {
        closed = true
        return originalHandler(code, text)
    })

    backendHdlr.SetMsgCB(func(conn *websocket.Conn, msgType int, data []byte) {
        t.Fatalf("backend should not get the large message")
    })

    payload := strings.Repeat("barf", 1024*1024)
    clientReq := "{\"id\": 1, \"method\": \"eth_subscribe\", \"params\": [\"" + payload + "\"]}"
    err = client.WriteMessage(
        websocket.TextMessage,
        []byte(clientReq),
    )
    require.Error(t, err)
    require.True(t, closed)
}
92 proxyd/methods.go Normal file
@ -0,0 +1,92 @@
package proxyd

import (
    "context"
    "crypto/sha256"
    "encoding/json"
    "fmt"
    "strings"
    "sync"

    "github.com/ethereum/go-ethereum/log"
)

type RPCMethodHandler interface {
    GetRPCMethod(context.Context, *RPCReq) (*RPCRes, error)
    PutRPCMethod(context.Context, *RPCReq, *RPCRes) error
}

type StaticMethodHandler struct {
    cache     Cache
    m         sync.RWMutex
    filterGet func(*RPCReq) bool
    filterPut func(*RPCReq, *RPCRes) bool
}

func (e *StaticMethodHandler) key(req *RPCReq) string {
    // signature is the hashed json.RawMessage param contents
    h := sha256.New()
    h.Write(req.Params)
    signature := fmt.Sprintf("%x", h.Sum(nil))
    return strings.Join([]string{"cache", req.Method, signature}, ":")
}

func (e *StaticMethodHandler) GetRPCMethod(ctx context.Context, req *RPCReq) (*RPCRes, error) {
    if e.cache == nil {
        return nil, nil
    }
    if e.filterGet != nil && !e.filterGet(req) {
        return nil, nil
    }

    e.m.RLock()
    defer e.m.RUnlock()

    key := e.key(req)
    val, err := e.cache.Get(ctx, key)
    if err != nil {
        log.Error("error reading from cache", "key", key, "method", req.Method, "err", err)
        return nil, err
    }
    if val == "" {
        return nil, nil
    }

    var result interface{}
    if err := json.Unmarshal([]byte(val), &result); err != nil {
        log.Error("error unmarshalling value from cache", "key", key, "method", req.Method, "err", err)
        return nil, err
    }
    return &RPCRes{
        JSONRPC: req.JSONRPC,
        Result:  result,
        ID:      req.ID,
    }, nil
}

func (e *StaticMethodHandler) PutRPCMethod(ctx context.Context, req *RPCReq, res *RPCRes) error {
    if e.cache == nil {
        return nil
    }
    // if there is a filter on get, we don't want to cache it because it's irretrievable
    if e.filterGet != nil && !e.filterGet(req) {
        return nil
    }
    // response filter
    if e.filterPut != nil && !e.filterPut(req, res) {
        return nil
    }

    e.m.Lock()
    defer e.m.Unlock()

    key := e.key(req)
    value := mustMarshalJSON(res.Result)

    err := e.cache.Put(ctx, key, string(value))
    if err != nil {
        log.Error("error putting into cache", "key", key, "method", req.Method, "err", err)
        return err
    }
    return nil
}
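The cache key scheme above is simple to reproduce outside the package: the key is "cache:<method>:<sha256 hex of the raw params JSON>". A minimal standalone sketch (the cacheKey helper below is hypothetical, mirroring StaticMethodHandler.key):

package main

import (
    "crypto/sha256"
    "fmt"
    "strings"
)

// cacheKey mirrors StaticMethodHandler.key: "cache:<method>:<sha256 of params>".
func cacheKey(method string, rawParams []byte) string {
    h := sha256.New()
    h.Write(rawParams)
    return strings.Join([]string{"cache", method, fmt.Sprintf("%x", h.Sum(nil))}, ":")
}

func main() {
    // Two requests with byte-identical params map to the same key;
    // any difference in the raw JSON (even whitespace) yields a new key.
    fmt.Println(cacheKey("eth_chainId", []byte(`[]`)))
    fmt.Println(cacheKey("eth_getBlockByNumber", []byte(`["0x1",false]`)))
}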
601 proxyd/metrics.go Normal file
@ -0,0 +1,601 @@
package proxyd

import (
    "context"
    "fmt"
    "regexp"
    "strconv"
    "strings"
    "time"

    "github.com/ethereum/go-ethereum/common/hexutil"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
)

const (
    MetricsNamespace = "proxyd"

    RPCRequestSourceHTTP = "http"
    RPCRequestSourceWS   = "ws"

    BackendProxyd = "proxyd"
    SourceClient  = "client"
    SourceBackend = "backend"
    MethodUnknown = "unknown"
)

var PayloadSizeBuckets = []float64{10, 50, 100, 500, 1000, 5000, 10000, 100000, 1000000}
var MillisecondDurationBuckets = []float64{1, 10, 50, 100, 500, 1000, 5000, 10000, 100000}

var (
    rpcRequestsTotal = promauto.NewCounter(prometheus.CounterOpts{
        Namespace: MetricsNamespace,
        Name:      "rpc_requests_total",
        Help:      "Count of total client RPC requests.",
    })

    rpcForwardsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
        Namespace: MetricsNamespace,
        Name:      "rpc_forwards_total",
        Help:      "Count of total RPC requests forwarded to each backend.",
    }, []string{
        "auth",
        "backend_name",
        "method_name",
        "source",
    })

    rpcBackendHTTPResponseCodesTotal = promauto.NewCounterVec(prometheus.CounterOpts{
        Namespace: MetricsNamespace,
        Name:      "rpc_backend_http_response_codes_total",
        Help:      "Count of total backend responses by HTTP status code.",
    }, []string{
        "auth",
        "backend_name",
        "method_name",
        "status_code",
        "batched",
    })

    rpcErrorsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
        Namespace: MetricsNamespace,
        Name:      "rpc_errors_total",
        Help:      "Count of total RPC errors.",
    }, []string{
        "auth",
        "backend_name",
        "method_name",
        "error_code",
    })

    rpcSpecialErrorsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
        Namespace: MetricsNamespace,
        Name:      "rpc_special_errors_total",
        Help:      "Count of total special RPC errors.",
    }, []string{
        "auth",
        "backend_name",
        "method_name",
        "error_type",
    })

    rpcBackendRequestDurationSumm = promauto.NewSummaryVec(prometheus.SummaryOpts{
        Namespace:  MetricsNamespace,
        Name:       "rpc_backend_request_duration_seconds",
        Help:       "Summary of backend response times broken down by backend and method name.",
        Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.95: 0.005, 0.99: 0.001},
    }, []string{
        "backend_name",
        "method_name",
        "batched",
    })

    activeClientWsConnsGauge = promauto.NewGaugeVec(prometheus.GaugeOpts{
        Namespace: MetricsNamespace,
        Name:      "active_client_ws_conns",
        Help:      "Gauge of active client WS connections.",
    }, []string{
        "auth",
    })

    activeBackendWsConnsGauge = promauto.NewGaugeVec(prometheus.GaugeOpts{
        Namespace: MetricsNamespace,
        Name:      "active_backend_ws_conns",
        Help:      "Gauge of active backend WS connections.",
    }, []string{
        "backend_name",
    })

    unserviceableRequestsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
        Namespace: MetricsNamespace,
        Name:      "unserviceable_requests_total",
        Help:      "Count of total requests that were rejected due to no backends being available.",
    }, []string{
        "auth",
        "request_source",
    })

    httpResponseCodesTotal = promauto.NewCounterVec(prometheus.CounterOpts{
        Namespace: MetricsNamespace,
        Name:      "http_response_codes_total",
        Help:      "Count of total HTTP response codes.",
    }, []string{
        "status_code",
    })

    httpRequestDurationSumm = promauto.NewSummary(prometheus.SummaryOpts{
        Namespace:  MetricsNamespace,
        Name:       "http_request_duration_seconds",
        Help:       "Summary of HTTP request durations, in seconds.",
        Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.95: 0.005, 0.99: 0.001},
    })

    wsMessagesTotal = promauto.NewCounterVec(prometheus.CounterOpts{
        Namespace: MetricsNamespace,
        Name:      "ws_messages_total",
        Help:      "Count of total websocket messages including protocol control.",
    }, []string{
        "auth",
        "backend_name",
        "source",
    })

    redisErrorsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
        Namespace: MetricsNamespace,
        Name:      "redis_errors_total",
        Help:      "Count of total Redis errors.",
    }, []string{
        "source",
    })

    requestPayloadSizesGauge = promauto.NewHistogramVec(prometheus.HistogramOpts{
        Namespace: MetricsNamespace,
        Name:      "request_payload_sizes",
        Help:      "Histogram of client request payload sizes.",
        Buckets:   PayloadSizeBuckets,
    }, []string{
        "auth",
    })

    responsePayloadSizesGauge = promauto.NewHistogramVec(prometheus.HistogramOpts{
        Namespace: MetricsNamespace,
        Name:      "response_payload_sizes",
        Help:      "Histogram of client response payload sizes.",
        Buckets:   PayloadSizeBuckets,
    }, []string{
        "auth",
    })

    cacheHitsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
        Namespace: MetricsNamespace,
        Name:      "cache_hits_total",
        Help:      "Number of cache hits.",
    }, []string{
        "method",
    })

    cacheMissesTotal = promauto.NewCounterVec(prometheus.CounterOpts{
        Namespace: MetricsNamespace,
        Name:      "cache_misses_total",
        Help:      "Number of cache misses.",
    }, []string{
        "method",
    })

    cacheErrorsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
        Namespace: MetricsNamespace,
        Name:      "cache_errors_total",
        Help:      "Number of cache errors.",
    }, []string{
        "method",
    })

    batchRPCShortCircuitsTotal = promauto.NewCounter(prometheus.CounterOpts{
        Namespace: MetricsNamespace,
        Name:      "batch_rpc_short_circuits_total",
        Help:      "Count of total batch RPC short-circuits.",
    })

    rpcSpecialErrors = []string{
        "nonce too low",
        "gas price too high",
        "gas price too low",
        "invalid parameters",
    }

    redisCacheDurationSumm = promauto.NewHistogramVec(prometheus.HistogramOpts{
        Namespace: MetricsNamespace,
        Name:      "redis_cache_duration_milliseconds",
        Help:      "Histogram of Redis command durations, in milliseconds.",
        Buckets:   MillisecondDurationBuckets,
    }, []string{"command"})

    tooManyRequestErrorsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
        Namespace: MetricsNamespace,
        Name:      "too_many_request_errors_total",
        Help:      "Count of request timeouts due to too many concurrent RPCs.",
    }, []string{
        "backend_name",
    })

    batchSizeHistogram = promauto.NewHistogram(prometheus.HistogramOpts{
        Namespace: MetricsNamespace,
        Name:      "batch_size_summary",
        Help:      "Summary of batch sizes",
        Buckets: []float64{
            1,
            5,
            10,
            25,
            50,
            100,
        },
    })

    frontendRateLimitTakeErrors = promauto.NewCounter(prometheus.CounterOpts{
        Namespace: MetricsNamespace,
        Name:      "rate_limit_take_errors",
        Help:      "Count of errors taking frontend rate limits",
    })

    consensusLatestBlock = promauto.NewGaugeVec(prometheus.GaugeOpts{
        Namespace: MetricsNamespace,
        Name:      "group_consensus_latest_block",
        Help:      "Consensus latest block",
    }, []string{
        "backend_group_name",
    })

    consensusSafeBlock = promauto.NewGaugeVec(prometheus.GaugeOpts{
        Namespace: MetricsNamespace,
        Name:      "group_consensus_safe_block",
        Help:      "Consensus safe block",
    }, []string{
        "backend_group_name",
    })

    consensusFinalizedBlock = promauto.NewGaugeVec(prometheus.GaugeOpts{
        Namespace: MetricsNamespace,
        Name:      "group_consensus_finalized_block",
        Help:      "Consensus finalized block",
    }, []string{
        "backend_group_name",
    })

    consensusHAError = promauto.NewCounterVec(prometheus.CounterOpts{
        Namespace: MetricsNamespace,
        Name:      "group_consensus_ha_error",
        Help:      "Consensus HA error count",
    }, []string{
        "error",
    })

    consensusHALatestBlock = promauto.NewGaugeVec(prometheus.GaugeOpts{
        Namespace: MetricsNamespace,
        Name:      "group_consensus_ha_latest_block",
        Help:      "Consensus HA latest block",
    }, []string{
        "backend_group_name",
        "leader",
    })

    consensusHASafeBlock = promauto.NewGaugeVec(prometheus.GaugeOpts{
        Namespace: MetricsNamespace,
        Name:      "group_consensus_ha_safe_block",
        Help:      "Consensus HA safe block",
    }, []string{
        "backend_group_name",
        "leader",
    })

    consensusHAFinalizedBlock = promauto.NewGaugeVec(prometheus.GaugeOpts{
        Namespace: MetricsNamespace,
        Name:      "group_consensus_ha_finalized_block",
        Help:      "Consensus HA finalized block",
    }, []string{
        "backend_group_name",
        "leader",
    })

    backendLatestBlockBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{
        Namespace: MetricsNamespace,
        Name:      "backend_latest_block",
        Help:      "Current latest block observed per backend",
    }, []string{
        "backend_name",
    })

    backendSafeBlockBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{
        Namespace: MetricsNamespace,
        Name:      "backend_safe_block",
        Help:      "Current safe block observed per backend",
    }, []string{
        "backend_name",
    })

    backendFinalizedBlockBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{
        Namespace: MetricsNamespace,
        Name:      "backend_finalized_block",
        Help:      "Current finalized block observed per backend",
    }, []string{
        "backend_name",
    })

    backendUnexpectedBlockTagsBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{
        Namespace: MetricsNamespace,
        Name:      "backend_unexpected_block_tags",
        Help:      "Bool gauge for unexpected block tags",
    }, []string{
        "backend_name",
    })

    consensusGroupCount = promauto.NewGaugeVec(prometheus.GaugeOpts{
        Namespace: MetricsNamespace,
        Name:      "group_consensus_count",
        Help:      "Consensus group serving traffic count",
    }, []string{
        "backend_group_name",
    })

    consensusGroupFilteredCount = promauto.NewGaugeVec(prometheus.GaugeOpts{
        Namespace: MetricsNamespace,
        Name:      "group_consensus_filtered_count",
        Help:      "Consensus group filtered out from serving traffic count",
    }, []string{
        "backend_group_name",
    })

    consensusGroupTotalCount = promauto.NewGaugeVec(prometheus.GaugeOpts{
        Namespace: MetricsNamespace,
        Name:      "group_consensus_total_count",
        Help:      "Total count of candidates to be part of consensus group",
    }, []string{
        "backend_group_name",
    })

    consensusBannedBackends = promauto.NewGaugeVec(prometheus.GaugeOpts{
        Namespace: MetricsNamespace,
        Name:      "consensus_backend_banned",
        Help:      "Bool gauge for banned backends",
    }, []string{
        "backend_name",
    })

    consensusPeerCountBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{
        Namespace: MetricsNamespace,
        Name:      "consensus_backend_peer_count",
        Help:      "Peer count",
    }, []string{
        "backend_name",
    })

    consensusInSyncBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{
        Namespace: MetricsNamespace,
        Name:      "consensus_backend_in_sync",
        Help:      "Bool gauge for backends in sync",
    }, []string{
        "backend_name",
    })

    consensusUpdateDelayBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{
        Namespace: MetricsNamespace,
        Name:      "consensus_backend_update_delay",
        Help:      "Delay (ms) for backend update",
    }, []string{
        "backend_name",
    })

    avgLatencyBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{
        Namespace: MetricsNamespace,
        Name:      "backend_avg_latency",
        Help:      "Average latency per backend",
    }, []string{
        "backend_name",
    })

    degradedBackends = promauto.NewGaugeVec(prometheus.GaugeOpts{
        Namespace: MetricsNamespace,
        Name:      "backend_degraded",
        Help:      "Bool gauge for degraded backends",
    }, []string{
        "backend_name",
    })

    networkErrorRateBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{
        Namespace: MetricsNamespace,
        Name:      "backend_error_rate",
        Help:      "Request error rate per backend",
    }, []string{
        "backend_name",
    })

    healthyPrimaryCandidates = promauto.NewGaugeVec(prometheus.GaugeOpts{
        Namespace: MetricsNamespace,
        Name:      "healthy_candidates",
        Help:      "Record the number of healthy primary candidates",
    }, []string{
        "backend_group_name",
    })

    backendGroupFallbackBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{
        Namespace: MetricsNamespace,
        Name:      "backend_group_fallback_backenend",
        Help:      "Bool gauge for if a backend is a fallback for a backend group",
    }, []string{
        "backend_group",
        "backend_name",
        "fallback",
    })
)

func RecordRedisError(source string) {
    redisErrorsTotal.WithLabelValues(source).Inc()
}

func RecordRPCError(ctx context.Context, backendName, method string, err error) {
    rpcErr, ok := err.(*RPCErr)
    var code int
    if ok {
        MaybeRecordSpecialRPCError(ctx, backendName, method, rpcErr)
        code = rpcErr.Code
    } else {
        code = -1
    }

    rpcErrorsTotal.WithLabelValues(GetAuthCtx(ctx), backendName, method, strconv.Itoa(code)).Inc()
}

func RecordWSMessage(ctx context.Context, backendName, source string) {
    wsMessagesTotal.WithLabelValues(GetAuthCtx(ctx), backendName, source).Inc()
}

func RecordUnserviceableRequest(ctx context.Context, source string) {
    unserviceableRequestsTotal.WithLabelValues(GetAuthCtx(ctx), source).Inc()
}

func RecordRPCForward(ctx context.Context, backendName, method, source string) {
    rpcForwardsTotal.WithLabelValues(GetAuthCtx(ctx), backendName, method, source).Inc()
}

func MaybeRecordSpecialRPCError(ctx context.Context, backendName, method string, rpcErr *RPCErr) {
    errMsg := strings.ToLower(rpcErr.Message)
    for _, errStr := range rpcSpecialErrors {
        if strings.Contains(errMsg, errStr) {
            rpcSpecialErrorsTotal.WithLabelValues(GetAuthCtx(ctx), backendName, method, errStr).Inc()
            return
        }
    }
}

func RecordRequestPayloadSize(ctx context.Context, payloadSize int) {
    requestPayloadSizesGauge.WithLabelValues(GetAuthCtx(ctx)).Observe(float64(payloadSize))
}

func RecordResponsePayloadSize(ctx context.Context, payloadSize int) {
    responsePayloadSizesGauge.WithLabelValues(GetAuthCtx(ctx)).Observe(float64(payloadSize))
}

func RecordCacheHit(method string) {
    cacheHitsTotal.WithLabelValues(method).Inc()
}

func RecordCacheMiss(method string) {
    cacheMissesTotal.WithLabelValues(method).Inc()
}

func RecordCacheError(method string) {
    cacheErrorsTotal.WithLabelValues(method).Inc()
}

func RecordBatchSize(size int) {
    batchSizeHistogram.Observe(float64(size))
}

var nonAlphanumericRegex = regexp.MustCompile(`[^a-zA-Z ]+`)

func RecordGroupConsensusError(group *BackendGroup, label string, err error) {
    errClean := nonAlphanumericRegex.ReplaceAllString(err.Error(), "")
    errClean = strings.ReplaceAll(errClean, " ", "_")
    errClean = strings.ReplaceAll(errClean, "__", "_")
    label = fmt.Sprintf("%s.%s", label, errClean)
    consensusHAError.WithLabelValues(label).Inc()
}

func RecordGroupConsensusHALatestBlock(group *BackendGroup, leader string, blockNumber hexutil.Uint64) {
    consensusHALatestBlock.WithLabelValues(group.Name, leader).Set(float64(blockNumber))
}

func RecordGroupConsensusHASafeBlock(group *BackendGroup, leader string, blockNumber hexutil.Uint64) {
    consensusHASafeBlock.WithLabelValues(group.Name, leader).Set(float64(blockNumber))
}

func RecordGroupConsensusHAFinalizedBlock(group *BackendGroup, leader string, blockNumber hexutil.Uint64) {
    consensusHAFinalizedBlock.WithLabelValues(group.Name, leader).Set(float64(blockNumber))
}

func RecordGroupConsensusLatestBlock(group *BackendGroup, blockNumber hexutil.Uint64) {
    consensusLatestBlock.WithLabelValues(group.Name).Set(float64(blockNumber))
}

func RecordGroupConsensusSafeBlock(group *BackendGroup, blockNumber hexutil.Uint64) {
    consensusSafeBlock.WithLabelValues(group.Name).Set(float64(blockNumber))
}

func RecordGroupConsensusFinalizedBlock(group *BackendGroup, blockNumber hexutil.Uint64) {
    consensusFinalizedBlock.WithLabelValues(group.Name).Set(float64(blockNumber))
}

func RecordGroupConsensusCount(group *BackendGroup, count int) {
    consensusGroupCount.WithLabelValues(group.Name).Set(float64(count))
}

func RecordGroupConsensusFilteredCount(group *BackendGroup, count int) {
    consensusGroupFilteredCount.WithLabelValues(group.Name).Set(float64(count))
}

func RecordGroupTotalCount(group *BackendGroup, count int) {
    consensusGroupTotalCount.WithLabelValues(group.Name).Set(float64(count))
}

func RecordBackendLatestBlock(b *Backend, blockNumber hexutil.Uint64) {
    backendLatestBlockBackend.WithLabelValues(b.Name).Set(float64(blockNumber))
}

func RecordBackendSafeBlock(b *Backend, blockNumber hexutil.Uint64) {
    backendSafeBlockBackend.WithLabelValues(b.Name).Set(float64(blockNumber))
}

func RecordBackendFinalizedBlock(b *Backend, blockNumber hexutil.Uint64) {
    backendFinalizedBlockBackend.WithLabelValues(b.Name).Set(float64(blockNumber))
}

func RecordBackendUnexpectedBlockTags(b *Backend, unexpected bool) {
    backendUnexpectedBlockTagsBackend.WithLabelValues(b.Name).Set(boolToFloat64(unexpected))
}

func RecordConsensusBackendBanned(b *Backend, banned bool) {
    consensusBannedBackends.WithLabelValues(b.Name).Set(boolToFloat64(banned))
}

func RecordHealthyCandidates(b *BackendGroup, candidates int) {
    healthyPrimaryCandidates.WithLabelValues(b.Name).Set(float64(candidates))
}

func RecordConsensusBackendPeerCount(b *Backend, peerCount uint64) {
    consensusPeerCountBackend.WithLabelValues(b.Name).Set(float64(peerCount))
}

func RecordConsensusBackendInSync(b *Backend, inSync bool) {
    consensusInSyncBackend.WithLabelValues(b.Name).Set(boolToFloat64(inSync))
}

func RecordConsensusBackendUpdateDelay(b *Backend, lastUpdate time.Time) {
    // avoid recording the delay for the first update
    if lastUpdate.IsZero() {
        return
    }
    delay := time.Since(lastUpdate)
    consensusUpdateDelayBackend.WithLabelValues(b.Name).Set(float64(delay.Milliseconds()))
}

func RecordBackendNetworkLatencyAverageSlidingWindow(b *Backend, avgLatency time.Duration) {
    avgLatencyBackend.WithLabelValues(b.Name).Set(float64(avgLatency.Milliseconds()))
    degradedBackends.WithLabelValues(b.Name).Set(boolToFloat64(b.IsDegraded()))
}

func RecordBackendNetworkErrorRateSlidingWindow(b *Backend, rate float64) {
    networkErrorRateBackend.WithLabelValues(b.Name).Set(rate)
}

func RecordBackendGroupFallbacks(bg *BackendGroup, name string, fallback bool) {
    backendGroupFallbackBackend.WithLabelValues(bg.Name, name, strconv.FormatBool(fallback)).Set(boolToFloat64(fallback))
}

func boolToFloat64(b bool) float64 {
    if b {
        return 1
    }
    return 0
}
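RecordGroupConsensusError compresses arbitrary error text into a Prometheus-safe label so that label cardinality stays bounded. A standalone sketch of the same sanitization (the sample error string below is illustrative only):

package main

import (
    "fmt"
    "regexp"
    "strings"
)

var nonAlphanumeric = regexp.MustCompile(`[^a-zA-Z ]+`)

func main() {
    errText := "dial tcp 10.0.0.1:8545: i/o timeout" // illustrative error text
    // Strip everything but letters and spaces, then normalize spaces to
    // single underscores, matching the logic in RecordGroupConsensusError.
    clean := nonAlphanumeric.ReplaceAllString(errText, "")
    clean = strings.ReplaceAll(clean, " ", "_")
    clean = strings.ReplaceAll(clean, "__", "_")
    fmt.Println("ha_error." + clean) // ha_error.dial_tcp_io_timeout
}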
188 proxyd/pkg/avg-sliding-window/sliding.go Normal file
@ -0,0 +1,188 @@
package avg_sliding_window

import (
    "sync"
    "time"

    lm "github.com/emirpasic/gods/maps/linkedhashmap"
)

type Clock interface {
    Now() time.Time
}

// DefaultClock provides a clock that gets current time from the system time
type DefaultClock struct{}

func NewDefaultClock() *DefaultClock {
    return &DefaultClock{}
}

func (c DefaultClock) Now() time.Time {
    return time.Now()
}

// AdjustableClock provides a static clock to easily override the system time
type AdjustableClock struct {
    now time.Time
}

func NewAdjustableClock(now time.Time) *AdjustableClock {
    return &AdjustableClock{now: now}
}

func (c *AdjustableClock) Now() time.Time {
    return c.now
}

func (c *AdjustableClock) Set(now time.Time) {
    c.now = now
}

type bucket struct {
    sum float64
    qty uint
}

// AvgSlidingWindow calculates moving averages efficiently.
// Data points are rounded to nearest bucket of size `bucketSize`,
// and evicted when they are too old based on `windowLength`
type AvgSlidingWindow struct {
    mux          sync.Mutex
    bucketSize   time.Duration
    windowLength time.Duration
    clock        Clock
    buckets      *lm.Map
    qty          uint
    sum          float64
}

type SlidingWindowOpts func(sw *AvgSlidingWindow)

func NewSlidingWindow(opts ...SlidingWindowOpts) *AvgSlidingWindow {
    sw := &AvgSlidingWindow{
        buckets: lm.New(),
    }
    for _, opt := range opts {
        opt(sw)
    }
    if sw.bucketSize == 0 {
        sw.bucketSize = time.Second
    }
    if sw.windowLength == 0 {
        sw.windowLength = 5 * time.Minute
    }
    if sw.clock == nil {
        sw.clock = NewDefaultClock()
    }
    return sw
}

func WithWindowLength(windowLength time.Duration) SlidingWindowOpts {
    return func(sw *AvgSlidingWindow) {
        sw.windowLength = windowLength
    }
}

func WithBucketSize(bucketSize time.Duration) SlidingWindowOpts {
    return func(sw *AvgSlidingWindow) {
        sw.bucketSize = bucketSize
    }
}

func WithClock(clock Clock) SlidingWindowOpts {
    return func(sw *AvgSlidingWindow) {
        sw.clock = clock
    }
}

func (sw *AvgSlidingWindow) inWindow(t time.Time) bool {
    now := sw.clock.Now().Round(sw.bucketSize)
    windowStart := now.Add(-sw.windowLength)
    return windowStart.Before(t) && !t.After(now)
}

// Add inserts a new data point into the window, with value `val` and the current time
func (sw *AvgSlidingWindow) Add(val float64) {
    t := sw.clock.Now()
    sw.AddWithTime(t, val)
}

// Incr is an alias to insert a data point with value float64(1) and the current time
func (sw *AvgSlidingWindow) Incr() {
    sw.Add(1)
}

// AddWithTime inserts a new data point into the window, with value `val` and time `t`
func (sw *AvgSlidingWindow) AddWithTime(t time.Time, val float64) {
    sw.advance()

    defer sw.mux.Unlock()
    sw.mux.Lock()

    key := t.Round(sw.bucketSize)
    if !sw.inWindow(key) {
        return
    }

    var b *bucket
    current, found := sw.buckets.Get(key)
    if !found {
        b = &bucket{}
    } else {
        b = current.(*bucket)
    }

    // update bucket
    bsum := b.sum
    b.qty += 1
    b.sum = bsum + val

    // update window
    wsum := sw.sum
    sw.qty += 1
    sw.sum = wsum - bsum + b.sum
    sw.buckets.Put(key, b)
}

// advance evicts old data points
func (sw *AvgSlidingWindow) advance() {
    defer sw.mux.Unlock()
    sw.mux.Lock()
    now := sw.clock.Now().Round(sw.bucketSize)
    windowStart := now.Add(-sw.windowLength)
    keys := sw.buckets.Keys()
    for _, k := range keys {
        if k.(time.Time).After(windowStart) {
            break
        }
        val, _ := sw.buckets.Get(k)
        b := val.(*bucket)
        sw.buckets.Remove(k)
        if sw.buckets.Size() > 0 {
            sw.qty -= b.qty
            sw.sum = sw.sum - b.sum
        } else {
            sw.qty = 0
            sw.sum = 0.0
        }
    }
}

// Avg retrieves the current average for the sliding window
func (sw *AvgSlidingWindow) Avg() float64 {
    sw.advance()
    if sw.qty == 0 {
        return 0
    }
    return sw.sum / float64(sw.qty)
}

// Sum retrieves the current sum for the sliding window
func (sw *AvgSlidingWindow) Sum() float64 {
    sw.advance()
    return sw.sum
}

// Count retrieves the data point count for the sliding window
func (sw *AvgSlidingWindow) Count() uint {
    sw.advance()
    return sw.qty
}
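For context, here is how the window is typically driven (a minimal sketch; the import path is assumed from the file's location in the repo, and the latency values are made up):

package main

import (
    "fmt"
    "time"

    sw "github.com/ethereum-optimism/optimism/proxyd/pkg/avg-sliding-window"
)

func main() {
    // Defaults are a 5-minute window with 1-second buckets; override them here.
    window := sw.NewSlidingWindow(
        sw.WithWindowLength(10*time.Second),
        sw.WithBucketSize(time.Second),
    )
    window.Add(120) // e.g. a backend latency sample, in ms
    window.Add(80)
    // Avg/Sum/Count first evict anything older than the window.
    fmt.Println(window.Avg(), window.Sum(), window.Count()) // 100 200 2
}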
277
proxyd/pkg/avg-sliding-window/sliding_test.go
Normal file
277
proxyd/pkg/avg-sliding-window/sliding_test.go
Normal file
@ -0,0 +1,277 @@
|
||||
package avg_sliding_window
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestSlidingWindow_AddWithTime_Single(t *testing.T) {
|
||||
now := ts("2023-04-21 15:04:05")
|
||||
clock := NewAdjustableClock(now)
|
||||
|
||||
sw := NewSlidingWindow(
|
||||
WithWindowLength(10*time.Second),
|
||||
WithBucketSize(time.Second),
|
||||
WithClock(clock))
|
||||
sw.AddWithTime(ts("2023-04-21 15:04:05"), 5)
|
||||
require.Equal(t, 5.0, sw.Avg())
|
||||
require.Equal(t, 5.0, sw.Sum())
|
||||
require.Equal(t, 1, int(sw.Count()))
|
||||
require.Equal(t, 1, sw.buckets.Size())
|
||||
require.Equal(t, 1, int(sw.buckets.Values()[0].(*bucket).qty))
|
||||
require.Equal(t, 5.0, sw.buckets.Values()[0].(*bucket).sum)
|
||||
}
|
||||
|
||||
func TestSlidingWindow_AddWithTime_TwoValues_SameBucket(t *testing.T) {
|
||||
now := ts("2023-04-21 15:04:05")
|
||||
clock := NewAdjustableClock(now)
|
||||
|
||||
sw := NewSlidingWindow(
|
||||
WithWindowLength(10*time.Second),
|
||||
WithBucketSize(time.Second),
|
||||
WithClock(clock))
|
||||
sw.AddWithTime(ts("2023-04-21 15:04:05"), 5)
|
||||
sw.AddWithTime(ts("2023-04-21 15:04:05"), 5)
|
||||
require.Equal(t, 5.0, sw.Avg())
|
||||
require.Equal(t, 10.0, sw.Sum())
|
||||
require.Equal(t, 2, int(sw.Count()))
|
||||
require.Equal(t, 1, sw.buckets.Size())
|
||||
require.Equal(t, 2, int(sw.buckets.Values()[0].(*bucket).qty))
|
||||
require.Equal(t, 10.0, sw.buckets.Values()[0].(*bucket).sum)
|
||||
}
|
||||
|
||||
func TestSlidingWindow_AddWithTime_ThreeValues_SameBucket(t *testing.T) {
|
||||
now := ts("2023-04-21 15:04:05")
|
||||
clock := NewAdjustableClock(now)
|
||||
|
||||
sw := NewSlidingWindow(
|
||||
WithWindowLength(10*time.Second),
|
||||
WithBucketSize(time.Second),
|
||||
WithClock(clock))
|
||||
sw.AddWithTime(ts("2023-04-21 15:04:05"), 4)
|
||||
sw.AddWithTime(ts("2023-04-21 15:04:05"), 5)
|
||||
sw.AddWithTime(ts("2023-04-21 15:04:05"), 6)
|
||||
require.Equal(t, 5.0, sw.Avg())
|
||||
require.Equal(t, 15.0, sw.Sum())
|
||||
require.Equal(t, 3, int(sw.Count()))
|
||||
require.Equal(t, 1, sw.buckets.Size())
|
||||
require.Equal(t, 15.0, sw.buckets.Values()[0].(*bucket).sum)
|
||||
require.Equal(t, 3, int(sw.buckets.Values()[0].(*bucket).qty))
|
||||
}
|
||||
|
||||
func TestSlidingWindow_AddWithTime_ThreeValues_ThreeBuckets(t *testing.T) {
|
||||
now := ts("2023-04-21 15:04:05")
|
||||
clock := NewAdjustableClock(now)
|
||||
|
||||
sw := NewSlidingWindow(
|
||||
WithWindowLength(10*time.Second),
|
||||
WithBucketSize(time.Second),
|
||||
WithClock(clock))
|
||||
sw.AddWithTime(ts("2023-04-21 15:04:01"), 4)
|
||||
sw.AddWithTime(ts("2023-04-21 15:04:02"), 5)
|
||||
sw.AddWithTime(ts("2023-04-21 15:04:05"), 6)
|
||||
require.Equal(t, 5.0, sw.Avg())
|
||||
require.Equal(t, 15.0, sw.Sum())
|
||||
require.Equal(t, 3, int(sw.Count()))
|
||||
require.Equal(t, 3, sw.buckets.Size())
|
||||
require.Equal(t, 1, int(sw.buckets.Values()[0].(*bucket).qty))
|
||||
require.Equal(t, 4.0, sw.buckets.Values()[0].(*bucket).sum)
|
||||
require.Equal(t, 1, int(sw.buckets.Values()[1].(*bucket).qty))
|
||||
require.Equal(t, 5.0, sw.buckets.Values()[1].(*bucket).sum)
|
||||
require.Equal(t, 1, int(sw.buckets.Values()[2].(*bucket).qty))
|
||||
require.Equal(t, 6.0, sw.buckets.Values()[2].(*bucket).sum)
|
||||
}
|
||||
|
||||
func TestSlidingWindow_AddWithTime_OutWindow(t *testing.T) {
|
||||
now := ts("2023-04-21 15:04:05")
|
||||
clock := NewAdjustableClock(now)
|
||||
|
||||
sw := NewSlidingWindow(
|
||||
WithWindowLength(10*time.Second),
|
||||
WithBucketSize(time.Second),
|
||||
WithClock(clock))
|
||||
sw.AddWithTime(ts("2023-04-21 15:03:55"), 1000)
|
||||
sw.AddWithTime(ts("2023-04-21 15:04:01"), 4)
|
||||
sw.AddWithTime(ts("2023-04-21 15:04:02"), 5)
|
||||
sw.AddWithTime(ts("2023-04-21 15:04:05"), 6)
|
||||
require.Equal(t, 5.0, sw.Avg())
|
||||
require.Equal(t, 15.0, sw.Sum())
|
||||
require.Equal(t, 3, int(sw.Count()))
|
||||
require.Equal(t, 3, sw.buckets.Size())
|
||||
require.Equal(t, 1, int(sw.buckets.Values()[0].(*bucket).qty))
|
||||
require.Equal(t, 4.0, sw.buckets.Values()[0].(*bucket).sum)
|
||||
require.Equal(t, 1, int(sw.buckets.Values()[1].(*bucket).qty))
|
||||
require.Equal(t, 5.0, sw.buckets.Values()[1].(*bucket).sum)
|
||||
require.Equal(t, 1, int(sw.buckets.Values()[2].(*bucket).qty))
|
||||
require.Equal(t, 6.0, sw.buckets.Values()[2].(*bucket).sum)
|
||||
}
|
||||
|
||||
func TestSlidingWindow_AdvanceClock(t *testing.T) {
|
||||
now := ts("2023-04-21 15:04:05")
|
||||
clock := NewAdjustableClock(now)
|
||||
|
||||
sw := NewSlidingWindow(
|
||||
WithWindowLength(10*time.Second),
|
||||
WithBucketSize(time.Second),
|
||||
WithClock(clock))
|
||||
sw.AddWithTime(ts("2023-04-21 15:04:01"), 4)
|
||||
sw.AddWithTime(ts("2023-04-21 15:04:02"), 5)
|
||||
sw.AddWithTime(ts("2023-04-21 15:04:05"), 6)
|
||||
require.Equal(t, 5.0, sw.Avg())
|
||||
require.Equal(t, 15.0, sw.Sum())
|
||||
require.Equal(t, 3, int(sw.Count()))
|
||||
require.Equal(t, 3, sw.buckets.Size())
|
||||
|
||||
require.Equal(t, 1, int(sw.buckets.Values()[0].(*bucket).qty))
|
||||
require.Equal(t, 4.0, sw.buckets.Values()[0].(*bucket).sum)
|
||||
require.Equal(t, 1, int(sw.buckets.Values()[1].(*bucket).qty))
|
||||
require.Equal(t, 5.0, sw.buckets.Values()[1].(*bucket).sum)
|
||||
require.Equal(t, 1, int(sw.buckets.Values()[2].(*bucket).qty))
|
||||
require.Equal(t, 6.0, sw.buckets.Values()[2].(*bucket).sum)
|
||||
|
||||
// up until 15:04:05 we had 3 buckets
|
||||
// let's advance the clock to 15:04:11 and the first data point should be evicted
|
||||
clock.Set(ts("2023-04-21 15:04:11"))
|
||||
require.Equal(t, 5.5, sw.Avg())
|
||||
require.Equal(t, 11.0, sw.Sum())
|
||||
require.Equal(t, 2, int(sw.Count()))
|
||||
require.Equal(t, 2, sw.buckets.Size())
|
||||
require.Equal(t, 1, int(sw.buckets.Values()[0].(*bucket).qty))
|
||||
require.Equal(t, 5.0, sw.buckets.Values()[0].(*bucket).sum)
|
||||
require.Equal(t, 1, int(sw.buckets.Values()[1].(*bucket).qty))
|
||||
require.Equal(t, 6.0, sw.buckets.Values()[1].(*bucket).sum)
|
||||
|
||||
// let's advance the clock to 15:04:12 and another data point should be evicted
|
||||
clock.Set(ts("2023-04-21 15:04:12"))
|
||||
	require.Equal(t, 6.0, sw.Avg())
	require.Equal(t, 6.0, sw.Sum())
	require.Equal(t, 1, int(sw.Count()))
	require.Equal(t, 1, sw.buckets.Size())
	require.Equal(t, 1, int(sw.buckets.Values()[0].(*bucket).qty))
	require.Equal(t, 6.0, sw.buckets.Values()[0].(*bucket).sum)

	// let's advance the clock to 15:04:25; all data points should be evicted
	clock.Set(ts("2023-04-21 15:04:25"))
	require.Equal(t, 0.0, sw.Avg())
	require.Equal(t, 0.0, sw.Sum())
	require.Equal(t, 0, int(sw.Count()))
	require.Equal(t, 0, sw.buckets.Size())
}

func TestSlidingWindow_MultipleValPerBucket(t *testing.T) {
	now := ts("2023-04-21 15:04:05")
	clock := NewAdjustableClock(now)

	sw := NewSlidingWindow(
		WithWindowLength(10*time.Second),
		WithBucketSize(time.Second),
		WithClock(clock))
	sw.AddWithTime(ts("2023-04-21 15:04:01"), 4)
	sw.AddWithTime(ts("2023-04-21 15:04:01"), 12)
	sw.AddWithTime(ts("2023-04-21 15:04:02"), 5)
	sw.AddWithTime(ts("2023-04-21 15:04:02"), 15)
	sw.AddWithTime(ts("2023-04-21 15:04:05"), 6)
	sw.AddWithTime(ts("2023-04-21 15:04:05"), 3)
	sw.AddWithTime(ts("2023-04-21 15:04:05"), 1)
	sw.AddWithTime(ts("2023-04-21 15:04:05"), 3)
	require.Equal(t, 6.125, sw.Avg())
	require.Equal(t, 49.0, sw.Sum())
	require.Equal(t, 8, int(sw.Count()))
	require.Equal(t, 3, sw.buckets.Size())
	require.Equal(t, 2, int(sw.buckets.Values()[0].(*bucket).qty))
	require.Equal(t, 16.0, sw.buckets.Values()[0].(*bucket).sum)
	require.Equal(t, 2, int(sw.buckets.Values()[1].(*bucket).qty))
	require.Equal(t, 20.0, sw.buckets.Values()[1].(*bucket).sum)
	require.Equal(t, 4, int(sw.buckets.Values()[2].(*bucket).qty))
	require.Equal(t, 13.0, sw.buckets.Values()[2].(*bucket).sum)

	// up until 15:04:05 we had 3 buckets
	// let's advance the clock to 15:04:11; the first bucket (two data points) should be evicted
	clock.Set(ts("2023-04-21 15:04:11"))
	require.Equal(t, 5.5, sw.Avg())
	require.Equal(t, 33.0, sw.Sum())
	require.Equal(t, 6, int(sw.Count()))
	require.Equal(t, 2, sw.buckets.Size())
	require.Equal(t, 2, int(sw.buckets.Values()[0].(*bucket).qty))
	require.Equal(t, 20.0, sw.buckets.Values()[0].(*bucket).sum)
	require.Equal(t, 4, int(sw.buckets.Values()[1].(*bucket).qty))
	require.Equal(t, 13.0, sw.buckets.Values()[1].(*bucket).sum)

	// let's advance the clock to 15:04:12; the next bucket should be evicted
	clock.Set(ts("2023-04-21 15:04:12"))
	require.Equal(t, 3.25, sw.Avg())
	require.Equal(t, 13.0, sw.Sum())
	require.Equal(t, 4, int(sw.Count()))
	require.Equal(t, 1, sw.buckets.Size())
	require.Equal(t, 4, int(sw.buckets.Values()[0].(*bucket).qty))
	require.Equal(t, 13.0, sw.buckets.Values()[0].(*bucket).sum)

	// let's advance the clock to 15:04:25; all data points should be evicted
	clock.Set(ts("2023-04-21 15:04:25"))
	require.Equal(t, 0.0, sw.Avg())
	require.Equal(t, 0, sw.buckets.Size())
}

func TestSlidingWindow_CustomBucket(t *testing.T) {
	now := ts("2023-04-21 15:04:05")
	clock := NewAdjustableClock(now)

	sw := NewSlidingWindow(
		WithWindowLength(30*time.Second),
		WithBucketSize(10*time.Second),
		WithClock(clock))
	sw.AddWithTime(ts("2023-04-21 15:03:49"), 5)  // key: 03:50, sum: 5.0
	sw.AddWithTime(ts("2023-04-21 15:04:02"), 15) // key: 04:00
	sw.AddWithTime(ts("2023-04-21 15:04:03"), 5)  // key: 04:00
	sw.AddWithTime(ts("2023-04-21 15:04:04"), 1)  // key: 04:00, sum: 21.0
	sw.AddWithTime(ts("2023-04-21 15:04:05"), 3)  // key: 04:10, sum: 3.0
	require.Equal(t, 5.8, sw.Avg())
	require.Equal(t, 29.0, sw.Sum())
	require.Equal(t, 5, int(sw.Count()))
	require.Equal(t, 3, sw.buckets.Size())
	require.Equal(t, 5.0, sw.buckets.Values()[0].(*bucket).sum)
	require.Equal(t, 1, int(sw.buckets.Values()[0].(*bucket).qty))
	require.Equal(t, 21.0, sw.buckets.Values()[1].(*bucket).sum)
	require.Equal(t, 3, int(sw.buckets.Values()[1].(*bucket).qty))
	require.Equal(t, 3.0, sw.buckets.Values()[2].(*bucket).sum)
	require.Equal(t, 1, int(sw.buckets.Values()[2].(*bucket).qty))

	// up until 15:04:05 we had 3 buckets
	// let's advance the clock to 15:04:21; the first data point should be evicted
	clock.Set(ts("2023-04-21 15:04:21"))
	require.Equal(t, 6.0, sw.Avg())
	require.Equal(t, 24.0, sw.Sum())
	require.Equal(t, 4, int(sw.Count()))
	require.Equal(t, 2, sw.buckets.Size())
	require.Equal(t, 21.0, sw.buckets.Values()[0].(*bucket).sum)
	require.Equal(t, 3, int(sw.buckets.Values()[0].(*bucket).qty))
	require.Equal(t, 3.0, sw.buckets.Values()[1].(*bucket).sum)
	require.Equal(t, 1, int(sw.buckets.Values()[1].(*bucket).qty))

	// let's advance the clock to 15:04:32; the next bucket should be evicted
	clock.Set(ts("2023-04-21 15:04:32"))
	require.Equal(t, 3.0, sw.Avg())
	require.Equal(t, 3.0, sw.Sum())
	require.Equal(t, 1, sw.buckets.Size())
	require.Equal(t, 1, int(sw.Count()))
	require.Equal(t, 3.0, sw.buckets.Values()[0].(*bucket).sum)
	require.Equal(t, 1, int(sw.buckets.Values()[0].(*bucket).qty))

	// let's advance the clock to 15:04:46; all data points should be evicted
	clock.Set(ts("2023-04-21 15:04:46"))
	require.Equal(t, 0.0, sw.Avg())
	require.Equal(t, 0.0, sw.Sum())
	require.Equal(t, 0, int(sw.Count()))
	require.Equal(t, 0, sw.buckets.Size())
}

// ts is a convenience helper that parses a time.Time from a string in the
// format "2006-01-02 15:04:05", panicking if the string cannot be parsed.
func ts(s string) time.Time {
	t, err := time.Parse(time.DateTime, s)
	if err != nil {
		panic(err)
	}
	return t
}
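Taken together, these tests pin down the window's contract: samples land in fixed-size buckets keyed by timestamp, and whole buckets are evicted once they age past the window length. A minimal sketch of in-package usage, assuming only the constructor, options, and accessors exercised above (plus `fmt`/`time` imports and a wall-clock default when `WithClock` is omitted):

```go
// Sketch (same package): a one-minute rolling latency summary.
func exampleLatencyWindow() {
	sw := NewSlidingWindow(
		WithWindowLength(60*time.Second), // keep one minute of history
		WithBucketSize(time.Second))      // one bucket per second
	sw.AddWithTime(time.Now(), 123.0) // record a latency sample, in ms
	fmt.Printf("avg=%.1fms over %d samples\n", sw.Avg(), int(sw.Count()))
}
```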
472
proxyd/proxyd.go
Normal file
@@ -0,0 +1,472 @@

package proxyd

import (
	"context"
	"crypto/tls"
	"errors"
	"fmt"
	"net/http"
	"os"
	"time"

	"github.com/ethereum/go-ethereum/common/math"
	"github.com/ethereum/go-ethereum/log"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/redis/go-redis/v9"
	"golang.org/x/exp/slog"
	"golang.org/x/sync/semaphore"
)

func SetLogLevel(logLevel slog.Leveler) {
	log.SetDefault(log.NewLogger(slog.NewJSONHandler(
		os.Stdout, &slog.HandlerOptions{Level: logLevel})))
}

func Start(config *Config) (*Server, func(), error) {
	if len(config.Backends) == 0 {
		return nil, nil, errors.New("must define at least one backend")
	}
	if len(config.BackendGroups) == 0 {
		return nil, nil, errors.New("must define at least one backend group")
	}
	if len(config.RPCMethodMappings) == 0 {
		return nil, nil, errors.New("must define at least one RPC method mapping")
	}

	for authKey := range config.Authentication {
		if authKey == "none" {
			return nil, nil, errors.New("cannot use none as an auth key")
		}
	}

	var redisClient *redis.Client
	if config.Redis.URL != "" {
		rURL, err := ReadFromEnvOrConfig(config.Redis.URL)
		if err != nil {
			return nil, nil, err
		}
		redisClient, err = NewRedisClient(rURL)
		if err != nil {
			return nil, nil, err
		}
	}

	if redisClient == nil && config.RateLimit.UseRedis {
		return nil, nil, errors.New("must specify a Redis URL if UseRedis is true in rate limit config")
	}

	// While modifying shared globals is a bad practice, the alternative
	// is to clone these errors on every invocation. This is inefficient.
	// We'd also have to make sure that errors.Is and errors.As continue
	// to function properly on the cloned errors.
	if config.RateLimit.ErrorMessage != "" {
		ErrOverRateLimit.Message = config.RateLimit.ErrorMessage
	}
	if config.WhitelistErrorMessage != "" {
		ErrMethodNotWhitelisted.Message = config.WhitelistErrorMessage
	}
	if config.BatchConfig.ErrorMessage != "" {
		ErrTooManyBatchRequests.Message = config.BatchConfig.ErrorMessage
	}

	if config.SenderRateLimit.Enabled {
		if config.SenderRateLimit.Limit <= 0 {
			return nil, nil, errors.New("limit in sender_rate_limit must be > 0")
		}
		if time.Duration(config.SenderRateLimit.Interval) < time.Second {
			return nil, nil, errors.New("interval in sender_rate_limit must be >= 1s")
		}
	}

	maxConcurrentRPCs := config.Server.MaxConcurrentRPCs
	if maxConcurrentRPCs == 0 {
		maxConcurrentRPCs = math.MaxInt64
	}
	rpcRequestSemaphore := semaphore.NewWeighted(maxConcurrentRPCs)

	backendNames := make([]string, 0)
	backendsByName := make(map[string]*Backend)
	for name, cfg := range config.Backends {
		opts := make([]BackendOpt, 0)

		rpcURL, err := ReadFromEnvOrConfig(cfg.RPCURL)
		if err != nil {
			return nil, nil, err
		}
		wsURL, err := ReadFromEnvOrConfig(cfg.WSURL)
		if err != nil {
			return nil, nil, err
		}
		if rpcURL == "" {
			return nil, nil, fmt.Errorf("must define an RPC URL for backend %s", name)
		}

		if config.BackendOptions.ResponseTimeoutSeconds != 0 {
			timeout := secondsToDuration(config.BackendOptions.ResponseTimeoutSeconds)
			opts = append(opts, WithTimeout(timeout))
		}
		if config.BackendOptions.MaxRetries != 0 {
			opts = append(opts, WithMaxRetries(config.BackendOptions.MaxRetries))
		}
		if config.BackendOptions.MaxResponseSizeBytes != 0 {
			opts = append(opts, WithMaxResponseSize(config.BackendOptions.MaxResponseSizeBytes))
		}
		if config.BackendOptions.OutOfServiceSeconds != 0 {
			opts = append(opts, WithOutOfServiceDuration(secondsToDuration(config.BackendOptions.OutOfServiceSeconds)))
		}
		if config.BackendOptions.MaxDegradedLatencyThreshold > 0 {
			opts = append(opts, WithMaxDegradedLatencyThreshold(time.Duration(config.BackendOptions.MaxDegradedLatencyThreshold)))
		}
		if config.BackendOptions.MaxLatencyThreshold > 0 {
			opts = append(opts, WithMaxLatencyThreshold(time.Duration(config.BackendOptions.MaxLatencyThreshold)))
		}
		if config.BackendOptions.MaxErrorRateThreshold > 0 {
			opts = append(opts, WithMaxErrorRateThreshold(config.BackendOptions.MaxErrorRateThreshold))
		}
		if cfg.MaxRPS != 0 {
			opts = append(opts, WithMaxRPS(cfg.MaxRPS))
		}
		if cfg.MaxWSConns != 0 {
			opts = append(opts, WithMaxWSConns(cfg.MaxWSConns))
		}
		if cfg.Password != "" {
			passwordVal, err := ReadFromEnvOrConfig(cfg.Password)
			if err != nil {
				return nil, nil, err
			}
			opts = append(opts, WithBasicAuth(cfg.Username, passwordVal))
		}

		headers := map[string]string{}
		for headerName, headerValue := range cfg.Headers {
			headerValue, err := ReadFromEnvOrConfig(headerValue)
			if err != nil {
				return nil, nil, err
			}

			headers[headerName] = headerValue
		}
		opts = append(opts, WithHeaders(headers))

		tlsConfig, err := configureBackendTLS(cfg)
		if err != nil {
			return nil, nil, err
		}
		if tlsConfig != nil {
			log.Info("using custom TLS config for backend", "name", name)
			opts = append(opts, WithTLSConfig(tlsConfig))
		}
		if cfg.StripTrailingXFF {
			opts = append(opts, WithStrippedTrailingXFF())
		}
		opts = append(opts, WithProxydIP(os.Getenv("PROXYD_IP")))
		opts = append(opts, WithConsensusSkipPeerCountCheck(cfg.ConsensusSkipPeerCountCheck))
		opts = append(opts, WithConsensusForcedCandidate(cfg.ConsensusForcedCandidate))
		opts = append(opts, WithWeight(cfg.Weight))

		receiptsTarget, err := ReadFromEnvOrConfig(cfg.ConsensusReceiptsTarget)
		if err != nil {
			return nil, nil, err
		}
		receiptsTarget, err = validateReceiptsTarget(receiptsTarget)
		if err != nil {
			return nil, nil, err
		}
		opts = append(opts, WithConsensusReceiptTarget(receiptsTarget))

		back := NewBackend(name, rpcURL, wsURL, rpcRequestSemaphore, opts...)
		backendNames = append(backendNames, name)
		backendsByName[name] = back
		log.Info("configured backend",
			"name", name,
			"backend_names", backendNames,
			"rpc_url", rpcURL,
			"ws_url", wsURL)
	}

	backendGroups := make(map[string]*BackendGroup)
	for bgName, bg := range config.BackendGroups {
		backends := make([]*Backend, 0)
		fallbackBackends := make(map[string]bool)
		fallbackCount := 0
		for _, bName := range bg.Backends {
			if backendsByName[bName] == nil {
				return nil, nil, fmt.Errorf("backend %s is not defined", bName)
			}
			backends = append(backends, backendsByName[bName])

			for _, fb := range bg.Fallbacks {
				if bName == fb {
					fallbackBackends[bName] = true
					log.Info("configured backend as fallback",
						"backend_name", bName,
						"backend_group", bgName,
					)
					fallbackCount++
				}
			}

			if _, ok := fallbackBackends[bName]; !ok {
				fallbackBackends[bName] = false
				log.Info("configured backend as primary",
					"backend_name", bName,
					"backend_group", bgName,
				)
			}
		}

		if fallbackCount != len(bg.Fallbacks) {
			return nil, nil,
				fmt.Errorf(
					"error: number of fallbacks instantiated (%d) did not match configured (%d) for backend group %s",
					fallbackCount, len(bg.Fallbacks), bgName,
				)
		}

		backendGroups[bgName] = &BackendGroup{
			Name:             bgName,
			Backends:         backends,
			WeightedRouting:  bg.WeightedRouting,
			FallbackBackends: fallbackBackends,
		}
	}

	var wsBackendGroup *BackendGroup
	if config.WSBackendGroup != "" {
		wsBackendGroup = backendGroups[config.WSBackendGroup]
		if wsBackendGroup == nil {
			return nil, nil, fmt.Errorf("ws backend group %s does not exist", config.WSBackendGroup)
		}
	}

	if wsBackendGroup == nil && config.Server.WSPort != 0 {
		return nil, nil, fmt.Errorf("a ws port was defined, but no ws group was defined")
	}

	for _, bg := range config.RPCMethodMappings {
		if backendGroups[bg] == nil {
			return nil, nil, fmt.Errorf("undefined backend group %s", bg)
		}
	}

	var resolvedAuth map[string]string

	if config.Authentication != nil {
		resolvedAuth = make(map[string]string)
		for secret, alias := range config.Authentication {
			resolvedSecret, err := ReadFromEnvOrConfig(secret)
			if err != nil {
				return nil, nil, err
			}
			resolvedAuth[resolvedSecret] = alias
		}
	}

	var (
		cache    Cache
		rpcCache RPCCache
	)
	if config.Cache.Enabled {
		if redisClient == nil {
			log.Warn("redis is not configured, using in-memory cache")
			cache = newMemoryCache()
		} else {
			ttl := defaultCacheTtl
			if config.Cache.TTL != 0 {
				ttl = time.Duration(config.Cache.TTL)
			}
			cache = newRedisCache(redisClient, config.Redis.Namespace, ttl)
		}
		rpcCache = newRPCCache(newCacheWithCompression(cache))
	}

	srv, err := NewServer(
		backendGroups,
		wsBackendGroup,
		NewStringSetFromStrings(config.WSMethodWhitelist),
		config.RPCMethodMappings,
		config.Server.MaxBodySizeBytes,
		resolvedAuth,
		secondsToDuration(config.Server.TimeoutSeconds),
		config.Server.MaxUpstreamBatchSize,
		config.Server.EnableXServedByHeader,
		rpcCache,
		config.RateLimit,
		config.SenderRateLimit,
		config.Server.EnableRequestLog,
		config.Server.MaxRequestBodyLogLen,
		config.BatchConfig.MaxSize,
		redisClient,
	)
	if err != nil {
		return nil, nil, fmt.Errorf("error creating server: %w", err)
	}

	// Enable this to support browser websocket connections.
	// See https://pkg.go.dev/github.com/gorilla/websocket#hdr-Origin_Considerations
	if config.Server.AllowAllOrigins {
		srv.upgrader.CheckOrigin = func(r *http.Request) bool {
			return true
		}
	}

	if config.Metrics.Enabled {
		addr := fmt.Sprintf("%s:%d", config.Metrics.Host, config.Metrics.Port)
		log.Info("starting metrics server", "addr", addr)
		go func() {
			if err := http.ListenAndServe(addr, promhttp.Handler()); err != nil {
				log.Error("error starting metrics server", "err", err)
			}
		}()
	}

	// To allow integration tests to cleanly come up, wait
	// 10ms to give the below goroutines enough time to
	// encounter an error creating their servers
	errTimer := time.NewTimer(10 * time.Millisecond)

	if config.Server.RPCPort != 0 {
		go func() {
			if err := srv.RPCListenAndServe(config.Server.RPCHost, config.Server.RPCPort); err != nil {
				if errors.Is(err, http.ErrServerClosed) {
					log.Info("RPC server shut down")
					return
				}
				log.Crit("error starting RPC server", "err", err)
			}
		}()
	}

	if config.Server.WSPort != 0 {
		go func() {
			if err := srv.WSListenAndServe(config.Server.WSHost, config.Server.WSPort); err != nil {
				if errors.Is(err, http.ErrServerClosed) {
					log.Info("WS server shut down")
					return
				}
				log.Crit("error starting WS server", "err", err)
			}
		}()
	} else {
		log.Info("WS server not enabled (ws_port is set to 0)")
	}

	for bgName, bg := range backendGroups {
		bgcfg := config.BackendGroups[bgName]
		if bgcfg.ConsensusAware {
			log.Info("creating poller for consensus aware backend_group", "name", bgName)

			copts := make([]ConsensusOpt, 0)

			if bgcfg.ConsensusAsyncHandler == "noop" {
				copts = append(copts, WithAsyncHandler(NewNoopAsyncHandler()))
			}
			if bgcfg.ConsensusBanPeriod > 0 {
				copts = append(copts, WithBanPeriod(time.Duration(bgcfg.ConsensusBanPeriod)))
			}
			if bgcfg.ConsensusMaxUpdateThreshold > 0 {
				copts = append(copts, WithMaxUpdateThreshold(time.Duration(bgcfg.ConsensusMaxUpdateThreshold)))
			}
			if bgcfg.ConsensusMaxBlockLag > 0 {
				copts = append(copts, WithMaxBlockLag(bgcfg.ConsensusMaxBlockLag))
			}
			if bgcfg.ConsensusMinPeerCount > 0 {
				copts = append(copts, WithMinPeerCount(uint64(bgcfg.ConsensusMinPeerCount)))
			}
			if bgcfg.ConsensusMaxBlockRange > 0 {
				copts = append(copts, WithMaxBlockRange(bgcfg.ConsensusMaxBlockRange))
			}
			if bgcfg.ConsensusPollerInterval > 0 {
				copts = append(copts, WithPollerInterval(time.Duration(bgcfg.ConsensusPollerInterval)))
			}

			for _, be := range bgcfg.Backends {
				if fallback, ok := bg.FallbackBackends[be]; !ok {
					log.Crit("error backend not found in backend fallback configurations", "backend_name", be)
				} else {
					log.Debug("configuring new backend for group", "backend_group", bgName, "backend_name", be, "fallback", fallback)
					RecordBackendGroupFallbacks(bg, be, fallback)
				}
			}

			var tracker ConsensusTracker
			if bgcfg.ConsensusHA {
				if bgcfg.ConsensusHARedis.URL == "" {
					log.Crit("must specify a consensus_ha_redis config when consensus_ha is true")
				}
				topts := make([]RedisConsensusTrackerOpt, 0)
				if bgcfg.ConsensusHALockPeriod > 0 {
					topts = append(topts, WithLockPeriod(time.Duration(bgcfg.ConsensusHALockPeriod)))
				}
				if bgcfg.ConsensusHAHeartbeatInterval > 0 {
					topts = append(topts, WithHeartbeatInterval(time.Duration(bgcfg.ConsensusHAHeartbeatInterval)))
				}
				consensusHARedisClient, err := NewRedisClient(bgcfg.ConsensusHARedis.URL)
				if err != nil {
					return nil, nil, err
				}
				ns := fmt.Sprintf("%s:%s", bgcfg.ConsensusHARedis.Namespace, bg.Name)
				tracker = NewRedisConsensusTracker(context.Background(), consensusHARedisClient, bg, ns, topts...)
				copts = append(copts, WithTracker(tracker))
			}

			cp := NewConsensusPoller(bg, copts...)
			bg.Consensus = cp

			if bgcfg.ConsensusHA {
				tracker.(*RedisConsensusTracker).Init()
			}
		}
	}

	<-errTimer.C
	log.Info("started proxyd")

	shutdownFunc := func() {
		log.Info("shutting down proxyd")
		srv.Shutdown()
		log.Info("goodbye")
	}

	return srv, shutdownFunc, nil
}

func validateReceiptsTarget(val string) (string, error) {
	if val == "" {
		val = ReceiptsTargetDebugGetRawReceipts
	}
	switch val {
	case ReceiptsTargetDebugGetRawReceipts,
		ReceiptsTargetAlchemyGetTransactionReceipts,
		ReceiptsTargetEthGetTransactionReceipts,
		ReceiptsTargetParityGetTransactionReceipts:
		return val, nil
	default:
		return "", fmt.Errorf("invalid receipts target: %s", val)
	}
}

func secondsToDuration(seconds int) time.Duration {
	return time.Duration(seconds) * time.Second
}

func configureBackendTLS(cfg *BackendConfig) (*tls.Config, error) {
	if cfg.CAFile == "" {
		return nil, nil
	}

	tlsConfig, err := CreateTLSClient(cfg.CAFile)
	if err != nil {
		return nil, err
	}

	if cfg.ClientCertFile != "" && cfg.ClientKeyFile != "" {
		cert, err := ParseKeyPair(cfg.ClientCertFile, cfg.ClientKeyFile)
		if err != nil {
			return nil, err
		}
		tlsConfig.Certificates = []tls.Certificate{cert}
	}

	return tlsConfig, nil
}
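Start returns the server plus a shutdown closure, which suggests the intended embedding: decode a Config, call Start, and invoke the closure on exit. A sketch of such a wrapper; the module import path, the TOML decoding of Config, and the bare argv handling are assumptions, not part of this diff:

```go
package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"

	"github.com/BurntSushi/toml"
	"github.com/ethereum-optimism/optimism/proxyd" // assumed module path
)

func main() {
	// Assumption: Config decodes from TOML, as hinted by snake_case names
	// like sender_rate_limit in the validation errors above.
	var config proxyd.Config
	if _, err := toml.DecodeFile(os.Args[1], &config); err != nil {
		log.Fatalf("error reading config: %v", err)
	}

	_, shutdown, err := proxyd.Start(&config)
	if err != nil {
		log.Fatalf("error starting proxyd: %v", err)
	}

	// Block until interrupted, then run the cleanup closure Start returned.
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)
	<-sig
	shutdown()
}
```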
32
proxyd/reader.go
Normal file
@@ -0,0 +1,32 @@

package proxyd

import (
	"errors"
	"io"
)

var ErrLimitReaderOverLimit = errors.New("over read limit")

func LimitReader(r io.Reader, n int64) io.Reader { return &LimitedReader{r, n} }

// A LimitedReader reads from R but limits the amount of
// data returned to just N bytes. Each call to Read
// updates N to reflect the new amount remaining.
// Unlike the standard library version, Read returns
// ErrLimitReaderOverLimit when N <= 0.
type LimitedReader struct {
	R io.Reader // underlying reader
	N int64     // max bytes remaining
}

func (l *LimitedReader) Read(p []byte) (int, error) {
	if l.N <= 0 {
		return 0, ErrLimitReaderOverLimit
	}
	if int64(len(p)) > l.N {
		p = p[0:l.N]
	}
	n, err := l.R.Read(p)
	l.N -= int64(n)
	return n, err
}
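The doc comment is the whole point of this type: io.LimitReader truncates silently at N bytes, whereas this variant surfaces the overflow as ErrLimitReaderOverLimit. A sketch of the call-site shape that difference enables; w, r, and maxBodySize are hypothetical stand-ins, and the real call sites live elsewhere in this PR:

```go
// Sketch: reject oversized request bodies outright instead of silently
// truncating them, which is what plain io.LimitReader would do.
body, err := io.ReadAll(LimitReader(r.Body, maxBodySize))
if err == ErrLimitReaderOverLimit {
	http.Error(w, "request body too large", http.StatusRequestEntityTooLarge)
	return
}
// body holds at most maxBodySize bytes here; handle other read errors next.
```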
44
proxyd/reader_test.go
Normal file
@@ -0,0 +1,44 @@

package proxyd

import (
	"io"
	"strings"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestLimitReader(t *testing.T) {
	data := "hellohellohellohello"
	r := LimitReader(strings.NewReader(data), 3)
	buf := make([]byte, 3)

	// Buffer reads OK
	n, err := r.Read(buf)
	require.NoError(t, err)
	require.Equal(t, 3, n)

	// Buffer is over limit
	n, err = r.Read(buf)
	require.Equal(t, ErrLimitReaderOverLimit, err)
	require.Equal(t, 0, n)

	// Buffer on initial read is over size
	buf = make([]byte, 16)
	r = LimitReader(strings.NewReader(data), 3)
	n, err = r.Read(buf)
	require.NoError(t, err)
	require.Equal(t, 3, n)

	// test with read all where the limit is less than the data
	r = LimitReader(strings.NewReader(data), 3)
	out, err := io.ReadAll(r)
	require.Equal(t, ErrLimitReaderOverLimit, err)
	require.Equal(t, "hel", string(out))

	// test with read all where the limit is more than the data
	r = LimitReader(strings.NewReader(data), 21)
	out, err = io.ReadAll(r)
	require.NoError(t, err)
	require.Equal(t, data, string(out))
}
22
proxyd/redis.go
Normal file
@@ -0,0 +1,22 @@

package proxyd

import (
	"context"
	"time"

	"github.com/redis/go-redis/v9"
)

func NewRedisClient(url string) (*redis.Client, error) {
	opts, err := redis.ParseURL(url)
	if err != nil {
		return nil, err
	}
	client := redis.NewClient(opts)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := client.Ping(ctx).Err(); err != nil {
		return nil, wrapErr(err, "error connecting to redis")
	}
	return client, nil
}
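Because NewRedisClient pings eagerly with a five-second timeout, a bad URL or unreachable server fails at startup rather than on first use. A hypothetical external smoke test illustrating that behavior; the module import path is an assumption:

```go
package proxyd_test

import (
	"testing"

	"github.com/ethereum-optimism/optimism/proxyd" // assumed module path
)

// Requires a local Redis; skips cleanly when one is not running.
func TestRedisSmoke(t *testing.T) {
	client, err := proxyd.NewRedisClient("redis://localhost:6379")
	if err != nil {
		t.Skipf("redis not available: %v", err)
	}
	defer client.Close()
}
```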
310
proxyd/rewriter.go
Normal file
@@ -0,0 +1,310 @@

package proxyd

import (
	"encoding/json"
	"errors"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/rpc"
)

type RewriteContext struct {
	latest        hexutil.Uint64
	safe          hexutil.Uint64
	finalized     hexutil.Uint64
	maxBlockRange uint64
}

type RewriteResult uint8

const (
	// RewriteNone means request should be forwarded as-is
	RewriteNone RewriteResult = iota

	// RewriteOverrideError means there was an error attempting to rewrite
	RewriteOverrideError

	// RewriteOverrideRequest means the modified request should be forwarded to the backend
	RewriteOverrideRequest

	// RewriteOverrideResponse means to skip calling the backend and serve the overridden response
	RewriteOverrideResponse
)

var (
	ErrRewriteBlockOutOfRange = errors.New("block is out of range")
	ErrRewriteRangeTooLarge   = errors.New("block range is too large")
)

// RewriteTags modifies the request and the response based on block tags
func RewriteTags(rctx RewriteContext, req *RPCReq, res *RPCRes) (RewriteResult, error) {
	rw, err := RewriteResponse(rctx, req, res)
	if rw == RewriteOverrideResponse {
		return rw, err
	}
	return RewriteRequest(rctx, req, res)
}

// RewriteResponse modifies the response object to comply with the rewrite context
// after the method has been called at the backend.
// The returned RewriteResult informs the decision of the rewrite.
func RewriteResponse(rctx RewriteContext, req *RPCReq, res *RPCRes) (RewriteResult, error) {
	switch req.Method {
	case "eth_blockNumber":
		res.Result = rctx.latest
		return RewriteOverrideResponse, nil
	}
	return RewriteNone, nil
}

// RewriteRequest modifies the request object to comply with the rewrite context
// before the method has been called at the backend.
// It returns RewriteNone if nothing was changed.
func RewriteRequest(rctx RewriteContext, req *RPCReq, res *RPCRes) (RewriteResult, error) {
	switch req.Method {
	case "eth_getLogs",
		"eth_newFilter":
		return rewriteRange(rctx, req, res, 0)
	case "debug_getRawReceipts", "consensus_getReceipts":
		return rewriteParam(rctx, req, res, 0, true, false)
	case "eth_getBalance",
		"eth_getCode",
		"eth_getTransactionCount",
		"eth_call":
		return rewriteParam(rctx, req, res, 1, false, true)
	case "eth_getStorageAt",
		"eth_getProof":
		return rewriteParam(rctx, req, res, 2, false, true)
	case "eth_getBlockTransactionCountByNumber",
		"eth_getUncleCountByBlockNumber",
		"eth_getBlockByNumber",
		"eth_getTransactionByBlockNumberAndIndex",
		"eth_getUncleByBlockNumberAndIndex":
		return rewriteParam(rctx, req, res, 0, false, false)
	}
	return RewriteNone, nil
}

func rewriteParam(rctx RewriteContext, req *RPCReq, res *RPCRes, pos int, required bool, blockNrOrHash bool) (RewriteResult, error) {
	var p []interface{}
	err := json.Unmarshal(req.Params, &p)
	if err != nil {
		return RewriteOverrideError, err
	}

	// we assume "latest" if the param is missing,
	// and we don't rewrite if there aren't enough params
	if len(p) == pos && !required {
		p = append(p, "latest")
	} else if len(p) <= pos {
		return RewriteNone, nil
	}

	// support for https://eips.ethereum.org/EIPS/eip-1898
	var val interface{}
	var rw bool
	if blockNrOrHash {
		bnh, err := remarshalBlockNumberOrHash(p[pos])
		if err != nil {
			// fallback to string
			s, ok := p[pos].(string)
			if ok {
				val, rw, err = rewriteTag(rctx, s)
				if err != nil {
					return RewriteOverrideError, err
				}
			} else {
				return RewriteOverrideError, errors.New("expected BlockNumberOrHash or string")
			}
		} else {
			val, rw, err = rewriteTagBlockNumberOrHash(rctx, bnh)
			if err != nil {
				return RewriteOverrideError, err
			}
		}
	} else {
		s, ok := p[pos].(string)
		if !ok {
			return RewriteOverrideError, errors.New("expected string")
		}

		val, rw, err = rewriteTag(rctx, s)
		if err != nil {
			return RewriteOverrideError, err
		}
	}

	if rw {
		p[pos] = val
		paramRaw, err := json.Marshal(p)
		if err != nil {
			return RewriteOverrideError, err
		}
		req.Params = paramRaw
		return RewriteOverrideRequest, nil
	}
	return RewriteNone, nil
}

func rewriteRange(rctx RewriteContext, req *RPCReq, res *RPCRes, pos int) (RewriteResult, error) {
	var p []map[string]interface{}
	err := json.Unmarshal(req.Params, &p)
	if err != nil {
		return RewriteOverrideError, err
	}

	// if either fromBlock or toBlock is defined, default the other to "latest" if unset
	_, hasFrom := p[pos]["fromBlock"]
	_, hasTo := p[pos]["toBlock"]
	if hasFrom && !hasTo {
		p[pos]["toBlock"] = "latest"
	} else if hasTo && !hasFrom {
		p[pos]["fromBlock"] = "latest"
	}

	modifiedFrom, err := rewriteTagMap(rctx, p[pos], "fromBlock")
	if err != nil {
		return RewriteOverrideError, err
	}

	modifiedTo, err := rewriteTagMap(rctx, p[pos], "toBlock")
	if err != nil {
		return RewriteOverrideError, err
	}

	if rctx.maxBlockRange > 0 && (hasFrom || hasTo) {
		from, err := blockNumber(p[pos], "fromBlock", uint64(rctx.latest))
		if err != nil {
			return RewriteOverrideError, err
		}
		to, err := blockNumber(p[pos], "toBlock", uint64(rctx.latest))
		if err != nil {
			return RewriteOverrideError, err
		}
		if to-from > rctx.maxBlockRange {
			return RewriteOverrideError, ErrRewriteRangeTooLarge
		}
	}

	// if any of the fields in the request have been changed, re-marshal the params
	if modifiedFrom || modifiedTo {
		paramsRaw, err := json.Marshal(p)
		if err != nil {
			return RewriteOverrideError, err
		}
		req.Params = paramsRaw
		return RewriteOverrideRequest, nil
	}

	return RewriteNone, nil
}

func blockNumber(m map[string]interface{}, key string, latest uint64) (uint64, error) {
	current, ok := m[key].(string)
	if !ok {
		return 0, errors.New("expected string")
	}
	// the latest/safe/finalized tags are already replaced by rewriteTag
	if current == "earliest" {
		return 0, nil
	}
	if current == "pending" {
		return latest + 1, nil
	}
	return hexutil.DecodeUint64(current)
}

func rewriteTagMap(rctx RewriteContext, m map[string]interface{}, key string) (bool, error) {
	if m[key] == nil || m[key] == "" {
		return false, nil
	}

	current, ok := m[key].(string)
	if !ok {
		return false, errors.New("expected string")
	}

	val, rw, err := rewriteTag(rctx, current)
	if err != nil {
		return false, err
	}
	if rw {
		m[key] = val
		return true, nil
	}

	return false, nil
}

func remarshalBlockNumberOrHash(current interface{}) (*rpc.BlockNumberOrHash, error) {
	jv, err := json.Marshal(current)
	if err != nil {
		return nil, err
	}

	var bnh rpc.BlockNumberOrHash
	err = bnh.UnmarshalJSON(jv)
	if err != nil {
		return nil, err
	}

	return &bnh, nil
}

func rewriteTag(rctx RewriteContext, current string) (string, bool, error) {
	bnh, err := remarshalBlockNumberOrHash(current)
	if err != nil {
		return "", false, err
	}

	// this is a hash, not a block number
	if bnh.BlockNumber == nil {
		return current, false, nil
	}

	switch *bnh.BlockNumber {
	case rpc.PendingBlockNumber,
		rpc.EarliestBlockNumber:
		return current, false, nil
	case rpc.FinalizedBlockNumber:
		return rctx.finalized.String(), true, nil
	case rpc.SafeBlockNumber:
		return rctx.safe.String(), true, nil
	case rpc.LatestBlockNumber:
		return rctx.latest.String(), true, nil
	default:
		if bnh.BlockNumber.Int64() > int64(rctx.latest) {
			return "", false, ErrRewriteBlockOutOfRange
		}
	}

	return current, false, nil
}

func rewriteTagBlockNumberOrHash(rctx RewriteContext, current *rpc.BlockNumberOrHash) (*rpc.BlockNumberOrHash, bool, error) {
	// this is a hash, not a block number
	if current.BlockNumber == nil {
		return current, false, nil
	}

	switch *current.BlockNumber {
	case rpc.PendingBlockNumber,
		rpc.EarliestBlockNumber:
		return current, false, nil
	case rpc.FinalizedBlockNumber:
		bn := rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(rctx.finalized))
		return &bn, true, nil
	case rpc.SafeBlockNumber:
		bn := rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(rctx.safe))
		return &bn, true, nil
	case rpc.LatestBlockNumber:
		bn := rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(rctx.latest))
		return &bn, true, nil
	default:
		if current.BlockNumber.Int64() > int64(rctx.latest) {
			return nil, false, ErrRewriteBlockOutOfRange
		}
	}

	return current, false, nil
}
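End to end, the flow is: RewriteTags first lets RewriteResponse short-circuit (only eth_blockNumber today), then hands the request to RewriteRequest. A sketch written as if inside this package, since RewriteContext's fields are unexported; mustMarshalJSON is the helper the tests below lean on:

```go
// Sketch (same package): pin "latest" in an eth_getLogs filter to the
// consensus height so every backend serves an identical range.
func exampleRewrite() {
	rctx := RewriteContext{latest: hexutil.Uint64(100)}
	req := &RPCReq{
		Method: "eth_getLogs",
		Params: mustMarshalJSON([]map[string]interface{}{{"fromBlock": "latest"}}),
	}
	result, err := RewriteTags(rctx, req, nil)
	// err == nil and result == RewriteOverrideRequest; req.Params now reads
	// {"fromBlock":"0x64","toBlock":"0x64"}: the missing toBlock was
	// defaulted to "latest", then both tags were resolved to block 100.
	_, _ = result, err
}
```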
|
717
proxyd/rewriter_test.go
Normal file
717
proxyd/rewriter_test.go
Normal file
@ -0,0 +1,717 @@
|
||||
package proxyd
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type args struct {
|
||||
rctx RewriteContext
|
||||
req *RPCReq
|
||||
res *RPCRes
|
||||
}
|
||||
|
||||
type rewriteTest struct {
|
||||
name string
|
||||
args args
|
||||
expected RewriteResult
|
||||
expectedErr error
|
||||
check func(*testing.T, args)
|
||||
}
|
||||
|
||||
func TestRewriteRequest(t *testing.T) {
|
||||
tests := []rewriteTest{
|
||||
/* range scoped */
|
||||
{
|
||||
name: "eth_getLogs fromBlock latest",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100)},
|
||||
req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"fromBlock": "latest"}})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteOverrideRequest,
|
||||
check: func(t *testing.T, args args) {
|
||||
var p []map[string]interface{}
|
||||
err := json.Unmarshal(args.req.Params, &p)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, hexutil.Uint64(100).String(), p[0]["fromBlock"])
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "eth_getLogs fromBlock within range",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100)},
|
||||
req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"fromBlock": hexutil.Uint64(55).String()}})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteOverrideRequest,
|
||||
check: func(t *testing.T, args args) {
|
||||
var p []map[string]interface{}
|
||||
err := json.Unmarshal(args.req.Params, &p)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, hexutil.Uint64(55).String(), p[0]["fromBlock"])
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "eth_getLogs fromBlock out of range",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100)},
|
||||
req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"fromBlock": hexutil.Uint64(111).String()}})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteOverrideError,
|
||||
expectedErr: ErrRewriteBlockOutOfRange,
|
||||
},
|
||||
{
|
||||
name: "eth_getLogs toBlock latest",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100)},
|
||||
req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"toBlock": "latest"}})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteOverrideRequest,
|
||||
check: func(t *testing.T, args args) {
|
||||
var p []map[string]interface{}
|
||||
err := json.Unmarshal(args.req.Params, &p)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, hexutil.Uint64(100).String(), p[0]["toBlock"])
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "eth_getLogs toBlock within range",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100)},
|
||||
req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"toBlock": hexutil.Uint64(55).String()}})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteOverrideRequest,
|
||||
check: func(t *testing.T, args args) {
|
||||
var p []map[string]interface{}
|
||||
err := json.Unmarshal(args.req.Params, &p)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, hexutil.Uint64(55).String(), p[0]["toBlock"])
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "eth_getLogs toBlock out of range",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100)},
|
||||
req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"toBlock": hexutil.Uint64(111).String()}})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteOverrideError,
|
||||
expectedErr: ErrRewriteBlockOutOfRange,
|
||||
},
|
||||
{
|
||||
name: "eth_getLogs fromBlock, toBlock latest",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100)},
|
||||
req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"fromBlock": "latest", "toBlock": "latest"}})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteOverrideRequest,
|
||||
check: func(t *testing.T, args args) {
|
||||
var p []map[string]interface{}
|
||||
err := json.Unmarshal(args.req.Params, &p)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, hexutil.Uint64(100).String(), p[0]["fromBlock"])
|
||||
require.Equal(t, hexutil.Uint64(100).String(), p[0]["toBlock"])
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "eth_getLogs fromBlock, toBlock within range",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100)},
|
||||
req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"fromBlock": hexutil.Uint64(55).String(), "toBlock": hexutil.Uint64(77).String()}})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteNone,
|
||||
check: func(t *testing.T, args args) {
|
||||
var p []map[string]interface{}
|
||||
err := json.Unmarshal(args.req.Params, &p)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, hexutil.Uint64(55).String(), p[0]["fromBlock"])
|
||||
require.Equal(t, hexutil.Uint64(77).String(), p[0]["toBlock"])
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "eth_getLogs fromBlock, toBlock out of range",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100)},
|
||||
req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"fromBlock": hexutil.Uint64(111).String(), "toBlock": hexutil.Uint64(222).String()}})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteOverrideError,
|
||||
expectedErr: ErrRewriteBlockOutOfRange,
|
||||
},
|
||||
{
|
||||
name: "eth_getLogs fromBlock -> toBlock above max range",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100), maxBlockRange: 30},
|
||||
req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"fromBlock": hexutil.Uint64(20).String(), "toBlock": hexutil.Uint64(80).String()}})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteOverrideError,
|
||||
expectedErr: ErrRewriteRangeTooLarge,
|
||||
},
|
||||
{
|
||||
name: "eth_getLogs earliest -> latest above max range",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100), maxBlockRange: 30},
|
||||
req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"fromBlock": "earliest", "toBlock": "latest"}})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteOverrideError,
|
||||
expectedErr: ErrRewriteRangeTooLarge,
|
||||
},
|
||||
{
|
||||
name: "eth_getLogs earliest -> pending above max range",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100), maxBlockRange: 30},
|
||||
req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"fromBlock": "earliest", "toBlock": "pending"}})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteOverrideError,
|
||||
expectedErr: ErrRewriteRangeTooLarge,
|
||||
},
|
||||
{
|
||||
name: "eth_getLogs earliest -> default above max range",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100), maxBlockRange: 30},
|
||||
req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"fromBlock": "earliest"}})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteOverrideError,
|
||||
expectedErr: ErrRewriteRangeTooLarge,
|
||||
},
|
||||
{
|
||||
name: "eth_getLogs default -> latest within range",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100), maxBlockRange: 30},
|
||||
req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"toBlock": "latest"}})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteOverrideRequest,
|
||||
check: func(t *testing.T, args args) {
|
||||
var p []map[string]interface{}
|
||||
err := json.Unmarshal(args.req.Params, &p)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, hexutil.Uint64(100).String(), p[0]["fromBlock"])
|
||||
require.Equal(t, hexutil.Uint64(100).String(), p[0]["toBlock"])
|
||||
},
|
||||
},
|
||||
/* required parameter at pos 0 */
|
||||
{
|
||||
name: "debug_getRawReceipts latest",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100)},
|
||||
req: &RPCReq{Method: "debug_getRawReceipts", Params: mustMarshalJSON([]string{"latest"})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteOverrideRequest,
|
||||
check: func(t *testing.T, args args) {
|
||||
var p []string
|
||||
err := json.Unmarshal(args.req.Params, &p)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, 1, len(p))
|
||||
require.Equal(t, hexutil.Uint64(100).String(), p[0])
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "debug_getRawReceipts within range",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100)},
|
||||
req: &RPCReq{Method: "debug_getRawReceipts", Params: mustMarshalJSON([]string{hexutil.Uint64(55).String()})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteNone,
|
||||
check: func(t *testing.T, args args) {
|
||||
var p []string
|
||||
err := json.Unmarshal(args.req.Params, &p)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, 1, len(p))
|
||||
require.Equal(t, hexutil.Uint64(55).String(), p[0])
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "debug_getRawReceipts out of range",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100)},
|
||||
req: &RPCReq{Method: "debug_getRawReceipts", Params: mustMarshalJSON([]string{hexutil.Uint64(111).String()})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteOverrideError,
|
||||
expectedErr: ErrRewriteBlockOutOfRange,
|
||||
},
|
||||
{
|
||||
name: "debug_getRawReceipts missing parameter",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100)},
|
||||
req: &RPCReq{Method: "debug_getRawReceipts", Params: mustMarshalJSON([]string{})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteNone,
|
||||
},
|
||||
{
|
||||
name: "debug_getRawReceipts with block hash",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100)},
|
||||
req: &RPCReq{Method: "debug_getRawReceipts", Params: mustMarshalJSON([]string{"0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b"})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteNone,
|
||||
check: func(t *testing.T, args args) {
|
||||
var p []string
|
||||
err := json.Unmarshal(args.req.Params, &p)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, 1, len(p))
|
||||
require.Equal(t, "0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b", p[0])
|
||||
},
|
||||
},
|
||||
/* default block parameter */
|
||||
{
|
||||
name: "eth_getCode omit block, should add",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100)},
|
||||
req: &RPCReq{Method: "eth_getCode", Params: mustMarshalJSON([]string{"0x123"})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteOverrideRequest,
|
||||
check: func(t *testing.T, args args) {
|
||||
var p []interface{}
|
||||
err := json.Unmarshal(args.req.Params, &p)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, 2, len(p))
|
||||
require.Equal(t, "0x123", p[0])
|
||||
bnh, err := remarshalBlockNumberOrHash(p[1])
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, rpc.BlockNumberOrHashWithNumber(100), *bnh)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "eth_getCode not enough params, should do nothing",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100)},
|
||||
req: &RPCReq{Method: "eth_getCode", Params: mustMarshalJSON([]string{})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteNone,
|
||||
check: func(t *testing.T, args args) {
|
||||
var p []string
|
||||
err := json.Unmarshal(args.req.Params, &p)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, 0, len(p))
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "eth_getCode latest",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100)},
|
||||
req: &RPCReq{Method: "eth_getCode", Params: mustMarshalJSON([]string{"0x123", "latest"})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteOverrideRequest,
|
||||
check: func(t *testing.T, args args) {
|
||||
var p []interface{}
|
||||
err := json.Unmarshal(args.req.Params, &p)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, 2, len(p))
|
||||
require.Equal(t, "0x123", p[0])
|
||||
bnh, err := remarshalBlockNumberOrHash(p[1])
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, rpc.BlockNumberOrHashWithNumber(100), *bnh)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "eth_getCode within range",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100)},
|
||||
req: &RPCReq{Method: "eth_getCode", Params: mustMarshalJSON([]string{"0x123", hexutil.Uint64(55).String()})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteNone,
|
||||
check: func(t *testing.T, args args) {
|
||||
var p []string
|
||||
err := json.Unmarshal(args.req.Params, &p)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, 2, len(p))
|
||||
require.Equal(t, "0x123", p[0])
|
||||
require.Equal(t, hexutil.Uint64(55).String(), p[1])
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "eth_getCode out of range",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100)},
|
||||
req: &RPCReq{Method: "eth_getCode", Params: mustMarshalJSON([]string{"0x123", hexutil.Uint64(111).String()})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteOverrideError,
|
||||
expectedErr: ErrRewriteBlockOutOfRange,
|
||||
},
|
||||
/* default block parameter, at position 2 */
|
||||
{
|
||||
name: "eth_getStorageAt omit block, should add",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100)},
|
||||
req: &RPCReq{Method: "eth_getStorageAt", Params: mustMarshalJSON([]string{"0x123", "5"})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteOverrideRequest,
|
||||
check: func(t *testing.T, args args) {
|
||||
var p []interface{}
|
||||
err := json.Unmarshal(args.req.Params, &p)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, 3, len(p))
|
||||
require.Equal(t, "0x123", p[0])
|
||||
require.Equal(t, "5", p[1])
|
||||
bnh, err := remarshalBlockNumberOrHash(p[2])
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, rpc.BlockNumberOrHashWithNumber(100), *bnh)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "eth_getStorageAt latest",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100)},
|
||||
req: &RPCReq{Method: "eth_getStorageAt", Params: mustMarshalJSON([]string{"0x123", "5", "latest"})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteOverrideRequest,
|
||||
check: func(t *testing.T, args args) {
|
||||
var p []interface{}
|
||||
err := json.Unmarshal(args.req.Params, &p)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, 3, len(p))
|
||||
require.Equal(t, "0x123", p[0])
|
||||
require.Equal(t, "5", p[1])
|
||||
bnh, err := remarshalBlockNumberOrHash(p[2])
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, rpc.BlockNumberOrHashWithNumber(100), *bnh)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "eth_getStorageAt within range",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100)},
|
||||
req: &RPCReq{Method: "eth_getStorageAt", Params: mustMarshalJSON([]string{"0x123", "5", hexutil.Uint64(55).String()})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteNone,
|
||||
check: func(t *testing.T, args args) {
|
||||
var p []string
|
||||
err := json.Unmarshal(args.req.Params, &p)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, 3, len(p))
|
||||
require.Equal(t, "0x123", p[0])
|
||||
require.Equal(t, "5", p[1])
|
||||
require.Equal(t, hexutil.Uint64(55).String(), p[2])
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "eth_getStorageAt out of range",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100)},
|
||||
req: &RPCReq{Method: "eth_getStorageAt", Params: mustMarshalJSON([]string{"0x123", "5", hexutil.Uint64(111).String()})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteOverrideError,
|
||||
expectedErr: ErrRewriteBlockOutOfRange,
|
||||
},
|
||||
/* default block parameter, at position 0 */
|
||||
{
|
||||
name: "eth_getBlockByNumber omit block, should add",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100)},
|
||||
req: &RPCReq{Method: "eth_getBlockByNumber", Params: mustMarshalJSON([]string{})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteOverrideRequest,
|
||||
check: func(t *testing.T, args args) {
|
||||
var p []string
|
||||
err := json.Unmarshal(args.req.Params, &p)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, 1, len(p))
|
||||
require.Equal(t, hexutil.Uint64(100).String(), p[0])
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "eth_getBlockByNumber latest",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100)},
|
||||
req: &RPCReq{Method: "eth_getBlockByNumber", Params: mustMarshalJSON([]string{"latest"})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteOverrideRequest,
|
||||
check: func(t *testing.T, args args) {
|
||||
var p []string
|
||||
err := json.Unmarshal(args.req.Params, &p)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, 1, len(p))
|
||||
require.Equal(t, hexutil.Uint64(100).String(), p[0])
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "eth_getBlockByNumber finalized",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100), finalized: hexutil.Uint64(55)},
|
||||
req: &RPCReq{Method: "eth_getBlockByNumber", Params: mustMarshalJSON([]string{"finalized"})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteOverrideRequest,
|
||||
check: func(t *testing.T, args args) {
|
||||
var p []string
|
||||
err := json.Unmarshal(args.req.Params, &p)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, 1, len(p))
|
||||
require.Equal(t, hexutil.Uint64(55).String(), p[0])
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "eth_getBlockByNumber safe",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100), safe: hexutil.Uint64(50)},
|
||||
req: &RPCReq{Method: "eth_getBlockByNumber", Params: mustMarshalJSON([]string{"safe"})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteOverrideRequest,
|
||||
check: func(t *testing.T, args args) {
|
||||
var p []string
|
||||
err := json.Unmarshal(args.req.Params, &p)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, 1, len(p))
|
||||
require.Equal(t, hexutil.Uint64(50).String(), p[0])
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "eth_getBlockByNumber within range",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100)},
|
||||
req: &RPCReq{Method: "eth_getBlockByNumber", Params: mustMarshalJSON([]string{hexutil.Uint64(55).String()})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteNone,
|
||||
check: func(t *testing.T, args args) {
|
||||
var p []string
|
||||
err := json.Unmarshal(args.req.Params, &p)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, 1, len(p))
|
||||
require.Equal(t, hexutil.Uint64(55).String(), p[0])
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "eth_getBlockByNumber out of range",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100)},
|
||||
req: &RPCReq{Method: "eth_getBlockByNumber", Params: mustMarshalJSON([]string{hexutil.Uint64(111).String()})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteOverrideError,
|
||||
expectedErr: ErrRewriteBlockOutOfRange,
|
||||
},
|
||||
{
|
||||
name: "eth_getStorageAt using rpc.BlockNumberOrHash",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100)},
|
||||
req: &RPCReq{Method: "eth_getStorageAt", Params: mustMarshalJSON([]string{
|
||||
"0xae851f927ee40de99aabb7461c00f9622ab91d60",
|
||||
"0x65a7ed542fb37fe237fdfbdd70b31598523fe5b32879e307bae27a0bd9581c08",
|
||||
"0x1c4840bcb3de3ac403c0075b46c2c47d4396c5b624b6e1b2874ec04e8879b483"})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteNone,
|
||||
},
|
||||
// eip1898
|
||||
{
|
||||
name: "eth_getStorageAt using rpc.BlockNumberOrHash at genesis (blockNumber)",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100)},
|
||||
req: &RPCReq{Method: "eth_getStorageAt", Params: mustMarshalJSON([]interface{}{
|
||||
"0xae851f927ee40de99aabb7461c00f9622ab91d60",
|
||||
"10",
|
||||
map[string]interface{}{
|
||||
"blockNumber": "0x0",
|
||||
}})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteNone,
|
||||
},
|
||||
{
|
||||
name: "eth_getStorageAt using rpc.BlockNumberOrHash at genesis (hash)",
|
||||
args: args{
|
||||
rctx: RewriteContext{latest: hexutil.Uint64(100)},
|
||||
req: &RPCReq{Method: "eth_getStorageAt", Params: mustMarshalJSON([]interface{}{
|
||||
"0xae851f927ee40de99aabb7461c00f9622ab91d60",
|
||||
"10",
|
||||
map[string]interface{}{
|
||||
"blockHash": "0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3",
|
||||
"requireCanonical": true,
|
||||
}})},
|
||||
res: nil,
|
||||
},
|
||||
expected: RewriteNone,
|
||||
check: func(t *testing.T, args args) {
|
||||
var p []interface{}
|
||||
err := json.Unmarshal(args.req.Params, &p)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, 3, len(p))
|
||||
require.Equal(t, "0xae851f927ee40de99aabb7461c00f9622ab91d60", p[0])
				require.Equal(t, "10", p[1])
				bnh, err := remarshalBlockNumberOrHash(p[2])
				require.Nil(t, err)
				require.Equal(t, rpc.BlockNumberOrHashWithHash(common.HexToHash("0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"), true), *bnh)
				require.True(t, bnh.RequireCanonical)
			},
		},
		{
			name: "eth_getStorageAt using rpc.BlockNumberOrHash at latest (blockNumber)",
			args: args{
				rctx: RewriteContext{latest: hexutil.Uint64(100)},
				req: &RPCReq{Method: "eth_getStorageAt", Params: mustMarshalJSON([]interface{}{
					"0xae851f927ee40de99aabb7461c00f9622ab91d60",
					"10",
					map[string]interface{}{
						"blockNumber": "latest",
					}})},
				res: nil,
			},
			expected: RewriteOverrideRequest,
			check: func(t *testing.T, args args) {
				var p []interface{}
				err := json.Unmarshal(args.req.Params, &p)
				require.Nil(t, err)
				require.Equal(t, 3, len(p))
				require.Equal(t, "0xae851f927ee40de99aabb7461c00f9622ab91d60", p[0])
				require.Equal(t, "10", p[1])
				bnh, err := remarshalBlockNumberOrHash(p[2])
				require.Nil(t, err)
				require.Equal(t, rpc.BlockNumberOrHashWithNumber(100), *bnh)
			},
		},
		{
			name: "eth_getStorageAt using rpc.BlockNumberOrHash out of range",
			args: args{
				rctx: RewriteContext{latest: hexutil.Uint64(100)},
				req: &RPCReq{Method: "eth_getStorageAt", Params: mustMarshalJSON([]interface{}{
					"0xae851f927ee40de99aabb7461c00f9622ab91d60",
					"10",
					map[string]interface{}{
						"blockNumber": "0x111",
					}})},
				res: nil,
			},
			expected:    RewriteOverrideError,
			expectedErr: ErrRewriteBlockOutOfRange,
		},
	}

	// generalize tests for other methods with the same interface and behavior
	tests = generalize(tests, "eth_getLogs", "eth_newFilter")
	tests = generalize(tests, "eth_getCode", "eth_getBalance")
	tests = generalize(tests, "eth_getCode", "eth_getTransactionCount")
	tests = generalize(tests, "eth_getCode", "eth_call")
	tests = generalize(tests, "eth_getBlockByNumber", "eth_getBlockTransactionCountByNumber")
	tests = generalize(tests, "eth_getBlockByNumber", "eth_getUncleCountByBlockNumber")
	tests = generalize(tests, "eth_getBlockByNumber", "eth_getTransactionByBlockNumberAndIndex")
	tests = generalize(tests, "eth_getBlockByNumber", "eth_getUncleByBlockNumberAndIndex")
	tests = generalize(tests, "eth_getStorageSlotAt", "eth_getProof")

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result, err := RewriteRequest(tt.args.rctx, tt.args.req, tt.args.res)
			if result != RewriteOverrideError {
				require.Nil(t, err)
				require.Equal(t, tt.expected, result)
			} else {
				require.Equal(t, tt.expectedErr, err)
			}
			if tt.check != nil {
				tt.check(t, tt.args)
			}
		})
	}
}

func generalize(tests []rewriteTest, baseMethod string, generalizedMethod string) []rewriteTest {
	newCases := make([]rewriteTest, 0)
	for _, t := range tests {
		if t.args.req.Method == baseMethod {
			newName := strings.Replace(t.name, baseMethod, generalizedMethod, -1)
			var req *RPCReq
			var res *RPCRes

			if t.args.req != nil {
				req = &RPCReq{
					JSONRPC: t.args.req.JSONRPC,
					Method:  generalizedMethod,
					Params:  t.args.req.Params,
					ID:      t.args.req.ID,
				}
			}

			if t.args.res != nil {
				res = &RPCRes{
					JSONRPC: t.args.res.JSONRPC,
					Result:  t.args.res.Result,
					Error:   t.args.res.Error,
					ID:      t.args.res.ID,
				}
			}
			newCases = append(newCases, rewriteTest{
				name: newName,
				args: args{
					rctx: t.args.rctx,
					req:  req,
					res:  res,
				},
				expected:    t.expected,
				expectedErr: t.expectedErr,
				check:       t.check,
			})
		}
	}
	return append(tests, newCases...)
}

func TestRewriteResponse(t *testing.T) {
	type args struct {
		rctx RewriteContext
		req  *RPCReq
		res  *RPCRes
	}
	tests := []struct {
		name     string
		args     args
		expected RewriteResult
		check    func(*testing.T, args)
	}{
		{
			name: "eth_blockNumber latest",
			args: args{
				rctx: RewriteContext{latest: hexutil.Uint64(100)},
				req:  &RPCReq{Method: "eth_blockNumber"},
				res:  &RPCRes{Result: hexutil.Uint64(200)},
			},
			expected: RewriteOverrideResponse,
			check: func(t *testing.T, args args) {
				require.Equal(t, args.res.Result, hexutil.Uint64(100))
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result, err := RewriteResponse(tt.args.rctx, tt.args.req, tt.args.res)
			require.Nil(t, err)
			require.Equal(t, tt.expected, result)
			if tt.check != nil {
				tt.check(t, tt.args)
			}
		})
	}
}
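For readers skimming the table above, the rewrite being asserted is easiest to see on concrete JSON: RewriteRequest mutates the request's Params in place, pinning the "latest" tag to the block height carried in the RewriteContext. A minimal sketch (assuming it sits inside the proxyd package next to these tests, since RewriteContext and its latest field are unexported; mustMarshalJSON is the test helper used above):

// Sketch: how RewriteRequest pins "latest" to a concrete block number.
// Input params:  ["0xae85...", "10", {"blockNumber": "latest"}]
// With latest=100 (0x64), the third param should become {"blockNumber": "0x64"}.
func ExampleRewriteRequest() {
	req := &RPCReq{
		JSONRPC: JSONRPCVersion,
		Method:  "eth_getStorageAt",
		ID:      []byte("1"),
		Params: mustMarshalJSON([]interface{}{
			"0xae851f927ee40de99aabb7461c00f9622ab91d60",
			"10",
			map[string]interface{}{"blockNumber": "latest"},
		}),
	}
	_, _ = RewriteRequest(RewriteContext{latest: hexutil.Uint64(100)}, req, nil)
	fmt.Println(string(req.Params)) // the "latest" tag is now a fixed hex block number
}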
170 proxyd/rpc.go Normal file
@@ -0,0 +1,170 @@
package proxyd

import (
	"encoding/json"
	"io"
	"strings"
)

type RPCReq struct {
	JSONRPC string          `json:"jsonrpc"`
	Method  string          `json:"method"`
	Params  json.RawMessage `json:"params"`
	ID      json.RawMessage `json:"id"`
}

type RPCRes struct {
	JSONRPC string
	Result  interface{}
	Error   *RPCErr
	ID      json.RawMessage
}

type rpcResJSON struct {
	JSONRPC string      `json:"jsonrpc"`
	Result  interface{} `json:"result,omitempty"`
	Error   *RPCErr     `json:"error,omitempty"`
	ID      json.RawMessage `json:"id"`
}

type nullResultRPCRes struct {
	JSONRPC string      `json:"jsonrpc"`
	Result  interface{} `json:"result"`
	ID      json.RawMessage `json:"id"`
}

func (r *RPCRes) IsError() bool {
	return r.Error != nil
}

func (r *RPCRes) MarshalJSON() ([]byte, error) {
	if r.Result == nil && r.Error == nil {
		return json.Marshal(&nullResultRPCRes{
			JSONRPC: r.JSONRPC,
			Result:  nil,
			ID:      r.ID,
		})
	}

	return json.Marshal(&rpcResJSON{
		JSONRPC: r.JSONRPC,
		Result:  r.Result,
		Error:   r.Error,
		ID:      r.ID,
	})
}

type RPCErr struct {
	Code          int    `json:"code"`
	Message       string `json:"message"`
	Data          string `json:"data,omitempty"`
	HTTPErrorCode int    `json:"-"`
}

func (r *RPCErr) Error() string {
	return r.Message
}

func (r *RPCErr) Clone() *RPCErr {
	return &RPCErr{
		Code:          r.Code,
		Message:       r.Message,
		HTTPErrorCode: r.HTTPErrorCode,
	}
}

func IsValidID(id json.RawMessage) bool {
	// handle the case where the ID is a string
	if strings.HasPrefix(string(id), "\"") && strings.HasSuffix(string(id), "\"") {
		return len(id) > 2
	}

	// technically allows a boolean/null ID, but so does Geth
	// https://github.com/ethereum/go-ethereum/blob/master/rpc/json.go#L72
	return len(id) > 0 && id[0] != '{' && id[0] != '['
}

func ParseRPCReq(body []byte) (*RPCReq, error) {
	req := new(RPCReq)
	if err := json.Unmarshal(body, req); err != nil {
		return nil, ErrParseErr
	}

	return req, nil
}

func ParseBatchRPCReq(body []byte) ([]json.RawMessage, error) {
	batch := make([]json.RawMessage, 0)
	if err := json.Unmarshal(body, &batch); err != nil {
		return nil, err
	}

	return batch, nil
}

func ParseRPCRes(r io.Reader) (*RPCRes, error) {
	body, err := io.ReadAll(r)
	if err != nil {
		return nil, wrapErr(err, "error reading RPC response")
	}

	res := new(RPCRes)
	if err := json.Unmarshal(body, res); err != nil {
		return nil, wrapErr(err, "error unmarshalling RPC response")
	}

	return res, nil
}

func ValidateRPCReq(req *RPCReq) error {
	if req.JSONRPC != JSONRPCVersion {
		return ErrInvalidRequest("invalid JSON-RPC version")
	}

	if req.Method == "" {
		return ErrInvalidRequest("no method specified")
	}

	if !IsValidID(req.ID) {
		return ErrInvalidRequest("invalid ID")
	}

	return nil
}

func NewRPCErrorRes(id json.RawMessage, err error) *RPCRes {
	var rpcErr *RPCErr
	if rr, ok := err.(*RPCErr); ok {
		rpcErr = rr
	} else {
		rpcErr = &RPCErr{
			Code:    JSONRPCErrorInternal,
			Message: err.Error(),
		}
	}

	return &RPCRes{
		JSONRPC: JSONRPCVersion,
		Error:   rpcErr,
		ID:      id,
	}
}

func NewRPCRes(id json.RawMessage, result interface{}) *RPCRes {
	return &RPCRes{
		JSONRPC: JSONRPCVersion,
		Result:  result,
		ID:      id,
	}
}

func IsBatch(raw []byte) bool {
	for _, c := range raw {
		// skip insignificant whitespace (http://www.ietf.org/rfc/rfc4627.txt)
		if c == 0x20 || c == 0x09 || c == 0x0a || c == 0x0d {
			continue
		}
		return c == '['
	}
	return false
}
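Taken together, these helpers form proxyd's request-ingestion path: detect single vs. batch, parse, validate, and build the response. A minimal round-trip sketch using only the functions above, assuming JSONRPCVersion is "2.0" as the tests below indicate (the request bytes are made up for illustration):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum-optimism/optimism/proxyd"
)

func main() {
	raw := []byte(`{"jsonrpc":"2.0","method":"eth_chainId","id":1}`)

	// IsBatch skips leading JSON whitespace and keys off the first byte.
	fmt.Println(proxyd.IsBatch(raw)) // false: the body starts with '{', not '['

	req, err := proxyd.ParseRPCReq(raw)
	if err != nil {
		panic(err)
	}
	if err := proxyd.ValidateRPCReq(req); err != nil {
		// rejects a wrong "jsonrpc" version, an empty method, or an object/array ID
		panic(err)
	}

	// Build a success response that echoes the request ID back to the caller.
	out, _ := json.Marshal(proxyd.NewRPCRes(req.ID, "0xa"))
	fmt.Println(string(out)) // {"jsonrpc":"2.0","result":"0xa","id":1}
}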
89 proxyd/rpc_test.go Normal file
@@ -0,0 +1,89 @@
package proxyd

import (
	"encoding/json"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestRPCResJSON(t *testing.T) {
	tests := []struct {
		name string
		in   *RPCRes
		out  string
	}{
		{
			"string result",
			&RPCRes{
				JSONRPC: JSONRPCVersion,
				Result:  "foobar",
				ID:      []byte("123"),
			},
			`{"jsonrpc":"2.0","result":"foobar","id":123}`,
		},
		{
			"object result",
			&RPCRes{
				JSONRPC: JSONRPCVersion,
				Result: struct {
					Str string `json:"str"`
				}{
					"test",
				},
				ID: []byte("123"),
			},
			`{"jsonrpc":"2.0","result":{"str":"test"},"id":123}`,
		},
		{
			"nil result",
			&RPCRes{
				JSONRPC: JSONRPCVersion,
				Result:  nil,
				ID:      []byte("123"),
			},
			`{"jsonrpc":"2.0","result":null,"id":123}`,
		},
		{
			"error result without data",
			&RPCRes{
				JSONRPC: JSONRPCVersion,
				Error: &RPCErr{
					Code:    1234,
					Message: "test err",
				},
				ID: []byte("123"),
			},
			`{"jsonrpc":"2.0","error":{"code":1234,"message":"test err"},"id":123}`,
		},
		{
			"error result with data",
			&RPCRes{
				JSONRPC: JSONRPCVersion,
				Error: &RPCErr{
					Code:    1234,
					Message: "test err",
					Data:    "revert",
				},
				ID: []byte("123"),
			},
			`{"jsonrpc":"2.0","error":{"code":1234,"message":"test err","data":"revert"},"id":123}`,
		},
		{
			"string ID",
			&RPCRes{
				JSONRPC: JSONRPCVersion,
				Result:  "foobar",
				ID:      []byte("\"123\""),
			},
			`{"jsonrpc":"2.0","result":"foobar","id":"123"}`,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			out, err := json.Marshal(tt.in)
			require.NoError(t, err)
			require.Equal(t, tt.out, string(out))
		})
	}
}
877 proxyd/server.go Normal file
@@ -0,0 +1,877 @@
package proxyd

import (
	"context"
	"crypto/rand"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"math"
	"math/big"
	"net/http"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/txpool"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/log"
	"github.com/gorilla/mux"
	"github.com/gorilla/websocket"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/redis/go-redis/v9"
	"github.com/rs/cors"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

const (
	ContextKeyAuth               = "authorization"
	ContextKeyReqID              = "req_id"
	ContextKeyXForwardedFor      = "x_forwarded_for"
	DefaultMaxBatchRPCCallsLimit = 100
	MaxBatchRPCCallsHardLimit    = 1000
	cacheStatusHdr               = "X-Proxyd-Cache-Status"
	defaultRPCTimeout            = 10 * time.Second
	defaultBodySizeLimit         = 256 * opt.KiB
	defaultWSHandshakeTimeout    = 10 * time.Second
	defaultWSReadTimeout         = 2 * time.Minute
	defaultWSWriteTimeout        = 10 * time.Second
	defaultCacheTtl              = 1 * time.Hour
	maxRequestBodyLogLen         = 2000
	defaultMaxUpstreamBatchSize  = 10
	defaultRateLimitHeader       = "X-Forwarded-For"
)

var emptyArrayResponse = json.RawMessage("[]")

type Server struct {
	BackendGroups          map[string]*BackendGroup
	wsBackendGroup         *BackendGroup
	wsMethodWhitelist      *StringSet
	rpcMethodMappings      map[string]string
	maxBodySize            int64
	enableRequestLog       bool
	maxRequestBodyLogLen   int
	authenticatedPaths     map[string]string
	timeout                time.Duration
	maxUpstreamBatchSize   int
	maxBatchSize           int
	enableServedByHeader   bool
	upgrader               *websocket.Upgrader
	mainLim                FrontendRateLimiter
	overrideLims           map[string]FrontendRateLimiter
	senderLim              FrontendRateLimiter
	allowedChainIds        []*big.Int
	limExemptOrigins       []*regexp.Regexp
	limExemptUserAgents    []*regexp.Regexp
	globallyLimitedMethods map[string]bool
	rpcServer              *http.Server
	wsServer               *http.Server
	cache                  RPCCache
	srvMu                  sync.Mutex
	rateLimitHeader        string
}

type limiterFunc func(method string) bool

func NewServer(
	backendGroups map[string]*BackendGroup,
	wsBackendGroup *BackendGroup,
	wsMethodWhitelist *StringSet,
	rpcMethodMappings map[string]string,
	maxBodySize int64,
	authenticatedPaths map[string]string,
	timeout time.Duration,
	maxUpstreamBatchSize int,
	enableServedByHeader bool,
	cache RPCCache,
	rateLimitConfig RateLimitConfig,
	senderRateLimitConfig SenderRateLimitConfig,
	enableRequestLog bool,
	maxRequestBodyLogLen int,
	maxBatchSize int,
	redisClient *redis.Client,
) (*Server, error) {
	if cache == nil {
		cache = &NoopRPCCache{}
	}

	if maxBodySize == 0 {
		maxBodySize = defaultBodySizeLimit
	}

	if timeout == 0 {
		timeout = defaultRPCTimeout
	}

	if maxUpstreamBatchSize == 0 {
		maxUpstreamBatchSize = defaultMaxUpstreamBatchSize
	}

	if maxBatchSize == 0 {
		maxBatchSize = DefaultMaxBatchRPCCallsLimit
	}

	if maxBatchSize > MaxBatchRPCCallsHardLimit {
		maxBatchSize = MaxBatchRPCCallsHardLimit
	}

	limiterFactory := func(dur time.Duration, max int, prefix string) FrontendRateLimiter {
		if rateLimitConfig.UseRedis {
			return NewRedisFrontendRateLimiter(redisClient, dur, max, prefix)
		}

		return NewMemoryFrontendRateLimit(dur, max)
	}

	var mainLim FrontendRateLimiter
	limExemptOrigins := make([]*regexp.Regexp, 0)
	limExemptUserAgents := make([]*regexp.Regexp, 0)
	if rateLimitConfig.BaseRate > 0 {
		mainLim = limiterFactory(time.Duration(rateLimitConfig.BaseInterval), rateLimitConfig.BaseRate, "main")
		for _, origin := range rateLimitConfig.ExemptOrigins {
			pattern, err := regexp.Compile(origin)
			if err != nil {
				return nil, err
			}
			limExemptOrigins = append(limExemptOrigins, pattern)
		}
		for _, agent := range rateLimitConfig.ExemptUserAgents {
			pattern, err := regexp.Compile(agent)
			if err != nil {
				return nil, err
			}
			limExemptUserAgents = append(limExemptUserAgents, pattern)
		}
	} else {
		mainLim = NoopFrontendRateLimiter
	}

	overrideLims := make(map[string]FrontendRateLimiter)
	globalMethodLims := make(map[string]bool)
	for method, override := range rateLimitConfig.MethodOverrides {
		overrideLims[method] = limiterFactory(time.Duration(override.Interval), override.Limit, method)

		if override.Global {
			globalMethodLims[method] = true
		}
	}
	var senderLim FrontendRateLimiter
	if senderRateLimitConfig.Enabled {
		senderLim = limiterFactory(time.Duration(senderRateLimitConfig.Interval), senderRateLimitConfig.Limit, "senders")
	}

	rateLimitHeader := defaultRateLimitHeader
	if rateLimitConfig.IPHeaderOverride != "" {
		rateLimitHeader = rateLimitConfig.IPHeaderOverride
	}

	return &Server{
		BackendGroups:        backendGroups,
		wsBackendGroup:       wsBackendGroup,
		wsMethodWhitelist:    wsMethodWhitelist,
		rpcMethodMappings:    rpcMethodMappings,
		maxBodySize:          maxBodySize,
		authenticatedPaths:   authenticatedPaths,
		timeout:              timeout,
		maxUpstreamBatchSize: maxUpstreamBatchSize,
		enableServedByHeader: enableServedByHeader,
		cache:                cache,
		enableRequestLog:     enableRequestLog,
		maxRequestBodyLogLen: maxRequestBodyLogLen,
		maxBatchSize:         maxBatchSize,
		upgrader: &websocket.Upgrader{
			HandshakeTimeout: defaultWSHandshakeTimeout,
		},
		mainLim:                mainLim,
		overrideLims:           overrideLims,
		globallyLimitedMethods: globalMethodLims,
		senderLim:              senderLim,
		allowedChainIds:        senderRateLimitConfig.AllowedChainIds,
		limExemptOrigins:       limExemptOrigins,
		limExemptUserAgents:    limExemptUserAgents,
		rateLimitHeader:        rateLimitHeader,
	}, nil
}

func (s *Server) RPCListenAndServe(host string, port int) error {
	s.srvMu.Lock()
	hdlr := mux.NewRouter()
	hdlr.HandleFunc("/healthz", s.HandleHealthz).Methods("GET")
	hdlr.HandleFunc("/", s.HandleRPC).Methods("POST")
	hdlr.HandleFunc("/{authorization}", s.HandleRPC).Methods("POST")
	c := cors.New(cors.Options{
		AllowedOrigins: []string{"*"},
	})
	addr := fmt.Sprintf("%s:%d", host, port)
	s.rpcServer = &http.Server{
		Handler: instrumentedHdlr(c.Handler(hdlr)),
		Addr:    addr,
	}
	log.Info("starting HTTP server", "addr", addr)
	s.srvMu.Unlock()
	return s.rpcServer.ListenAndServe()
}

func (s *Server) WSListenAndServe(host string, port int) error {
	s.srvMu.Lock()
	hdlr := mux.NewRouter()
	hdlr.HandleFunc("/", s.HandleWS)
	hdlr.HandleFunc("/{authorization}", s.HandleWS)
	c := cors.New(cors.Options{
		AllowedOrigins: []string{"*"},
	})
	addr := fmt.Sprintf("%s:%d", host, port)
	s.wsServer = &http.Server{
		Handler: instrumentedHdlr(c.Handler(hdlr)),
		Addr:    addr,
	}
	log.Info("starting WS server", "addr", addr)
	s.srvMu.Unlock()
	return s.wsServer.ListenAndServe()
}

func (s *Server) Shutdown() {
	s.srvMu.Lock()
	defer s.srvMu.Unlock()
	if s.rpcServer != nil {
		_ = s.rpcServer.Shutdown(context.Background())
	}
	if s.wsServer != nil {
		_ = s.wsServer.Shutdown(context.Background())
	}
	for _, bg := range s.BackendGroups {
		bg.Shutdown()
	}
}

func (s *Server) HandleHealthz(w http.ResponseWriter, r *http.Request) {
	_, _ = w.Write([]byte("OK"))
}

func (s *Server) HandleRPC(w http.ResponseWriter, r *http.Request) {
	ctx := s.populateContext(w, r)
	if ctx == nil {
		return
	}
	var cancel context.CancelFunc
	ctx, cancel = context.WithTimeout(ctx, s.timeout)
	defer cancel()

	origin := r.Header.Get("Origin")
	userAgent := r.Header.Get("User-Agent")
	// Use XFF in context since it will automatically be replaced by the remote IP
	xff := stripXFF(GetXForwardedFor(ctx))
	isUnlimitedOrigin := s.isUnlimitedOrigin(origin)
	isUnlimitedUserAgent := s.isUnlimitedUserAgent(userAgent)

	if xff == "" {
		writeRPCError(ctx, w, nil, ErrInvalidRequest("request does not include a remote IP"))
		return
	}

	isLimited := func(method string) bool {
		isGloballyLimitedMethod := s.isGlobalLimit(method)
		if !isGloballyLimitedMethod && (isUnlimitedOrigin || isUnlimitedUserAgent) {
			return false
		}

		var lim FrontendRateLimiter
		if method == "" {
			lim = s.mainLim
		} else {
			lim = s.overrideLims[method]
		}

		if lim == nil {
			return false
		}

		ok, err := lim.Take(ctx, xff)
		if err != nil {
			log.Warn("error taking rate limit", "err", err)
			return true
		}
		return !ok
	}

	if isLimited("") {
		RecordRPCError(ctx, BackendProxyd, "unknown", ErrOverRateLimit)
		log.Warn(
			"rate limited request",
			"req_id", GetReqID(ctx),
			"auth", GetAuthCtx(ctx),
			"user_agent", userAgent,
			"origin", origin,
			"remote_ip", xff,
		)
		writeRPCError(ctx, w, nil, ErrOverRateLimit)
		return
	}

	log.Info(
		"received RPC request",
		"req_id", GetReqID(ctx),
		"auth", GetAuthCtx(ctx),
		"user_agent", userAgent,
		"origin", origin,
		"remote_ip", xff,
	)

	body, err := io.ReadAll(LimitReader(r.Body, s.maxBodySize))
	if errors.Is(err, ErrLimitReaderOverLimit) {
		log.Error("request body too large", "req_id", GetReqID(ctx))
		RecordRPCError(ctx, BackendProxyd, MethodUnknown, ErrRequestBodyTooLarge)
		writeRPCError(ctx, w, nil, ErrRequestBodyTooLarge)
		return
	}
	if err != nil {
		log.Error("error reading request body", "err", err)
		writeRPCError(ctx, w, nil, ErrInternal)
		return
	}
	RecordRequestPayloadSize(ctx, len(body))

	if s.enableRequestLog {
		log.Info("Raw RPC request",
			"body", truncate(string(body), s.maxRequestBodyLogLen),
			"req_id", GetReqID(ctx),
			"auth", GetAuthCtx(ctx),
		)
	}

	if IsBatch(body) {
		reqs, err := ParseBatchRPCReq(body)
		if err != nil {
			log.Error("error parsing batch RPC request", "err", err)
			RecordRPCError(ctx, BackendProxyd, MethodUnknown, err)
			writeRPCError(ctx, w, nil, ErrParseErr)
			return
		}

		RecordBatchSize(len(reqs))

		if len(reqs) > s.maxBatchSize {
			RecordRPCError(ctx, BackendProxyd, MethodUnknown, ErrTooManyBatchRequests)
			writeRPCError(ctx, w, nil, ErrTooManyBatchRequests)
			return
		}

		if len(reqs) == 0 {
			writeRPCError(ctx, w, nil, ErrInvalidRequest("must specify at least one batch call"))
			return
		}

		batchRes, batchContainsCached, servedBy, err := s.handleBatchRPC(ctx, reqs, isLimited, true)
		if err == context.DeadlineExceeded {
			writeRPCError(ctx, w, nil, ErrGatewayTimeout)
			return
		}
		if errors.Is(err, ErrConsensusGetReceiptsCantBeBatched) ||
			errors.Is(err, ErrConsensusGetReceiptsInvalidTarget) {
			writeRPCError(ctx, w, nil, ErrInvalidRequest(err.Error()))
			return
		}
		if err != nil {
			writeRPCError(ctx, w, nil, ErrInternal)
			return
		}
		if s.enableServedByHeader {
			w.Header().Set("x-served-by", servedBy)
		}
		setCacheHeader(w, batchContainsCached)
		writeBatchRPCRes(ctx, w, batchRes)
		return
	}

	rawBody := json.RawMessage(body)
	backendRes, cached, servedBy, err := s.handleBatchRPC(ctx, []json.RawMessage{rawBody}, isLimited, false)
	if err != nil {
		if errors.Is(err, ErrConsensusGetReceiptsCantBeBatched) ||
			errors.Is(err, ErrConsensusGetReceiptsInvalidTarget) {
			writeRPCError(ctx, w, nil, ErrInvalidRequest(err.Error()))
			return
		}
		writeRPCError(ctx, w, nil, ErrInternal)
		return
	}
	if s.enableServedByHeader {
		w.Header().Set("x-served-by", servedBy)
	}
	setCacheHeader(w, cached)
	writeRPCRes(ctx, w, backendRes[0])
}

func (s *Server) handleBatchRPC(ctx context.Context, reqs []json.RawMessage, isLimited limiterFunc, isBatch bool) ([]*RPCRes, bool, string, error) {
	// A request set is transformed into groups of batches.
	// Each batch group maps to a forwarded JSON-RPC batch request (subject to maxUpstreamBatchSize constraints).
	// A groupID is used to decouple Requests that have duplicate IDs so they're not part of the same batch that's
	// forwarded to the backend. This is done to ensure that the order of JSON-RPC Responses matches the Request order,
	// as the backend MAY return Responses out of order.
	// NOTE: Duplicate request IDs induce 1-sized JSON-RPC batches.
	type batchGroup struct {
		groupID      int
		backendGroup string
	}

	responses := make([]*RPCRes, len(reqs))
	batches := make(map[batchGroup][]batchElem)
	ids := make(map[string]int, len(reqs))

	for i := range reqs {
		parsedReq, err := ParseRPCReq(reqs[i])
		if err != nil {
			log.Info("error parsing RPC call", "source", "rpc", "err", err)
			responses[i] = NewRPCErrorRes(nil, err)
			continue
		}

		// Simple health check
		if len(reqs) == 1 && parsedReq.Method == proxydHealthzMethod {
			res := &RPCRes{
				ID:      parsedReq.ID,
				JSONRPC: JSONRPCVersion,
				Result:  "OK",
			}
			return []*RPCRes{res}, false, "", nil
		}

		if err := ValidateRPCReq(parsedReq); err != nil {
			RecordRPCError(ctx, BackendProxyd, MethodUnknown, err)
			responses[i] = NewRPCErrorRes(nil, err)
			continue
		}

		if parsedReq.Method == "eth_accounts" {
			RecordRPCForward(ctx, BackendProxyd, "eth_accounts", RPCRequestSourceHTTP)
			responses[i] = NewRPCRes(parsedReq.ID, emptyArrayResponse)
			continue
		}

		group := s.rpcMethodMappings[parsedReq.Method]
		if group == "" {
			// use unknown below to prevent DOS vector that fills up memory
			// with arbitrary method names.
			log.Info(
				"blocked request for non-whitelisted method",
				"source", "rpc",
				"req_id", GetReqID(ctx),
				"method", parsedReq.Method,
			)
			RecordRPCError(ctx, BackendProxyd, MethodUnknown, ErrMethodNotWhitelisted)
			responses[i] = NewRPCErrorRes(parsedReq.ID, ErrMethodNotWhitelisted)
			continue
		}

		// Take rate limit for specific methods.
		// NOTE: eventually, this should apply to all batch requests. However,
		// since we don't have data right now on the size of each batch, we
		// only apply this to the methods that have an additional rate limit.
		if _, ok := s.overrideLims[parsedReq.Method]; ok && isLimited(parsedReq.Method) {
			log.Info(
				"rate limited specific RPC",
				"source", "rpc",
				"req_id", GetReqID(ctx),
				"method", parsedReq.Method,
			)
			RecordRPCError(ctx, BackendProxyd, parsedReq.Method, ErrOverRateLimit)
			responses[i] = NewRPCErrorRes(parsedReq.ID, ErrOverRateLimit)
			continue
		}

		// Apply a sender-based rate limit if it is enabled. Note that sender-based rate
		// limits apply regardless of origin or user-agent. As such, they don't use the
		// isLimited method.
		if parsedReq.Method == "eth_sendRawTransaction" && s.senderLim != nil {
			if err := s.rateLimitSender(ctx, parsedReq); err != nil {
				RecordRPCError(ctx, BackendProxyd, parsedReq.Method, err)
				responses[i] = NewRPCErrorRes(parsedReq.ID, err)
				continue
			}
		}

		id := string(parsedReq.ID)
		// If this is a duplicate Request ID, move the Request to a new batchGroup
		ids[id]++
		batchGroupID := ids[id]
		batchGroup := batchGroup{groupID: batchGroupID, backendGroup: group}
		batches[batchGroup] = append(batches[batchGroup], batchElem{parsedReq, i})
	}

	servedBy := make(map[string]bool, 0)
	var cached bool
	for group, batch := range batches {
		var cacheMisses []batchElem

		for _, req := range batch {
			backendRes, _ := s.cache.GetRPC(ctx, req.Req)
			if backendRes != nil {
				responses[req.Index] = backendRes
				cached = true
			} else {
				cacheMisses = append(cacheMisses, req)
			}
		}

		// Create minibatches - each minibatch must be no larger than the maxUpstreamBatchSize
		numBatches := int(math.Ceil(float64(len(cacheMisses)) / float64(s.maxUpstreamBatchSize)))
		for i := 0; i < numBatches; i++ {
			if ctx.Err() == context.DeadlineExceeded {
				log.Info("short-circuiting batch RPC",
					"req_id", GetReqID(ctx),
					"auth", GetAuthCtx(ctx),
					"batch_index", i,
				)
				batchRPCShortCircuitsTotal.Inc()
				return nil, false, "", context.DeadlineExceeded
			}

			start := i * s.maxUpstreamBatchSize
			end := int(math.Min(float64(start+s.maxUpstreamBatchSize), float64(len(cacheMisses))))
			elems := cacheMisses[start:end]
			res, sb, err := s.BackendGroups[group.backendGroup].Forward(ctx, createBatchRequest(elems), isBatch)
			servedBy[sb] = true
			if err != nil {
				if errors.Is(err, ErrConsensusGetReceiptsCantBeBatched) ||
					errors.Is(err, ErrConsensusGetReceiptsInvalidTarget) {
					return nil, false, "", err
				}
				log.Error(
					"error forwarding RPC batch",
					"batch_size", len(elems),
					"backend_group", group,
					"req_id", GetReqID(ctx),
					"err", err,
				)
				res = nil
				for _, elem := range elems {
					res = append(res, NewRPCErrorRes(elem.Req.ID, err))
				}
			}

			for i := range elems {
				responses[elems[i].Index] = res[i]

				// TODO(inphi): batch put these
				if res[i].Error == nil && res[i].Result != nil {
					if err := s.cache.PutRPC(ctx, elems[i].Req, res[i]); err != nil {
						log.Warn(
							"cache put error",
							"req_id", GetReqID(ctx),
							"err", err,
						)
					}
				}
			}
		}
	}

	servedByString := ""
	for sb := range servedBy {
		if servedByString != "" {
			servedByString += ", "
		}
		servedByString += sb
	}

	return responses, cached, servedByString, nil
}

func (s *Server) HandleWS(w http.ResponseWriter, r *http.Request) {
	ctx := s.populateContext(w, r)
	if ctx == nil {
		return
	}

	log.Info("received WS connection", "req_id", GetReqID(ctx))

	clientConn, err := s.upgrader.Upgrade(w, r, nil)
	if err != nil {
		log.Error("error upgrading client conn", "auth", GetAuthCtx(ctx), "req_id", GetReqID(ctx), "err", err)
		return
	}
	clientConn.SetReadLimit(s.maxBodySize)

	proxier, err := s.wsBackendGroup.ProxyWS(ctx, clientConn, s.wsMethodWhitelist)
	if err != nil {
		if errors.Is(err, ErrNoBackends) {
			RecordUnserviceableRequest(ctx, RPCRequestSourceWS)
		}
		log.Error("error dialing ws backend", "auth", GetAuthCtx(ctx), "req_id", GetReqID(ctx), "err", err)
		clientConn.Close()
		return
	}

	activeClientWsConnsGauge.WithLabelValues(GetAuthCtx(ctx)).Inc()
	go func() {
		// Below call blocks so run it in a goroutine.
		if err := proxier.Proxy(ctx); err != nil {
			log.Error("error proxying websocket", "auth", GetAuthCtx(ctx), "req_id", GetReqID(ctx), "err", err)
		}
		activeClientWsConnsGauge.WithLabelValues(GetAuthCtx(ctx)).Dec()
	}()

	log.Info("accepted WS connection", "auth", GetAuthCtx(ctx), "req_id", GetReqID(ctx))
}

func (s *Server) populateContext(w http.ResponseWriter, r *http.Request) context.Context {
	vars := mux.Vars(r)
	authorization := vars["authorization"]
	xff := r.Header.Get(s.rateLimitHeader)
	if xff == "" {
		ipPort := strings.Split(r.RemoteAddr, ":")
		if len(ipPort) == 2 {
			xff = ipPort[0]
		}
	}
	ctx := context.WithValue(r.Context(), ContextKeyXForwardedFor, xff) // nolint:staticcheck

	if len(s.authenticatedPaths) > 0 {
		if authorization == "" || s.authenticatedPaths[authorization] == "" {
			log.Info("blocked unauthorized request", "authorization", authorization)
			httpResponseCodesTotal.WithLabelValues("401").Inc()
			w.WriteHeader(401)
			return nil
		}

		ctx = context.WithValue(ctx, ContextKeyAuth, s.authenticatedPaths[authorization]) // nolint:staticcheck
	}

	return context.WithValue(
		ctx,
		ContextKeyReqID, // nolint:staticcheck
		randStr(10),
	)
}

func randStr(l int) string {
	b := make([]byte, l)
	if _, err := rand.Read(b); err != nil {
		panic(err)
	}
	return hex.EncodeToString(b)
}

func (s *Server) isUnlimitedOrigin(origin string) bool {
	for _, pat := range s.limExemptOrigins {
		if pat.MatchString(origin) {
			return true
		}
	}

	return false
}

func (s *Server) isUnlimitedUserAgent(origin string) bool {
	for _, pat := range s.limExemptUserAgents {
		if pat.MatchString(origin) {
			return true
		}
	}
	return false
}

func (s *Server) isGlobalLimit(method string) bool {
	return s.globallyLimitedMethods[method]
}

func (s *Server) rateLimitSender(ctx context.Context, req *RPCReq) error {
	var params []string
	if err := json.Unmarshal(req.Params, &params); err != nil {
		log.Debug("error unmarshalling raw transaction params", "err", err, "req_Id", GetReqID(ctx))
		return ErrParseErr
	}

	if len(params) != 1 {
		log.Debug("raw transaction request has invalid number of params", "req_id", GetReqID(ctx))
		// The error below is identical to the one Geth responds with.
		return ErrInvalidParams("missing value for required argument 0")
	}

	var data hexutil.Bytes
	if err := data.UnmarshalText([]byte(params[0])); err != nil {
		log.Debug("error decoding raw tx data", "err", err, "req_id", GetReqID(ctx))
		// Geth returns the raw error from UnmarshalText.
		return ErrInvalidParams(err.Error())
	}

	// Inflates a types.Transaction object from the transaction's raw bytes.
	tx := new(types.Transaction)
	if err := tx.UnmarshalBinary(data); err != nil {
		log.Debug("could not unmarshal transaction", "err", err, "req_id", GetReqID(ctx))
		return ErrInvalidParams(err.Error())
	}

	// Check if the transaction is for the expected chain,
	// otherwise reject before rate limiting to avoid replay attacks.
	if !s.isAllowedChainId(tx.ChainId()) {
		log.Debug("chain id is not allowed", "req_id", GetReqID(ctx))
		return txpool.ErrInvalidSender
	}

	// Convert the transaction into a Message object so that we can get the
	// sender. This method performs an ecrecover, which can be expensive.
	msg, err := core.TransactionToMessage(tx, types.LatestSignerForChainID(tx.ChainId()), nil)
	if err != nil {
		log.Debug("could not get message from transaction", "err", err, "req_id", GetReqID(ctx))
		return ErrInvalidParams(err.Error())
	}
	ok, err := s.senderLim.Take(ctx, fmt.Sprintf("%s:%d", msg.From.Hex(), tx.Nonce()))
	if err != nil {
		log.Error("error taking from sender limiter", "err", err, "req_id", GetReqID(ctx))
		return ErrInternal
	}
	if !ok {
		log.Debug("sender rate limit exceeded", "sender", msg.From.Hex(), "req_id", GetReqID(ctx))
		return ErrOverSenderRateLimit
	}

	return nil
}

func (s *Server) isAllowedChainId(chainId *big.Int) bool {
	if s.allowedChainIds == nil || len(s.allowedChainIds) == 0 {
		return true
	}
	for _, id := range s.allowedChainIds {
		if chainId.Cmp(id) == 0 {
			return true
		}
	}
	return false
}

func setCacheHeader(w http.ResponseWriter, cached bool) {
	if cached {
		w.Header().Set(cacheStatusHdr, "HIT")
	} else {
		w.Header().Set(cacheStatusHdr, "MISS")
	}
}

func writeRPCError(ctx context.Context, w http.ResponseWriter, id json.RawMessage, err error) {
	var res *RPCRes
	if r, ok := err.(*RPCErr); ok {
		res = NewRPCErrorRes(id, r)
	} else {
		res = NewRPCErrorRes(id, ErrInternal)
	}
	writeRPCRes(ctx, w, res)
}

func writeRPCRes(ctx context.Context, w http.ResponseWriter, res *RPCRes) {
	statusCode := 200
	if res.IsError() && res.Error.HTTPErrorCode != 0 {
		statusCode = res.Error.HTTPErrorCode
	}

	w.Header().Set("content-type", "application/json")
	w.WriteHeader(statusCode)
	ww := &recordLenWriter{Writer: w}
	enc := json.NewEncoder(ww)
	if err := enc.Encode(res); err != nil {
		log.Error("error writing rpc response", "err", err)
		RecordRPCError(ctx, BackendProxyd, MethodUnknown, err)
		return
	}
	httpResponseCodesTotal.WithLabelValues(strconv.Itoa(statusCode)).Inc()
	RecordResponsePayloadSize(ctx, ww.Len)
}

func writeBatchRPCRes(ctx context.Context, w http.ResponseWriter, res []*RPCRes) {
	w.Header().Set("content-type", "application/json")
	w.WriteHeader(200)
	ww := &recordLenWriter{Writer: w}
	enc := json.NewEncoder(ww)
	if err := enc.Encode(res); err != nil {
		log.Error("error writing batch rpc response", "err", err)
		RecordRPCError(ctx, BackendProxyd, MethodUnknown, err)
		return
	}
	RecordResponsePayloadSize(ctx, ww.Len)
}

func instrumentedHdlr(h http.Handler) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		respTimer := prometheus.NewTimer(httpRequestDurationSumm)
		h.ServeHTTP(w, r)
		respTimer.ObserveDuration()
	}
}

func GetAuthCtx(ctx context.Context) string {
	authUser, ok := ctx.Value(ContextKeyAuth).(string)
	if !ok {
		return "none"
	}

	return authUser
}

func GetReqID(ctx context.Context) string {
	reqId, ok := ctx.Value(ContextKeyReqID).(string)
	if !ok {
		return ""
	}
	return reqId
}

func GetXForwardedFor(ctx context.Context) string {
	xff, ok := ctx.Value(ContextKeyXForwardedFor).(string)
	if !ok {
		return ""
	}
	return xff
}

type recordLenWriter struct {
	io.Writer
	Len int
}

func (w *recordLenWriter) Write(p []byte) (n int, err error) {
	n, err = w.Writer.Write(p)
	w.Len += n
	return
}

type NoopRPCCache struct{}

func (n *NoopRPCCache) GetRPC(context.Context, *RPCReq) (*RPCRes, error) {
	return nil, nil
}

func (n *NoopRPCCache) PutRPC(context.Context, *RPCReq, *RPCRes) error {
	return nil
}

func truncate(str string, maxLen int) string {
	if maxLen == 0 {
		maxLen = maxRequestBodyLogLen
	}

	if len(str) > maxLen {
		return str[:maxLen] + "..."
	} else {
		return str
	}
}

type batchElem struct {
	Req   *RPCReq
	Index int
}

func createBatchRequest(elems []batchElem) []*RPCReq {
	batch := make([]*RPCReq, len(elems))
	for i := range elems {
		batch[i] = elems[i].Req
	}
	return batch
}
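The minibatch slicing in handleBatchRPC is plain index arithmetic. A standalone sketch of just that loop (hypothetical sizes, no proxyd types) shows how cache misses are chunked into upstream batches:

package main

import (
	"fmt"
	"math"
)

func main() {
	cacheMisses := 23          // e.g. 23 requests missed the cache
	maxUpstreamBatchSize := 10 // upstream batches are capped at 10

	numBatches := int(math.Ceil(float64(cacheMisses) / float64(maxUpstreamBatchSize)))
	for i := 0; i < numBatches; i++ {
		start := i * maxUpstreamBatchSize
		end := int(math.Min(float64(start+maxUpstreamBatchSize), float64(cacheMisses)))
		fmt.Printf("minibatch %d -> elems[%d:%d]\n", i, start, end)
	}
	// Output:
	// minibatch 0 -> elems[0:10]
	// minibatch 1 -> elems[10:20]
	// minibatch 2 -> elems[20:23]
}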
56 proxyd/string_set.go Normal file
@@ -0,0 +1,56 @@
package proxyd

import "sync"

type StringSet struct {
	underlying map[string]bool
	mtx        sync.RWMutex
}

func NewStringSet() *StringSet {
	return &StringSet{
		underlying: make(map[string]bool),
	}
}

func NewStringSetFromStrings(in []string) *StringSet {
	underlying := make(map[string]bool)
	for _, str := range in {
		underlying[str] = true
	}
	return &StringSet{
		underlying: underlying,
	}
}

func (s *StringSet) Has(test string) bool {
	s.mtx.RLock()
	defer s.mtx.RUnlock()
	return s.underlying[test]
}

func (s *StringSet) Add(str string) {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	s.underlying[str] = true
}

func (s *StringSet) Entries() []string {
	s.mtx.RLock()
	defer s.mtx.RUnlock()
	out := make([]string, len(s.underlying))
	var i int
	for entry := range s.underlying {
		out[i] = entry
		i++
	}
	return out
}

func (s *StringSet) Extend(in []string) *StringSet {
	out := NewStringSetFromStrings(in)
	for k := range s.underlying {
		out.Add(k)
	}
	return out
}
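A quick usage sketch of the set (the method names in it are illustrative values, not a required whitelist). Note that Extend returns a new set rather than mutating the receiver, which is why the server can share a base whitelist safely:

package main

import (
	"fmt"

	"github.com/ethereum-optimism/optimism/proxyd"
)

func main() {
	ws := proxyd.NewStringSetFromStrings([]string{"eth_subscribe", "eth_unsubscribe"})
	fmt.Println(ws.Has("eth_subscribe"))          // true
	fmt.Println(ws.Has("eth_sendRawTransaction")) // false

	// Extend copies the receiver's entries into a fresh set built from `in`;
	// the original set is left untouched.
	ws2 := ws.Extend([]string{"eth_chainId"})
	fmt.Println(ws.Has("eth_chainId"), ws2.Has("eth_chainId")) // false true
}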
33 proxyd/tls.go Normal file
@@ -0,0 +1,33 @@
package proxyd

import (
	"crypto/tls"
	"crypto/x509"
	"errors"
	"os"
)

func CreateTLSClient(ca string) (*tls.Config, error) {
	pem, err := os.ReadFile(ca)
	if err != nil {
		return nil, wrapErr(err, "error reading CA")
	}

	roots := x509.NewCertPool()
	ok := roots.AppendCertsFromPEM(pem)
	if !ok {
		return nil, errors.New("error parsing TLS client cert")
	}

	return &tls.Config{
		RootCAs: roots,
	}, nil
}

func ParseKeyPair(crt, key string) (tls.Certificate, error) {
	cert, err := tls.LoadX509KeyPair(crt, key)
	if err != nil {
		return tls.Certificate{}, wrapErr(err, "error loading x509 key pair")
	}
	return cert, nil
}
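A sketch of wiring these two helpers into an HTTP client for a TLS-protected backend (the file paths are placeholders): CreateTLSClient pins the upstream's CA, and ParseKeyPair supplies a client certificate for mutual TLS.

package main

import (
	"crypto/tls"
	"net/http"

	"github.com/ethereum-optimism/optimism/proxyd"
)

func main() {
	tlsCfg, err := proxyd.CreateTLSClient("/etc/proxyd/ca.crt") // placeholder path
	if err != nil {
		panic(err)
	}

	cert, err := proxyd.ParseKeyPair("/etc/proxyd/client.crt", "/etc/proxyd/client.key") // placeholder paths
	if err != nil {
		panic(err)
	}
	tlsCfg.Certificates = []tls.Certificate{cert}

	client := &http.Client{Transport: &http.Transport{TLSClientConfig: tlsCfg}}
	_ = client // use this client to talk to the backend
}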
135 proxyd/tools/mockserver/handler/handler.go Normal file
@@ -0,0 +1,135 @@
package handler

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
	"strings"

	"github.com/ethereum-optimism/optimism/proxyd"

	"github.com/gorilla/mux"
	"github.com/pkg/errors"
	"gopkg.in/yaml.v3"
)

type MethodTemplate struct {
	Method   string `yaml:"method"`
	Block    string `yaml:"block"`
	Response string `yaml:"response"`
}

type MockedHandler struct {
	Overrides    []*MethodTemplate
	Autoload     bool
	AutoloadFile string
}

func (mh *MockedHandler) Serve(port int) error {
	r := mux.NewRouter()
	r.HandleFunc("/", mh.Handler)
	http.Handle("/", r)
	fmt.Printf("starting server up on :%d serving MockedResponsesFile %s\n", port, mh.AutoloadFile)
	err := http.ListenAndServe(fmt.Sprintf(":%d", port), nil)

	if errors.Is(err, http.ErrServerClosed) {
		fmt.Printf("server closed\n")
	} else if err != nil {
		fmt.Printf("error starting server: %s\n", err)
		return err
	}
	return nil
}

func (mh *MockedHandler) Handler(w http.ResponseWriter, req *http.Request) {
	body, err := io.ReadAll(req.Body)
	if err != nil {
		fmt.Printf("error reading request: %v\n", err)
	}

	var template []*MethodTemplate
	if mh.Autoload {
		template = append(template, mh.LoadFromFile(mh.AutoloadFile)...)
	}
	if mh.Overrides != nil {
		template = append(template, mh.Overrides...)
	}

	batched := proxyd.IsBatch(body)
	var requests []map[string]interface{}
	if batched {
		err = json.Unmarshal(body, &requests)
		if err != nil {
			fmt.Printf("error reading request: %v\n", err)
		}
	} else {
		var j map[string]interface{}
		err = json.Unmarshal(body, &j)
		if err != nil {
			fmt.Printf("error reading request: %v\n", err)
		}
		requests = append(requests, j)
	}

	var responses []string
	for _, r := range requests {
		method := r["method"]
		block := ""
		if method == "eth_getBlockByNumber" || method == "debug_getRawReceipts" {
			block = (r["params"].([]interface{})[0]).(string)
		}

		var selectedResponse string
		for _, r := range template {
			if r.Method == method && r.Block == block {
				selectedResponse = r.Response
			}
		}
		if selectedResponse != "" {
			var rpcRes proxyd.RPCRes
			err = json.Unmarshal([]byte(selectedResponse), &rpcRes)
			if err != nil {
				panic(err)
			}
			idJson, _ := json.Marshal(r["id"])
			rpcRes.ID = idJson
			res, _ := json.Marshal(rpcRes)
			responses = append(responses, string(res))
		}
	}

	resBody := ""
	if batched {
		resBody = "[" + strings.Join(responses, ",") + "]"
	} else if len(responses) > 0 {
		resBody = responses[0]
	}

	_, err = fmt.Fprint(w, resBody)
	if err != nil {
		fmt.Printf("error writing response: %v\n", err)
	}
}

func (mh *MockedHandler) LoadFromFile(file string) []*MethodTemplate {
	contents, err := os.ReadFile(file)
	if err != nil {
		fmt.Printf("error reading MockedResponsesFile: %v\n", err)
	}
	var template []*MethodTemplate
	err = yaml.Unmarshal(contents, &template)
	if err != nil {
		fmt.Printf("error reading MockedResponsesFile: %v\n", err)
	}
	return template
}

func (mh *MockedHandler) AddOverride(template *MethodTemplate) {
	mh.Overrides = append(mh.Overrides, template)
}

func (mh *MockedHandler) ResetOverrides() {
	mh.Overrides = make([]*MethodTemplate, 0)
}
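Besides autoloading a YAML file, the handler can be steered per-test via overrides. Because Handler iterates all templates and keeps the last match, overrides appended after the autoloaded file take precedence. A sketch (the response body and port are invented for illustration):

package main

import (
	"github.com/ethereum-optimism/optimism/proxyd/tools/mockserver/handler"
)

func main() {
	h := &handler.MockedHandler{}
	// Appended templates win over earlier ones for the same (method, block) key.
	h.AddOverride(&handler.MethodTemplate{
		Method:   "eth_getBlockByNumber",
		Block:    "latest",
		Response: `{"jsonrpc":"2.0","id":67,"result":{"hash":"hash9","number":"0x9"}}`,
	})
	defer h.ResetOverrides()

	if err := h.Serve(8545); err != nil { // port is illustrative
		panic(err)
	}
}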
30 proxyd/tools/mockserver/main.go Normal file
@@ -0,0 +1,30 @@
package main

import (
	"fmt"
	"os"
	"path"
	"strconv"

	"github.com/ethereum-optimism/optimism/proxyd/tools/mockserver/handler"
)

func main() {
	if len(os.Args) < 3 {
		fmt.Printf("simply mock a response based on an external text MockedResponsesFile\n")
		fmt.Printf("usage: mockserver <port> <MockedResponsesFile.yml>\n")
		os.Exit(1)
	}
	port, _ := strconv.ParseInt(os.Args[1], 10, 32)
	dir, _ := os.Getwd()

	h := handler.MockedHandler{
		Autoload:     true,
		AutoloadFile: path.Join(dir, os.Args[2]),
	}

	err := h.Serve(int(port))
	if err != nil {
		fmt.Printf("error starting mockserver: %v\n", err)
	}
}
52 proxyd/tools/mockserver/node1.yml Normal file
File diff suppressed because one or more lines are too long
44 proxyd/tools/mockserver/node2.yml Normal file
@@ -0,0 +1,44 @@
- method: eth_getBlockByNumber
  block: latest
  response: >
    {
      "jsonrpc": "2.0",
      "id": 67,
      "result": {
        "hash": "hash2",
        "number": "0x2"
      }
    }
- method: eth_getBlockByNumber
  block: 0x1
  response: >
    {
      "jsonrpc": "2.0",
      "id": 67,
      "result": {
        "hash": "hash1",
        "number": "0x1"
      }
    }
- method: eth_getBlockByNumber
  block: 0x2
  response: >
    {
      "jsonrpc": "2.0",
      "id": 67,
      "result": {
        "hash": "hash2",
        "number": "0x2"
      }
    }
- method: eth_getBlockByNumber
  block: 0x3
  response: >
    {
      "jsonrpc": "2.0",
      "id": 67,
      "result": {
        "hash": "hash3",
        "number": "0x3"
      }
    }
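To see how these templates are matched, recall that the handler keys on (method, first param) for eth_getBlockByNumber and echoes the caller's request ID back. A sketch of one exchange against a mockserver started with this file (the port is invented; the expected body assumes the ID-swapping behavior shown in handler.go):

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	body := []byte(`{"jsonrpc":"2.0","method":"eth_getBlockByNumber","params":["0x2",false],"id":1}`)
	resp, err := http.Post("http://localhost:8080/", "application/json", bytes.NewReader(body)) // port invented
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out))
	// expected, per the 0x2 template above (id echoed from the request):
	// {"jsonrpc":"2.0","result":{"hash":"hash2","number":"0x2"},"id":1}
}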