diff --git a/.circleci/config.yml b/.circleci/config.yml index 7ac83a4..6853d9d 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -17,6 +17,7 @@ workflows: mapping: | op-conductor-mon/.* run-build-op-conductor-mon true op-ufm/.* run-build-op-ufm true + proxyd/.* run-build-proxyd true .circleci/.* run-all true .github/.* run-all true diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index adcc7af..74a524e 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -13,6 +13,9 @@ parameters: run-build-op-ufm: type: boolean default: false + run-build-proxyd: + type: boolean + default: false run-all: type: boolean default: false @@ -148,7 +151,7 @@ jobs: - run: sudo sed -i '13 i \ \ \ \ \ \ \ \ \ \ \ \ nameservers:' /etc/netplan/50-cloud-init.yaml - run: sudo sed -i '14 i \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ addresses:' /etc/netplan/50-cloud-init.yaml - run: sudo sed -i "s/addresses:/ addresses":" [8.8.8.8, 8.8.4.4] /g" /etc/netplan/50-cloud-init.yaml - - run: cat /etc/netplan/50-cloud-init.yaml + - run: sudo cat /etc/netplan/50-cloud-init.yaml - run: sudo netplan apply - run: name: Publish @@ -380,11 +383,82 @@ workflows: docker_name: op-ufm docker_tags: <>,<> docker_context: . + op-proxyd: + when: + or: [<< pipeline.parameters.run-build-proxyd >>, << pipeline.parameters.run-all >>] + jobs: + - go-lint: + name: proxyd-lint + module: proxyd + - go-test: + name: proxyd-tests + module: proxyd + - docker-build: + name: proxyd-docker-build + docker_file: proxyd/Dockerfile + docker_name: proxyd + docker_tags: <>,<> + docker_context: . + release: + when: + not: + equal: [ scheduled_pipeline, << pipeline.trigger_source >> ] + jobs: + - hold: + type: approval + filters: + tags: + only: /^(proxyd|ufm-[a-z0-9\-]*|op-[a-z0-9\-]*)\/v.*/ + branches: + ignore: /.*/ + - docker-build: + name: op-ufm-docker-build + filters: + tags: + only: /^op-ufm\/v.*/ + branches: + ignore: /.*/ + docker_name: op-ufm + docker_tags: <> + docker_context: . + docker_file: op-ufm/Dockerfile + context: + - oplabs-gcr-release + requires: + - hold - docker-publish: name: op-ufm-docker-publish docker_name: op-ufm - docker_tags: <>,<> + docker_tags: <> context: - oplabs-gcr requires: - op-ufm-docker-build + - docker-build: + name: proxyd-docker-build + filters: + tags: + only: /^proxyd\/v.*/ + branches: + ignore: /.*/ + docker_name: proxyd + docker_tags: <> + docker_context: . + docker_file: proxyd/Dockerfile + context: + - oplabs-gcr-release + requires: + - hold + - docker-publish: + name: proxyd-docker-release + filters: + tags: + only: /^proxyd\/v.*/ + branches: + ignore: /.*/ + docker_name: proxyd + docker_tags: <> + context: + - oplabs-gcr-release + requires: + - proxyd-docker-build diff --git a/.github/workflows/tag-service.yml b/.github/workflows/tag-service.yml new file mode 100644 index 0000000..60baa6f --- /dev/null +++ b/.github/workflows/tag-service.yml @@ -0,0 +1,55 @@ +name: Tag Service + +on: + workflow_dispatch: + inputs: + bump: + description: 'How much to bump the version by' + required: true + type: choice + options: + - major + - minor + - patch + - prerelease + - finalize-prerelease + service: + description: 'Which service to release' + required: true + type: choice + options: + - op-ufm + - proxyd + prerelease: + description: Increment major/minor/patch as prerelease? 
+ required: false + type: boolean + default: false + +jobs: + release: + runs-on: ubuntu-latest + environment: op-stack-production + steps: + - uses: actions/checkout@v4 + - name: Fetch tags + run: git fetch --tags origin --force + - name: Setup Python 3.10 + uses: actions/setup-python@v5 + with: + python-version: "3.10" + - name: Install deps + run: pip install -r requirements.txt + working-directory: ops/tag-service + - run: ops/tag-service/tag-service.py --bump="$BUMP" --service="$SERVICE" + env: + INPUT_GITHUB_TOKEN: ${{ github.token }} + BUMP: ${{ github.event.inputs.bump }} + SERVICE: ${{ github.event.inputs.service }} + if: ${{ github.event.inputs.prerelease == 'false' }} + - run: ops/tag-service/tag-service.py --bump="$BUMP" --service="$SERVICE" --pre-release + env: + INPUT_GITHUB_TOKEN: ${{ github.token }} + BUMP: ${{ github.event.inputs.bump }} + SERVICE: ${{ github.event.inputs.service }} + if: ${{ github.event.inputs.prerelease == 'true' }} diff --git a/ops/tag-service/.gitignore b/ops/tag-service/.gitignore new file mode 100644 index 0000000..f5e96db --- /dev/null +++ b/ops/tag-service/.gitignore @@ -0,0 +1 @@ +venv \ No newline at end of file diff --git a/ops/tag-service/README.md b/ops/tag-service/README.md new file mode 100644 index 0000000..b1fdc89 --- /dev/null +++ b/ops/tag-service/README.md @@ -0,0 +1,21 @@ +# Tag Service +Tag Service is a Github action which builds new tags and applies them to services in the monorepo. +It accepts: +* Service name +* Bump Amount [major, minor, patch] +* Prerelease and Finalize-Prerelease (to add/remove `rc` versions) + +It can be triggered from the Github Actions panel in the monorepo + +# Tag Tool +Tag Tool is a minimal rewrite of the Tag Service to let operators prepare and commit tags from commandline +It accepts: +* Service name +* Bump Amount [major, minor, patch, prerelease, finalize-prerelease] + +Tag Tool is meant to be run locally, and *does not* perform any write operations. Instead, it prints the git commands to console for the operator to use. + +Additionally, a special service name "op-stack" is available, which will bump versions for `op-node`, `op-batcher` and `op-proposer` from the highest semver amongst them. + +To run Tag Tool locally, the only dependency is `pip install semver` + diff --git a/ops/tag-service/requirements.txt b/ops/tag-service/requirements.txt new file mode 100644 index 0000000..c01ed2c --- /dev/null +++ b/ops/tag-service/requirements.txt @@ -0,0 +1,2 @@ +click==8.1.3 +semver==3.0.0-dev4 diff --git a/ops/tag-service/tag-service.py b/ops/tag-service/tag-service.py new file mode 100755 index 0000000..c911043 --- /dev/null +++ b/ops/tag-service/tag-service.py @@ -0,0 +1,124 @@ +#!/usr/bin/env python3 +import logging.config +import os +import re +import subprocess +import sys + +import click +import semver + +# Minimum version numbers for packages migrating from legacy versioning. 
+MIN_VERSIONS = { + 'proxyd': '4.6.1', +} + +VALID_BUMPS = ('major', 'minor', 'patch', 'prerelease', 'finalize-prerelease') + +MESSAGE_TEMPLATE = '[tag-service-release] Tag {service} at {version}' + +LOGGING_CONFIG = { + 'version': 1, + 'disable_existing_loggers': True, + 'formatters': { + 'standard': { + 'format': '%(asctime)s [%(levelname)s]: %(message)s' + }, + }, + 'handlers': { + 'default': { + 'level': 'INFO', + 'formatter': 'standard', + 'class': 'logging.StreamHandler', + 'stream': 'ext://sys.stderr' + }, + }, + 'loggers': { + '': { + 'handlers': ['default'], + 'level': 'INFO', + 'propagate': False + }, + } +} + +logging.config.dictConfig(LOGGING_CONFIG) +log = logging.getLogger(__name__) + + +@click.command() +@click.option('--bump', required=True, type=click.Choice(VALID_BUMPS)) +@click.option('--service', required=True, type=click.Choice(list(MIN_VERSIONS.keys()))) +@click.option('--pre-release/--no-pre-release', default=False) +def tag_version(bump, service, pre_release): + tags = subprocess.run(['git', 'tag', '--list'], capture_output=True, check=True) \ + .stdout.decode('utf-8').splitlines() + + # Filter out tags that don't match the service name, and tags + # for prerelease versions. + version_pattern = f'^{service}/v\\d+\\.\\d+\\.\\d+(-rc\\.\\d+)?$' + svc_versions = [t.replace(f'{service}/v', '') for t in tags if re.match(version_pattern, t)] + svc_versions = sorted(svc_versions, key=lambda v: semver.Version.parse(v), reverse=True) + + if pre_release and bump == 'prerelease': + raise Exception('Cannot use --bump=prerelease with --pre-release') + + if pre_release and bump == 'finalize-prerelease': + raise Exception('Cannot use --bump=finalize-prerelease with --pre-release') + + if len(svc_versions) == 0: + latest_version = MIN_VERSIONS[service] + else: + latest_version = svc_versions[0] + + latest_version = semver.Version.parse(latest_version) + + log.info(f'Latest version: v{latest_version}') + + if bump == 'major': + bumped = latest_version.bump_major() + elif bump == 'minor': + bumped = latest_version.bump_minor() + elif bump == 'patch': + bumped = latest_version.bump_patch() + elif bump == 'prerelease': + bumped = latest_version.bump_prerelease() + elif bump == 'finalize-prerelease': + bumped = latest_version.finalize_version() + else: + raise Exception('Invalid bump type: {}'.format(bump)) + + if pre_release: + bumped = bumped.bump_prerelease() + + new_version = 'v' + str(bumped) + new_tag = f'{service}/{new_version}' + + log.info(f'Bumped version: {new_version}') + + log.info('Configuring git') + # The below env vars are set by GHA. 
+ gh_actor = os.environ['GITHUB_ACTOR'] + gh_token = os.environ['INPUT_GITHUB_TOKEN'] + gh_repo = os.environ['GITHUB_REPOSITORY'] + origin_url = f'https://{gh_actor}:${gh_token}@github.com/{gh_repo}.git' + subprocess.run(['git', 'config', 'user.name', gh_actor], check=True) + subprocess.run(['git', 'config', 'user.email', f'{gh_actor}@users.noreply.github.com'], check=True) + subprocess.run(['git', 'remote', 'set-url', 'origin', origin_url], check=True) + + log.info(f'Creating tag: {new_tag}') + subprocess.run([ + 'git', + 'tag', + '-a', + new_tag, + '-m', + MESSAGE_TEMPLATE.format(service=service, version=new_version) + ], check=True) + + log.info('Pushing tag to origin') + subprocess.run(['git', 'push', 'origin', new_tag], check=True) + + +if __name__ == '__main__': + tag_version() diff --git a/ops/tag-service/tag-tool.py b/ops/tag-service/tag-tool.py new file mode 100644 index 0000000..19fbf7d --- /dev/null +++ b/ops/tag-service/tag-tool.py @@ -0,0 +1,81 @@ +import argparse +import subprocess +import re +import semver + +SERVICES = [ + 'proxyd', + 'op-ufm', + 'op-conductor-mon', +] +VERSION_PATTERN = '^{service}/v\\d+\\.\\d+\\.\\d+(-rc\\.\\d+)?$' +GIT_TAG_COMMAND = 'git tag -a {tag} -m "{message}"' +GIT_PUSH_COMMAND = 'git push origin {tag}' + +def new_tag(service, version, bump): + if bump == 'major': + bumped = version.bump_major() + elif bump == 'minor': + bumped = version.bump_minor() + elif bump == 'patch': + bumped = version.bump_patch() + elif bump == 'prerelease': + bumped = version.bump_prerelease() + elif bump == 'finalize-prerelease': + bumped = version.finalize_version() + else: + raise Exception('Invalid bump type: {}'.format(bump)) + return f'{service}/v{bumped}' + +def latest_version(service): + # Get the list of tags from the git repository. + tags = subprocess.run(['git', 'tag', '--list', f'{service}/v*'], capture_output=True, check=True) \ + .stdout.decode('utf-8').splitlines() + # Filter out tags that don't match the service name, and tags for prerelease versions. + svc_versions = sorted([t.replace(f'{service}/v', '') for t in tags]) + if len(svc_versions) == 0: + raise Exception(f'No tags found for service: {service}') + return svc_versions[-1] + +def latest_among_services(services): + latest = '0.0.0' + for service in services: + candidate = latest_version(service) + if semver.compare(candidate, latest) > 0: + latest = candidate + return latest + +def main(): + parser = argparse.ArgumentParser(description='Create a new git tag for a service') + parser.add_argument('--service', type=str, help='The name of the Service') + parser.add_argument('--bump', type=str, help='The type of bump to apply to the version number') + parser.add_argument('--message', type=str, help='Message to include in git tag', default='[tag-tool-release]') + args = parser.parse_args() + + service = args.service + + if service == 'op-stack': + latest = latest_among_services(['op-node', 'op-batcher', 'op-proposer']) + else: + latest = latest_version(service) + + bumped = new_tag(service, semver.VersionInfo.parse(latest), args.bump) + + print(f'latest tag: {latest}') + print(f'new tag: {bumped}') + print('run the following commands to create the new tag:\n') + # special case for tagging op-node, op-batcher, and op-proposer together. 
All three would share the same semver + if args.service == 'op-stack': + print(GIT_TAG_COMMAND.format(tag=bumped.replace('op-stack', 'op-node'), message=args.message)) + print(GIT_PUSH_COMMAND.format(tag=bumped.replace('op-stack', 'op-node'))) + print(GIT_TAG_COMMAND.format(tag=bumped.replace('op-stack', 'op-batcher'), message=args.message)) + print(GIT_PUSH_COMMAND.format(tag=bumped.replace('op-stack', 'op-batcher'))) + print(GIT_TAG_COMMAND.format(tag=bumped.replace('op-stack', 'op-proposer'), message=args.message)) + print(GIT_PUSH_COMMAND.format(tag=bumped.replace('op-stack', 'op-proposer'))) + else: + print(GIT_TAG_COMMAND.format(tag=bumped, message=args.message)) + print(GIT_PUSH_COMMAND.format(tag=bumped)) + +if __name__ == "__main__": + main() + diff --git a/proxyd/.gitignore b/proxyd/.gitignore new file mode 100644 index 0000000..65e6a82 --- /dev/null +++ b/proxyd/.gitignore @@ -0,0 +1,3 @@ +bin + +config.toml diff --git a/proxyd/CHANGELOG.md b/proxyd/CHANGELOG.md new file mode 100644 index 0000000..dd78bfe --- /dev/null +++ b/proxyd/CHANGELOG.md @@ -0,0 +1,252 @@ +# @eth-optimism/proxyd + +## 3.14.1 + +### Patch Changes + +- 5602deec7: chore(deps): bump github.com/prometheus/client_golang from 1.11.0 to 1.11.1 in /proxyd +- 6b3cf2070: Remove useless logging + +## 3.14.0 + +### Minor Changes + +- 9cc39bcfa: Add support for global method override rate limit +- 30db32862: Include nonce in sender rate limit + +### Patch Changes + +- b9bb1a98a: proxyd: Add req_id to log + +## 3.13.0 + +### Minor Changes + +- 6de891d3b: Add sender-based rate limiter + +## 3.12.0 + +### Minor Changes + +- e9f2c701: Allow disabling backend rate limiter +- ca45a85e: Support pattern matching in exempt origins/user agents +- f4faa44c: adds server.log_level config + +## 3.11.0 + +### Minor Changes + +- b3c5eeec: Fixed JSON-RPC 2.0 specification compliance by adding the optional data field on an RPCError +- 01ae6625: Adds new Redis rate limiter + +## 3.10.2 + +### Patch Changes + +- 6bb35fd8: Add customizable whitelist error +- 7121648c: Batch metrics and max batch size + +## 3.10.1 + +### Patch Changes + +- b82a8f48: Add logging for origin and remote IP' +- 1bf9559c: Carry over custom limit message in batches + +## 3.10.0 + +### Minor Changes + +- 157ccc84: Support per-method rate limiting + +## 3.9.1 + +### Patch Changes + +- dc4f6a06: Add logging/metrics + +## 3.9.0 + +### Minor Changes + +- b6f4bfcf: Add frontend rate limiting + +### Patch Changes + +- 406a4fce: Unwrap single RPC batches +- 915f3b28: Parameterize full RPC request logging + +## 3.8.9 + +### Patch Changes + +- 063c55cf: Use canned response for eth_accounts + +## 3.8.8 + +### Patch Changes + +- 58dc7adc: Improve robustness against unexpected JSON-RPC from upstream +- 552cd641: Fix concurrent write panic in WS + +## 3.8.7 + +### Patch Changes + +- 6f458607: Bump go-ethereum to 1.10.17 + +## 3.8.6 + +### Patch Changes + +- d79d40c4: proxyd: Proxy requests using batch JSON-RPC + +## 3.8.5 + +### Patch Changes + +- 2a062b11: proxyd: Log ssanitized RPC requests +- d9f058ce: proxyd: Reduced RPC request logging +- a4bfd9e7: proxyd: Limit the number of concurrent RPCs to backends + +## 3.8.4 + +### Patch Changes + +- 08329ba2: proxyd: Record redis cache operation latency +- ae112021: proxyd: Request-scoped context for fast batch RPC short-circuiting + +## 3.8.3 + +### Patch Changes + +- 160f4c3d: Update docker image to use golang 1.18.0 + +## 3.8.2 + +### Patch Changes + +- ae18cea1: Don't hit Redis when the out of service interval is zero + +## 
3.8.1 + +### Patch Changes + +- acf7dbd5: Update to go-ethereum v1.10.16 + +## 3.8.0 + +### Minor Changes + +- 527448bb: Handle nil responses better + +## 3.7.0 + +### Minor Changes + +- 3c2926b1: Add debug cache status header to proxyd responses + +## 3.6.0 + +### Minor Changes + +- 096c5f20: proxyd: Allow cached RPCs to be evicted by redis +- 71d64834: Add caching for block-dependent RPCs +- fd2e1523: proxyd: Cache block-dependent RPCs +- 1760613c: Add integration tests and batching + +## 3.5.0 + +### Minor Changes + +- 025a3c0d: Add request/response payload size metrics to proxyd +- daf8db0b: cache immutable RPC responses in proxyd +- 8aa89bf3: Add X-Forwarded-For header when proxying RPCs on proxyd + +## 3.4.1 + +### Patch Changes + +- 415164e1: Force proxyd build + +## 3.4.0 + +### Minor Changes + +- 4b56ed84: Various proxyd fixes + +## 3.3.0 + +### Minor Changes + +- 7b7ffd2e: Allows string RPC ids on proxyd + +## 3.2.0 + +### Minor Changes + +- 73484138: Adds ability to specify env vars in config + +## 3.1.2 + +### Patch Changes + +- 1b79aa62: Release proxyd + +## 3.1.1 + +### Patch Changes + +- b8802054: Trigger release of proxyd +- 34fcb277: Bump proxyd to test release build workflow + +## 3.1.0 + +### Minor Changes + +- da6138fd: Updated metrics, support local rate limiter + +### Patch Changes + +- 6c7f483b: Add support for additional SSL certificates in Docker container + +## 3.0.0 + +### Major Changes + +- abe231bf: Make endpoints match Geth, better logging + +## 2.0.0 + +### Major Changes + +- 6c50098b: Update metrics, support WS +- f827dbda: Brings back the ability to selectively route RPC methods to backend groups + +### Minor Changes + +- 8cc824e5: Updates proxyd to include additional error metrics. +- 9ba4c5e0: Update metrics, support authenticated endpoints +- 78d0f3f0: Put special errors in a dedicated metric, pass along the content-type header + +### Patch Changes + +- 6e6a55b1: Canary release + +## 1.0.2 + +### Patch Changes + +- b9d2fbee: Trigger releases + +## 1.0.1 + +### Patch Changes + +- 893623c9: Trigger patch releases for dockerhub + +## 1.0.0 + +### Major Changes + +- 28aabc41: Initial release of RPC proxy daemon diff --git a/proxyd/Dockerfile b/proxyd/Dockerfile new file mode 100644 index 0000000..b066e0e --- /dev/null +++ b/proxyd/Dockerfile @@ -0,0 +1,32 @@ +FROM golang:1.21.3-alpine3.18 as builder + +ARG GITCOMMIT=docker +ARG GITDATE=docker +ARG GITVERSION=docker + +RUN apk add make jq git gcc musl-dev linux-headers + +COPY ./proxyd /app + +WORKDIR /app + +RUN make proxyd + +FROM alpine:3.18 + +RUN apk add bind-tools jq curl bash git redis + +COPY ./proxyd/entrypoint.sh /bin/entrypoint.sh + +RUN apk update && \ + apk add ca-certificates && \ + chmod +x /bin/entrypoint.sh + +EXPOSE 8080 + +VOLUME /etc/proxyd + +COPY --from=builder /app/bin/proxyd /bin/proxyd + +ENTRYPOINT ["/bin/entrypoint.sh"] +CMD ["/bin/proxyd", "/etc/proxyd/proxyd.toml"] diff --git a/proxyd/Dockerfile.ignore b/proxyd/Dockerfile.ignore new file mode 100644 index 0000000..eac1d0b --- /dev/null +++ b/proxyd/Dockerfile.ignore @@ -0,0 +1,3 @@ +# ignore everything but proxyd, proxyd defines all its dependencies in the go.mod +* +!/proxyd diff --git a/proxyd/Makefile b/proxyd/Makefile new file mode 100644 index 0000000..d9ffb57 --- /dev/null +++ b/proxyd/Makefile @@ -0,0 +1,25 @@ +LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT) +LDFLAGSSTRING +=-X main.GitDate=$(GITDATE) +LDFLAGSSTRING +=-X main.GitVersion=$(GITVERSION) +LDFLAGS := -ldflags "$(LDFLAGSSTRING)" + +proxyd: + go build -v $(LDFLAGS) 
-o ./bin/proxyd ./cmd/proxyd +.PHONY: proxyd + +fmt: + go mod tidy + gofmt -w . +.PHONY: fmt + +test: + go test -v ./... +.PHONY: test + +lint: + go vet ./... +.PHONY: test + +test-fallback: + go test -v ./... -test.run ^TestFallback$ +.PHONY: test-fallback diff --git a/proxyd/README.md b/proxyd/README.md new file mode 100644 index 0000000..4a3a84b --- /dev/null +++ b/proxyd/README.md @@ -0,0 +1,146 @@ +# rpc-proxy + +This tool implements `proxyd`, an RPC request router and proxy. It does the following things: + +1. Whitelists RPC methods. +2. Routes RPC methods to groups of backend services. +3. Automatically retries failed backend requests. +4. Track backend consensus (`latest`, `safe`, `finalized` blocks), peer count and sync state. +5. Re-write requests and responses to enforce consensus. +6. Load balance requests across backend services. +7. Cache immutable responses from backends. +8. Provides metrics to measure request latency, error rates, and the like. + + +## Usage + +Run `make proxyd` to build the binary. No additional dependencies are necessary. + +To configure `proxyd` for use, you'll need to create a configuration file to define your proxy backends and routing rules. Check out [example.config.toml](./example.config.toml) for how to do this alongside a full list of all options with commentary. + +Once you have a config file, start the daemon via `proxyd .toml`. + + +## Consensus awareness + +Starting on v4.0.0, `proxyd` is aware of the consensus state of its backends. This helps minimize chain reorgs experienced by clients. + +To enable this behavior, you must set `consensus_aware` value to `true` in the backend group. + +When consensus awareness is enabled, `proxyd` will poll the backends for their states and resolve a consensus group based on: +* the common ancestor `latest` block, i.e. if a backend is experiencing a fork, the fork won't be visible to the clients +* the lowest `safe` block +* the lowest `finalized` block +* peer count +* sync state + +The backend group then acts as a round-robin load balancer distributing traffic equally across healthy backends in the consensus group, increasing the availability of the proxy. + +A backend is considered healthy if it meets the following criteria: +* not banned +* avg 1-min moving window error rate ≤ configurable threshold +* avg 1-min moving window latency ≤ configurable threshold +* peer count ≥ configurable threshold +* `latest` block lag ≤ configurable threshold +* last state update ≤ configurable threshold +* not currently syncing + +When a backend is experiencing inconsistent consensus, high error rates or high latency, +the backend will be banned for a configurable amount of time (default 5 minutes) +and won't receive any traffic during this period. + + +## Tag rewrite + +When consensus awareness is enabled, `proxyd` will enforce the consensus state transparently for all the clients. + +For example, if a client requests the `eth_getBlockByNumber` method with the `latest` tag, +`proxyd` will rewrite the request to use the resolved latest block from the consensus group +and forward it to the backend. 
+ +The following request methods are rewritten: +* `eth_getLogs` +* `eth_newFilter` +* `eth_getBalance` +* `eth_getCode` +* `eth_getTransactionCount` +* `eth_call` +* `eth_getStorageAt` +* `eth_getBlockTransactionCountByNumber` +* `eth_getUncleCountByBlockNumber` +* `eth_getBlockByNumber` +* `eth_getTransactionByBlockNumberAndIndex` +* `eth_getUncleByBlockNumberAndIndex` +* `debug_getRawReceipts` + +And `eth_blockNumber` response is overridden with current block consensus. + + +## Cacheable methods + +Cache use Redis and can be enabled for the following immutable methods: + +* `eth_chainId` +* `net_version` +* `eth_getBlockTransactionCountByHash` +* `eth_getUncleCountByBlockHash` +* `eth_getBlockByHash` +* `eth_getTransactionByBlockHashAndIndex` +* `eth_getUncleByBlockHashAndIndex` +* `debug_getRawReceipts` (block hash only) + +## Meta method `consensus_getReceipts` + +To support backends with different specifications in the same backend group, +proxyd exposes a convenient method to fetch receipts abstracting away +what specific backend will serve the request. + +Each backend specifies their preferred method to fetch receipts with `consensus_receipts_target` config, +which will be translated from `consensus_getReceipts`. + +This method takes a `blockNumberOrHash` (i.e. `tag|qty|hash`) +and returns the receipts for all transactions in the block. + +Request example +```json +{ + "jsonrpc":"2.0", + "id": 1, + "params": ["0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b"] +} +``` + +It currently supports translation to the following targets: +* `debug_getRawReceipts(blockOrHash)` (default) +* `alchemy_getTransactionReceipts(blockOrHash)` +* `parity_getBlockReceipts(blockOrHash)` +* `eth_getBlockReceipts(blockOrHash)` + +The selected target is returned in the response, in a wrapped result. + +Response example +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "method": "debug_getRawReceipts", + "result": { + // the actual raw result from backend + } + } +} +``` + +See [op-node receipt fetcher](https://github.com/ethereum-optimism/optimism/blob/186e46a47647a51a658e699e9ff047d39444c2de/op-node/sources/receipts.go#L186-L253). + + +## Metrics + +See `metrics.go` for a list of all available metrics. + +The metrics port is configurable via the `metrics.port` and `metrics.host` keys in the config. + +## Adding Backend SSL Certificates in Docker + +The Docker image runs on Alpine Linux. If you get SSL errors when connecting to a backend within Docker, you may need to add additional certificates to Alpine's certificate store. To do this, bind mount the certificate bundle into a file in `/usr/local/share/ca-certificates`. The `entrypoint.sh` script will then update the store with whatever is in the `ca-certificates` directory prior to starting `proxyd`. 
diff --git a/proxyd/backend.go b/proxyd/backend.go new file mode 100644 index 0000000..802b94a --- /dev/null +++ b/proxyd/backend.go @@ -0,0 +1,1272 @@ +package proxyd + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "math/rand" + "net/http" + "sort" + "strconv" + "strings" + "sync" + "time" + + sw "github.com/ethereum-optimism/optimism/proxyd/pkg/avg-sliding-window" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rpc" + "github.com/gorilla/websocket" + "github.com/prometheus/client_golang/prometheus" + "github.com/xaionaro-go/weightedshuffle" + "golang.org/x/sync/semaphore" +) + +const ( + JSONRPCVersion = "2.0" + JSONRPCErrorInternal = -32000 + notFoundRpcError = -32601 +) + +var ( + ErrParseErr = &RPCErr{ + Code: -32700, + Message: "parse error", + HTTPErrorCode: 400, + } + ErrInternal = &RPCErr{ + Code: JSONRPCErrorInternal, + Message: "internal error", + HTTPErrorCode: 500, + } + ErrMethodNotWhitelisted = &RPCErr{ + Code: notFoundRpcError, + Message: "rpc method is not whitelisted", + HTTPErrorCode: 403, + } + ErrBackendOffline = &RPCErr{ + Code: JSONRPCErrorInternal - 10, + Message: "backend offline", + HTTPErrorCode: 503, + } + ErrNoBackends = &RPCErr{ + Code: JSONRPCErrorInternal - 11, + Message: "no backends available for method", + HTTPErrorCode: 503, + } + ErrBackendOverCapacity = &RPCErr{ + Code: JSONRPCErrorInternal - 12, + Message: "backend is over capacity", + HTTPErrorCode: 429, + } + ErrBackendBadResponse = &RPCErr{ + Code: JSONRPCErrorInternal - 13, + Message: "backend returned an invalid response", + HTTPErrorCode: 500, + } + ErrTooManyBatchRequests = &RPCErr{ + Code: JSONRPCErrorInternal - 14, + Message: "too many RPC calls in batch request", + } + ErrGatewayTimeout = &RPCErr{ + Code: JSONRPCErrorInternal - 15, + Message: "gateway timeout", + HTTPErrorCode: 504, + } + ErrOverRateLimit = &RPCErr{ + Code: JSONRPCErrorInternal - 16, + Message: "over rate limit", + HTTPErrorCode: 429, + } + ErrOverSenderRateLimit = &RPCErr{ + Code: JSONRPCErrorInternal - 17, + Message: "sender is over rate limit", + HTTPErrorCode: 429, + } + ErrNotHealthy = &RPCErr{ + Code: JSONRPCErrorInternal - 18, + Message: "backend is currently not healthy to serve traffic", + HTTPErrorCode: 503, + } + ErrBlockOutOfRange = &RPCErr{ + Code: JSONRPCErrorInternal - 19, + Message: "block is out of range", + HTTPErrorCode: 400, + } + + ErrRequestBodyTooLarge = &RPCErr{ + Code: JSONRPCErrorInternal - 21, + Message: "request body too large", + HTTPErrorCode: 413, + } + + ErrBackendResponseTooLarge = &RPCErr{ + Code: JSONRPCErrorInternal - 20, + Message: "backend response too large", + HTTPErrorCode: 500, + } + + ErrBackendUnexpectedJSONRPC = errors.New("backend returned an unexpected JSON-RPC response") + + ErrConsensusGetReceiptsCantBeBatched = errors.New("consensus_getReceipts cannot be batched") + ErrConsensusGetReceiptsInvalidTarget = errors.New("unsupported consensus_receipts_target") +) + +func ErrInvalidRequest(msg string) *RPCErr { + return &RPCErr{ + Code: -32600, + Message: msg, + HTTPErrorCode: 400, + } +} + +func ErrInvalidParams(msg string) *RPCErr { + return &RPCErr{ + Code: -32602, + Message: msg, + HTTPErrorCode: 400, + } +} + +type Backend struct { + Name string + rpcURL string + receiptsTarget string + wsURL string + authUsername string + authPassword string + headers map[string]string + client *LimitedHTTPClient + dialer *websocket.Dialer + maxRetries int + 
maxResponseSize int64 + maxRPS int + maxWSConns int + outOfServiceInterval time.Duration + stripTrailingXFF bool + proxydIP string + + skipPeerCountCheck bool + forcedCandidate bool + + maxDegradedLatencyThreshold time.Duration + maxLatencyThreshold time.Duration + maxErrorRateThreshold float64 + + latencySlidingWindow *sw.AvgSlidingWindow + networkRequestsSlidingWindow *sw.AvgSlidingWindow + networkErrorsSlidingWindow *sw.AvgSlidingWindow + + weight int +} + +type BackendOpt func(b *Backend) + +func WithBasicAuth(username, password string) BackendOpt { + return func(b *Backend) { + b.authUsername = username + b.authPassword = password + } +} + +func WithHeaders(headers map[string]string) BackendOpt { + return func(b *Backend) { + b.headers = headers + } +} + +func WithTimeout(timeout time.Duration) BackendOpt { + return func(b *Backend) { + b.client.Timeout = timeout + } +} + +func WithMaxRetries(retries int) BackendOpt { + return func(b *Backend) { + b.maxRetries = retries + } +} + +func WithMaxResponseSize(size int64) BackendOpt { + return func(b *Backend) { + b.maxResponseSize = size + } +} + +func WithOutOfServiceDuration(interval time.Duration) BackendOpt { + return func(b *Backend) { + b.outOfServiceInterval = interval + } +} + +func WithMaxRPS(maxRPS int) BackendOpt { + return func(b *Backend) { + b.maxRPS = maxRPS + } +} + +func WithMaxWSConns(maxConns int) BackendOpt { + return func(b *Backend) { + b.maxWSConns = maxConns + } +} + +func WithTLSConfig(tlsConfig *tls.Config) BackendOpt { + return func(b *Backend) { + if b.client.Transport == nil { + b.client.Transport = &http.Transport{} + } + b.client.Transport.(*http.Transport).TLSClientConfig = tlsConfig + } +} + +func WithStrippedTrailingXFF() BackendOpt { + return func(b *Backend) { + b.stripTrailingXFF = true + } +} + +func WithProxydIP(ip string) BackendOpt { + return func(b *Backend) { + b.proxydIP = ip + } +} + +func WithConsensusSkipPeerCountCheck(skipPeerCountCheck bool) BackendOpt { + return func(b *Backend) { + b.skipPeerCountCheck = skipPeerCountCheck + } +} + +func WithConsensusForcedCandidate(forcedCandidate bool) BackendOpt { + return func(b *Backend) { + b.forcedCandidate = forcedCandidate + } +} + +func WithWeight(weight int) BackendOpt { + return func(b *Backend) { + b.weight = weight + } +} + +func WithMaxDegradedLatencyThreshold(maxDegradedLatencyThreshold time.Duration) BackendOpt { + return func(b *Backend) { + b.maxDegradedLatencyThreshold = maxDegradedLatencyThreshold + } +} + +func WithMaxLatencyThreshold(maxLatencyThreshold time.Duration) BackendOpt { + return func(b *Backend) { + b.maxLatencyThreshold = maxLatencyThreshold + } +} + +func WithMaxErrorRateThreshold(maxErrorRateThreshold float64) BackendOpt { + return func(b *Backend) { + b.maxErrorRateThreshold = maxErrorRateThreshold + } +} + +func WithConsensusReceiptTarget(receiptsTarget string) BackendOpt { + return func(b *Backend) { + b.receiptsTarget = receiptsTarget + } +} + +type indexedReqRes struct { + index int + req *RPCReq + res *RPCRes +} + +const proxydHealthzMethod = "proxyd_healthz" + +const ConsensusGetReceiptsMethod = "consensus_getReceipts" + +const ReceiptsTargetDebugGetRawReceipts = "debug_getRawReceipts" +const ReceiptsTargetAlchemyGetTransactionReceipts = "alchemy_getTransactionReceipts" +const ReceiptsTargetParityGetTransactionReceipts = "parity_getBlockReceipts" +const ReceiptsTargetEthGetTransactionReceipts = "eth_getBlockReceipts" + +type ConsensusGetReceiptsResult struct { + Method string `json:"method"` + Result interface{} 
`json:"result"` +} + +// BlockHashOrNumberParameter is a non-conventional wrapper used by alchemy_getTransactionReceipts +type BlockHashOrNumberParameter struct { + BlockHash *common.Hash `json:"blockHash"` + BlockNumber *rpc.BlockNumber `json:"blockNumber"` +} + +func NewBackend( + name string, + rpcURL string, + wsURL string, + rpcSemaphore *semaphore.Weighted, + opts ...BackendOpt, +) *Backend { + backend := &Backend{ + Name: name, + rpcURL: rpcURL, + wsURL: wsURL, + maxResponseSize: math.MaxInt64, + client: &LimitedHTTPClient{ + Client: http.Client{Timeout: 5 * time.Second}, + sem: rpcSemaphore, + backendName: name, + }, + dialer: &websocket.Dialer{}, + + maxLatencyThreshold: 10 * time.Second, + maxDegradedLatencyThreshold: 5 * time.Second, + maxErrorRateThreshold: 0.5, + + latencySlidingWindow: sw.NewSlidingWindow(), + networkRequestsSlidingWindow: sw.NewSlidingWindow(), + networkErrorsSlidingWindow: sw.NewSlidingWindow(), + } + + backend.Override(opts...) + + if !backend.stripTrailingXFF && backend.proxydIP == "" { + log.Warn("proxied requests' XFF header will not contain the proxyd ip address") + } + + return backend +} + +func (b *Backend) Override(opts ...BackendOpt) { + for _, opt := range opts { + opt(b) + } +} + +func (b *Backend) Forward(ctx context.Context, reqs []*RPCReq, isBatch bool) ([]*RPCRes, error) { + var lastError error + // <= to account for the first attempt not technically being + // a retry + for i := 0; i <= b.maxRetries; i++ { + RecordBatchRPCForward(ctx, b.Name, reqs, RPCRequestSourceHTTP) + metricLabelMethod := reqs[0].Method + if isBatch { + metricLabelMethod = "" + } + timer := prometheus.NewTimer( + rpcBackendRequestDurationSumm.WithLabelValues( + b.Name, + metricLabelMethod, + strconv.FormatBool(isBatch), + ), + ) + + res, err := b.doForward(ctx, reqs, isBatch) + switch err { + case nil: // do nothing + case ErrBackendResponseTooLarge: + log.Warn( + "backend response too large", + "name", b.Name, + "req_id", GetReqID(ctx), + "max", b.maxResponseSize, + ) + RecordBatchRPCError(ctx, b.Name, reqs, err) + case ErrConsensusGetReceiptsCantBeBatched: + log.Warn( + "Received unsupported batch request for consensus_getReceipts", + "name", b.Name, + "req_id", GetReqID(ctx), + "err", err, + ) + case ErrConsensusGetReceiptsInvalidTarget: + log.Error( + "Unsupported consensus_receipts_target for consensus_getReceipts", + "name", b.Name, + "req_id", GetReqID(ctx), + "err", err, + ) + // ErrBackendUnexpectedJSONRPC occurs because infura responds with a single JSON-RPC object + // to a batch request whenever any Request Object in the batch would induce a partial error. + // We don't label the backend offline in this case. But the error is still returned to + // callers so failover can occur if needed. 
+ case ErrBackendUnexpectedJSONRPC: + log.Debug( + "Received unexpected JSON-RPC response", + "name", b.Name, + "req_id", GetReqID(ctx), + "err", err, + ) + default: + lastError = err + log.Warn( + "backend request failed, trying again", + "name", b.Name, + "req_id", GetReqID(ctx), + "err", err, + ) + timer.ObserveDuration() + RecordBatchRPCError(ctx, b.Name, reqs, err) + sleepContext(ctx, calcBackoff(i)) + continue + } + timer.ObserveDuration() + + MaybeRecordErrorsInRPCRes(ctx, b.Name, reqs, res) + return res, err + } + + return nil, wrapErr(lastError, "permanent error forwarding request") +} + +func (b *Backend) ProxyWS(clientConn *websocket.Conn, methodWhitelist *StringSet) (*WSProxier, error) { + backendConn, _, err := b.dialer.Dial(b.wsURL, nil) // nolint:bodyclose + if err != nil { + return nil, wrapErr(err, "error dialing backend") + } + + activeBackendWsConnsGauge.WithLabelValues(b.Name).Inc() + return NewWSProxier(b, clientConn, backendConn, methodWhitelist), nil +} + +// ForwardRPC makes a call directly to a backend and populate the response into `res` +func (b *Backend) ForwardRPC(ctx context.Context, res *RPCRes, id string, method string, params ...any) error { + jsonParams, err := json.Marshal(params) + if err != nil { + return err + } + + rpcReq := RPCReq{ + JSONRPC: JSONRPCVersion, + Method: method, + Params: jsonParams, + ID: []byte(id), + } + + slicedRes, err := b.doForward(ctx, []*RPCReq{&rpcReq}, false) + if err != nil { + return err + } + + if len(slicedRes) != 1 { + return fmt.Errorf("unexpected response len for non-batched request (len != 1)") + } + if slicedRes[0].IsError() { + return fmt.Errorf(slicedRes[0].Error.Error()) + } + + *res = *(slicedRes[0]) + return nil +} + +func (b *Backend) doForward(ctx context.Context, rpcReqs []*RPCReq, isBatch bool) ([]*RPCRes, error) { + // we are concerned about network error rates, so we record 1 request independently of how many are in the batch + b.networkRequestsSlidingWindow.Incr() + + translatedReqs := make(map[string]*RPCReq, len(rpcReqs)) + // translate consensus_getReceipts to receipts target + // right now we only support non-batched + if isBatch { + for _, rpcReq := range rpcReqs { + if rpcReq.Method == ConsensusGetReceiptsMethod { + return nil, ErrConsensusGetReceiptsCantBeBatched + } + } + } else { + for _, rpcReq := range rpcReqs { + if rpcReq.Method == ConsensusGetReceiptsMethod { + translatedReqs[string(rpcReq.ID)] = rpcReq + rpcReq.Method = b.receiptsTarget + var reqParams []rpc.BlockNumberOrHash + err := json.Unmarshal(rpcReq.Params, &reqParams) + if err != nil { + return nil, ErrInvalidRequest("invalid request") + } + + var translatedParams []byte + switch rpcReq.Method { + case ReceiptsTargetDebugGetRawReceipts, + ReceiptsTargetEthGetTransactionReceipts, + ReceiptsTargetParityGetTransactionReceipts: + // conventional methods use an array of strings having either block number or block hash + // i.e. ["0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b"] + params := make([]string, 1) + if reqParams[0].BlockNumber != nil { + params[0] = reqParams[0].BlockNumber.String() + } else { + params[0] = reqParams[0].BlockHash.Hex() + } + translatedParams = mustMarshalJSON(params) + case ReceiptsTargetAlchemyGetTransactionReceipts: + // alchemy uses an array of object with either block number or block hash + // i.e. 
[{ blockHash: "0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b" }] + params := make([]BlockHashOrNumberParameter, 1) + if reqParams[0].BlockNumber != nil { + params[0].BlockNumber = reqParams[0].BlockNumber + } else { + params[0].BlockHash = reqParams[0].BlockHash + } + translatedParams = mustMarshalJSON(params) + default: + return nil, ErrConsensusGetReceiptsInvalidTarget + } + + rpcReq.Params = translatedParams + } + } + } + + isSingleElementBatch := len(rpcReqs) == 1 + + // Single element batches are unwrapped before being sent + // since Alchemy handles single requests better than batches. + var body []byte + if isSingleElementBatch { + body = mustMarshalJSON(rpcReqs[0]) + } else { + body = mustMarshalJSON(rpcReqs) + } + + httpReq, err := http.NewRequestWithContext(ctx, "POST", b.rpcURL, bytes.NewReader(body)) + if err != nil { + b.networkErrorsSlidingWindow.Incr() + RecordBackendNetworkErrorRateSlidingWindow(b, b.ErrorRate()) + return nil, wrapErr(err, "error creating backend request") + } + + if b.authPassword != "" { + httpReq.SetBasicAuth(b.authUsername, b.authPassword) + } + + xForwardedFor := GetXForwardedFor(ctx) + if b.stripTrailingXFF { + xForwardedFor = stripXFF(xForwardedFor) + } else if b.proxydIP != "" { + xForwardedFor = fmt.Sprintf("%s, %s", xForwardedFor, b.proxydIP) + } + + httpReq.Header.Set("content-type", "application/json") + httpReq.Header.Set("X-Forwarded-For", xForwardedFor) + + for name, value := range b.headers { + httpReq.Header.Set(name, value) + } + + start := time.Now() + httpRes, err := b.client.DoLimited(httpReq) + if err != nil { + b.networkErrorsSlidingWindow.Incr() + RecordBackendNetworkErrorRateSlidingWindow(b, b.ErrorRate()) + return nil, wrapErr(err, "error in backend request") + } + + metricLabelMethod := rpcReqs[0].Method + if isBatch { + metricLabelMethod = "" + } + rpcBackendHTTPResponseCodesTotal.WithLabelValues( + GetAuthCtx(ctx), + b.Name, + metricLabelMethod, + strconv.Itoa(httpRes.StatusCode), + strconv.FormatBool(isBatch), + ).Inc() + + // Alchemy returns a 400 on bad JSONs, so handle that case + if httpRes.StatusCode != 200 && httpRes.StatusCode != 400 { + b.networkErrorsSlidingWindow.Incr() + RecordBackendNetworkErrorRateSlidingWindow(b, b.ErrorRate()) + return nil, fmt.Errorf("response code %d", httpRes.StatusCode) + } + + defer httpRes.Body.Close() + resB, err := io.ReadAll(LimitReader(httpRes.Body, b.maxResponseSize)) + if errors.Is(err, ErrLimitReaderOverLimit) { + return nil, ErrBackendResponseTooLarge + } + if err != nil { + b.networkErrorsSlidingWindow.Incr() + RecordBackendNetworkErrorRateSlidingWindow(b, b.ErrorRate()) + return nil, wrapErr(err, "error reading response body") + } + + var rpcRes []*RPCRes + if isSingleElementBatch { + var singleRes RPCRes + if err := json.Unmarshal(resB, &singleRes); err != nil { + return nil, ErrBackendBadResponse + } + rpcRes = []*RPCRes{ + &singleRes, + } + } else { + if err := json.Unmarshal(resB, &rpcRes); err != nil { + // Infura may return a single JSON-RPC response if, for example, the batch contains a request for an unsupported method + if responseIsNotBatched(resB) { + b.networkErrorsSlidingWindow.Incr() + RecordBackendNetworkErrorRateSlidingWindow(b, b.ErrorRate()) + return nil, ErrBackendUnexpectedJSONRPC + } + b.networkErrorsSlidingWindow.Incr() + RecordBackendNetworkErrorRateSlidingWindow(b, b.ErrorRate()) + return nil, ErrBackendBadResponse + } + } + + if len(rpcReqs) != len(rpcRes) { + b.networkErrorsSlidingWindow.Incr() + 
RecordBackendNetworkErrorRateSlidingWindow(b, b.ErrorRate()) + return nil, ErrBackendUnexpectedJSONRPC + } + + // capture the HTTP status code in the response. this will only + // ever be 400 given the status check on line 318 above. + if httpRes.StatusCode != 200 { + for _, res := range rpcRes { + res.Error.HTTPErrorCode = httpRes.StatusCode + } + } + duration := time.Since(start) + b.latencySlidingWindow.Add(float64(duration)) + RecordBackendNetworkLatencyAverageSlidingWindow(b, time.Duration(b.latencySlidingWindow.Avg())) + RecordBackendNetworkErrorRateSlidingWindow(b, b.ErrorRate()) + + // enrich the response with the actual request method + for _, res := range rpcRes { + translatedReq, exist := translatedReqs[string(res.ID)] + if exist { + res.Result = ConsensusGetReceiptsResult{ + Method: translatedReq.Method, + Result: res.Result, + } + } + } + + sortBatchRPCResponse(rpcReqs, rpcRes) + + return rpcRes, nil +} + +// IsHealthy checks if the backend is able to serve traffic, based on dynamic parameters +func (b *Backend) IsHealthy() bool { + errorRate := b.ErrorRate() + avgLatency := time.Duration(b.latencySlidingWindow.Avg()) + if errorRate >= b.maxErrorRateThreshold { + return false + } + if avgLatency >= b.maxLatencyThreshold { + return false + } + return true +} + +// ErrorRate returns the instant error rate of the backend +func (b *Backend) ErrorRate() (errorRate float64) { + // we only really start counting the error rate after a minimum of 10 requests + // this is to avoid false positives when the backend is just starting up + if b.networkRequestsSlidingWindow.Sum() >= 10 { + errorRate = b.networkErrorsSlidingWindow.Sum() / b.networkRequestsSlidingWindow.Sum() + } + return errorRate +} + +// IsDegraded checks if the backend is serving traffic in a degraded state (i.e. used as a last resource) +func (b *Backend) IsDegraded() bool { + avgLatency := time.Duration(b.latencySlidingWindow.Avg()) + return avgLatency >= b.maxDegradedLatencyThreshold +} + +func responseIsNotBatched(b []byte) bool { + var r RPCRes + return json.Unmarshal(b, &r) == nil +} + +// sortBatchRPCResponse sorts the RPCRes slice according to the position of its corresponding ID in the RPCReq slice +func sortBatchRPCResponse(req []*RPCReq, res []*RPCRes) { + pos := make(map[string]int, len(req)) + for i, r := range req { + key := string(r.ID) + if _, ok := pos[key]; ok { + panic("bug! 
detected requests with duplicate IDs") + } + pos[key] = i + } + + sort.Slice(res, func(i, j int) bool { + l := res[i].ID + r := res[j].ID + return pos[string(l)] < pos[string(r)] + }) +} + +type BackendGroup struct { + Name string + Backends []*Backend + WeightedRouting bool + Consensus *ConsensusPoller + FallbackBackends map[string]bool +} + +func (bg *BackendGroup) Fallbacks() []*Backend { + fallbacks := []*Backend{} + for _, a := range bg.Backends { + if fallback, ok := bg.FallbackBackends[a.Name]; ok && fallback { + fallbacks = append(fallbacks, a) + } + } + return fallbacks +} + +func (bg *BackendGroup) Primaries() []*Backend { + primaries := []*Backend{} + for _, a := range bg.Backends { + fallback, ok := bg.FallbackBackends[a.Name] + if ok && !fallback { + primaries = append(primaries, a) + } + } + return primaries +} + +// NOTE: BackendGroup Forward contains the log for balancing with consensus aware +func (bg *BackendGroup) Forward(ctx context.Context, rpcReqs []*RPCReq, isBatch bool) ([]*RPCRes, string, error) { + if len(rpcReqs) == 0 { + return nil, "", nil + } + + backends := bg.orderedBackendsForRequest() + + overriddenResponses := make([]*indexedReqRes, 0) + rewrittenReqs := make([]*RPCReq, 0, len(rpcReqs)) + + if bg.Consensus != nil { + // When `consensus_aware` is set to `true`, the backend group acts as a load balancer + // serving traffic from any backend that agrees in the consensus group + + // We also rewrite block tags to enforce compliance with consensus + rctx := RewriteContext{ + latest: bg.Consensus.GetLatestBlockNumber(), + safe: bg.Consensus.GetSafeBlockNumber(), + finalized: bg.Consensus.GetFinalizedBlockNumber(), + maxBlockRange: bg.Consensus.maxBlockRange, + } + + for i, req := range rpcReqs { + res := RPCRes{JSONRPC: JSONRPCVersion, ID: req.ID} + result, err := RewriteTags(rctx, req, &res) + switch result { + case RewriteOverrideError: + overriddenResponses = append(overriddenResponses, &indexedReqRes{ + index: i, + req: req, + res: &res, + }) + if errors.Is(err, ErrRewriteBlockOutOfRange) { + res.Error = ErrBlockOutOfRange + } else if errors.Is(err, ErrRewriteRangeTooLarge) { + res.Error = ErrInvalidParams( + fmt.Sprintf("block range greater than %d max", rctx.maxBlockRange), + ) + } else { + res.Error = ErrParseErr + } + case RewriteOverrideResponse: + overriddenResponses = append(overriddenResponses, &indexedReqRes{ + index: i, + req: req, + res: &res, + }) + case RewriteOverrideRequest, RewriteNone: + rewrittenReqs = append(rewrittenReqs, req) + } + } + rpcReqs = rewrittenReqs + } + + rpcRequestsTotal.Inc() + + for _, back := range backends { + res := make([]*RPCRes, 0) + var err error + + servedBy := fmt.Sprintf("%s/%s", bg.Name, back.Name) + + if len(rpcReqs) > 0 { + res, err = back.Forward(ctx, rpcReqs, isBatch) + if errors.Is(err, ErrConsensusGetReceiptsCantBeBatched) || + errors.Is(err, ErrConsensusGetReceiptsInvalidTarget) || + errors.Is(err, ErrMethodNotWhitelisted) { + return nil, "", err + } + if errors.Is(err, ErrBackendResponseTooLarge) { + return nil, servedBy, err + } + if errors.Is(err, ErrBackendOffline) { + log.Warn( + "skipping offline backend", + "name", back.Name, + "auth", GetAuthCtx(ctx), + "req_id", GetReqID(ctx), + ) + continue + } + if errors.Is(err, ErrBackendOverCapacity) { + log.Warn( + "skipping over-capacity backend", + "name", back.Name, + "auth", GetAuthCtx(ctx), + "req_id", GetReqID(ctx), + ) + continue + } + if err != nil { + log.Error( + "error forwarding request to backend", + "name", back.Name, + "req_id", 
GetReqID(ctx), + "auth", GetAuthCtx(ctx), + "err", err, + ) + continue + } + } + + // re-apply overridden responses + for _, ov := range overriddenResponses { + if len(res) > 0 { + // insert ov.res at position ov.index + res = append(res[:ov.index], append([]*RPCRes{ov.res}, res[ov.index:]...)...) + } else { + res = append(res, ov.res) + } + } + + return res, servedBy, nil + } + + RecordUnserviceableRequest(ctx, RPCRequestSourceHTTP) + return nil, "", ErrNoBackends +} + +func (bg *BackendGroup) ProxyWS(ctx context.Context, clientConn *websocket.Conn, methodWhitelist *StringSet) (*WSProxier, error) { + for _, back := range bg.Backends { + proxier, err := back.ProxyWS(clientConn, methodWhitelist) + if errors.Is(err, ErrBackendOffline) { + log.Warn( + "skipping offline backend", + "name", back.Name, + "req_id", GetReqID(ctx), + "auth", GetAuthCtx(ctx), + ) + continue + } + if errors.Is(err, ErrBackendOverCapacity) { + log.Warn( + "skipping over-capacity backend", + "name", back.Name, + "req_id", GetReqID(ctx), + "auth", GetAuthCtx(ctx), + ) + continue + } + if err != nil { + log.Warn( + "error dialing ws backend", + "name", back.Name, + "req_id", GetReqID(ctx), + "auth", GetAuthCtx(ctx), + "err", err, + ) + continue + } + return proxier, nil + } + + return nil, ErrNoBackends +} + +func weightedShuffle(backends []*Backend) { + weight := func(i int) float64 { + return float64(backends[i].weight) + } + + weightedshuffle.ShuffleInplace(backends, weight, nil) +} + +func (bg *BackendGroup) orderedBackendsForRequest() []*Backend { + if bg.Consensus != nil { + return bg.loadBalancedConsensusGroup() + } else if bg.WeightedRouting { + result := make([]*Backend, len(bg.Backends)) + copy(result, bg.Backends) + weightedShuffle(result) + return result + } else { + return bg.Backends + } +} + +func (bg *BackendGroup) loadBalancedConsensusGroup() []*Backend { + cg := bg.Consensus.GetConsensusGroup() + + backendsHealthy := make([]*Backend, 0, len(cg)) + backendsDegraded := make([]*Backend, 0, len(cg)) + // separate into healthy, degraded and unhealthy backends + for _, be := range cg { + // unhealthy are filtered out and not attempted + if !be.IsHealthy() { + continue + } + if be.IsDegraded() { + backendsDegraded = append(backendsDegraded, be) + continue + } + backendsHealthy = append(backendsHealthy, be) + } + + // shuffle both slices + r := rand.New(rand.NewSource(time.Now().UnixNano())) + r.Shuffle(len(backendsHealthy), func(i, j int) { + backendsHealthy[i], backendsHealthy[j] = backendsHealthy[j], backendsHealthy[i] + }) + r.Shuffle(len(backendsDegraded), func(i, j int) { + backendsDegraded[i], backendsDegraded[j] = backendsDegraded[j], backendsDegraded[i] + }) + + if bg.WeightedRouting { + weightedShuffle(backendsHealthy) + } + + // healthy are put into a priority position + // degraded backends are used as fallback + backendsHealthy = append(backendsHealthy, backendsDegraded...) 
+ + return backendsHealthy +} + +func (bg *BackendGroup) Shutdown() { + if bg.Consensus != nil { + bg.Consensus.Shutdown() + } +} + +func calcBackoff(i int) time.Duration { + jitter := float64(rand.Int63n(250)) + ms := math.Min(math.Pow(2, float64(i))*1000+jitter, 3000) + return time.Duration(ms) * time.Millisecond +} + +type WSProxier struct { + backend *Backend + clientConn *websocket.Conn + clientConnMu sync.Mutex + backendConn *websocket.Conn + backendConnMu sync.Mutex + methodWhitelist *StringSet + readTimeout time.Duration + writeTimeout time.Duration +} + +func NewWSProxier(backend *Backend, clientConn, backendConn *websocket.Conn, methodWhitelist *StringSet) *WSProxier { + return &WSProxier{ + backend: backend, + clientConn: clientConn, + backendConn: backendConn, + methodWhitelist: methodWhitelist, + readTimeout: defaultWSReadTimeout, + writeTimeout: defaultWSWriteTimeout, + } +} + +func (w *WSProxier) Proxy(ctx context.Context) error { + errC := make(chan error, 2) + go w.clientPump(ctx, errC) + go w.backendPump(ctx, errC) + err := <-errC + w.close() + return err +} + +func (w *WSProxier) clientPump(ctx context.Context, errC chan error) { + for { + // Block until we get a message. + msgType, msg, err := w.clientConn.ReadMessage() + if err != nil { + if err := w.writeBackendConn(websocket.CloseMessage, formatWSError(err)); err != nil { + log.Error("error writing backendConn message", "err", err) + errC <- err + return + } + } + + RecordWSMessage(ctx, w.backend.Name, SourceClient) + + // Route control messages to the backend. These don't + // count towards the total RPC requests count. + if msgType != websocket.TextMessage && msgType != websocket.BinaryMessage { + err := w.writeBackendConn(msgType, msg) + if err != nil { + errC <- err + return + } + continue + } + + rpcRequestsTotal.Inc() + + // Don't bother sending invalid requests to the backend, + // just handle them here. + req, err := w.prepareClientMsg(msg) + if err != nil { + var id json.RawMessage + method := MethodUnknown + if req != nil { + id = req.ID + method = req.Method + } + log.Info( + "error preparing client message", + "auth", GetAuthCtx(ctx), + "req_id", GetReqID(ctx), + "err", err, + ) + msg = mustMarshalJSON(NewRPCErrorRes(id, err)) + RecordRPCError(ctx, BackendProxyd, method, err) + + // Send error response to client + err = w.writeClientConn(msgType, msg) + if err != nil { + errC <- err + return + } + continue + } + + // Send eth_accounts requests directly to the client + if req.Method == "eth_accounts" { + msg = mustMarshalJSON(NewRPCRes(req.ID, emptyArrayResponse)) + RecordRPCForward(ctx, BackendProxyd, "eth_accounts", RPCRequestSourceWS) + err = w.writeClientConn(msgType, msg) + if err != nil { + errC <- err + return + } + continue + } + + RecordRPCForward(ctx, w.backend.Name, req.Method, RPCRequestSourceWS) + log.Info( + "forwarded WS message to backend", + "method", req.Method, + "auth", GetAuthCtx(ctx), + "req_id", GetReqID(ctx), + ) + + err = w.writeBackendConn(msgType, msg) + if err != nil { + errC <- err + return + } + } +} + +func (w *WSProxier) backendPump(ctx context.Context, errC chan error) { + for { + // Block until we get a message. + msgType, msg, err := w.backendConn.ReadMessage() + if err != nil { + if err := w.writeClientConn(websocket.CloseMessage, formatWSError(err)); err != nil { + log.Error("error writing clientConn message", "err", err) + errC <- err + return + } + } + + RecordWSMessage(ctx, w.backend.Name, SourceBackend) + + // Route control messages directly to the client. 
+ if msgType != websocket.TextMessage && msgType != websocket.BinaryMessage { + err := w.writeClientConn(msgType, msg) + if err != nil { + errC <- err + return + } + continue + } + + res, err := w.parseBackendMsg(msg) + if err != nil { + var id json.RawMessage + if res != nil { + id = res.ID + } + msg = mustMarshalJSON(NewRPCErrorRes(id, err)) + log.Info("backend responded with error", "err", err) + } else { + if res.IsError() { + log.Info( + "backend responded with RPC error", + "code", res.Error.Code, + "msg", res.Error.Message, + "source", "ws", + "auth", GetAuthCtx(ctx), + "req_id", GetReqID(ctx), + ) + RecordRPCError(ctx, w.backend.Name, MethodUnknown, res.Error) + } else { + log.Info( + "forwarded WS message to client", + "auth", GetAuthCtx(ctx), + "req_id", GetReqID(ctx), + ) + } + } + + err = w.writeClientConn(msgType, msg) + if err != nil { + errC <- err + return + } + } +} + +func (w *WSProxier) close() { + w.clientConn.Close() + w.backendConn.Close() + activeBackendWsConnsGauge.WithLabelValues(w.backend.Name).Dec() +} + +func (w *WSProxier) prepareClientMsg(msg []byte) (*RPCReq, error) { + req, err := ParseRPCReq(msg) + if err != nil { + return nil, err + } + + if !w.methodWhitelist.Has(req.Method) { + return req, ErrMethodNotWhitelisted + } + + return req, nil +} + +func (w *WSProxier) parseBackendMsg(msg []byte) (*RPCRes, error) { + res, err := ParseRPCRes(bytes.NewReader(msg)) + if err != nil { + log.Warn("error parsing RPC response", "source", "ws", "err", err) + return res, ErrBackendBadResponse + } + return res, nil +} + +func (w *WSProxier) writeClientConn(msgType int, msg []byte) error { + w.clientConnMu.Lock() + defer w.clientConnMu.Unlock() + if err := w.clientConn.SetWriteDeadline(time.Now().Add(w.writeTimeout)); err != nil { + log.Error("ws client write timeout", "err", err) + return err + } + err := w.clientConn.WriteMessage(msgType, msg) + return err +} + +func (w *WSProxier) writeBackendConn(msgType int, msg []byte) error { + w.backendConnMu.Lock() + defer w.backendConnMu.Unlock() + if err := w.backendConn.SetWriteDeadline(time.Now().Add(w.writeTimeout)); err != nil { + log.Error("ws backend write timeout", "err", err) + return err + } + err := w.backendConn.WriteMessage(msgType, msg) + return err +} + +func mustMarshalJSON(in interface{}) []byte { + out, err := json.Marshal(in) + if err != nil { + panic(err) + } + return out +} + +func formatWSError(err error) []byte { + m := websocket.FormatCloseMessage(websocket.CloseNormalClosure, fmt.Sprintf("%v", err)) + if e, ok := err.(*websocket.CloseError); ok { + if e.Code != websocket.CloseNoStatusReceived { + m = websocket.FormatCloseMessage(e.Code, e.Text) + } + } + return m +} + +func sleepContext(ctx context.Context, duration time.Duration) { + select { + case <-ctx.Done(): + case <-time.After(duration): + } +} + +type LimitedHTTPClient struct { + http.Client + sem *semaphore.Weighted + backendName string +} + +func (c *LimitedHTTPClient) DoLimited(req *http.Request) (*http.Response, error) { + if err := c.sem.Acquire(req.Context(), 1); err != nil { + tooManyRequestErrorsTotal.WithLabelValues(c.backendName).Inc() + return nil, wrapErr(err, "too many requests") + } + defer c.sem.Release(1) + return c.Do(req) +} + +func RecordBatchRPCError(ctx context.Context, backendName string, reqs []*RPCReq, err error) { + for _, req := range reqs { + RecordRPCError(ctx, backendName, req.Method, err) + } +} + +func MaybeRecordErrorsInRPCRes(ctx context.Context, backendName string, reqs []*RPCReq, resBatch []*RPCRes) { + 
log.Info("forwarded RPC request", + "backend", backendName, + "auth", GetAuthCtx(ctx), + "req_id", GetReqID(ctx), + "batch_size", len(reqs), + ) + + var lastError *RPCErr + for i, res := range resBatch { + if res.IsError() { + lastError = res.Error + RecordRPCError(ctx, backendName, reqs[i].Method, res.Error) + } + } + + if lastError != nil { + log.Info( + "backend responded with RPC error", + "backend", backendName, + "last_error_code", lastError.Code, + "last_error_msg", lastError.Message, + "req_id", GetReqID(ctx), + "source", "rpc", + "auth", GetAuthCtx(ctx), + ) + } +} + +func RecordBatchRPCForward(ctx context.Context, backendName string, reqs []*RPCReq, source string) { + for _, req := range reqs { + RecordRPCForward(ctx, backendName, req.Method, source) + } +} + +func stripXFF(xff string) string { + ipList := strings.Split(xff, ",") + return strings.TrimSpace(ipList[0]) +} diff --git a/proxyd/backend_test.go b/proxyd/backend_test.go new file mode 100644 index 0000000..73ebebf --- /dev/null +++ b/proxyd/backend_test.go @@ -0,0 +1,22 @@ +package proxyd + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestStripXFF(t *testing.T) { + tests := []struct { + in, out string + }{ + {"1.2.3, 4.5.6, 7.8.9", "1.2.3"}, + {"1.2.3,4.5.6", "1.2.3"}, + {" 1.2.3 , 4.5.6 ", "1.2.3"}, + } + + for _, test := range tests { + actual := stripXFF(test.in) + assert.Equal(t, test.out, actual) + } +} diff --git a/proxyd/cache.go b/proxyd/cache.go new file mode 100644 index 0000000..5add4f2 --- /dev/null +++ b/proxyd/cache.go @@ -0,0 +1,192 @@ +package proxyd + +import ( + "context" + "encoding/json" + "strings" + "time" + + "github.com/ethereum/go-ethereum/rpc" + "github.com/redis/go-redis/v9" + + "github.com/golang/snappy" + lru "github.com/hashicorp/golang-lru" +) + +type Cache interface { + Get(ctx context.Context, key string) (string, error) + Put(ctx context.Context, key string, value string) error +} + +const ( + // assuming an average RPCRes size of 3 KB + memoryCacheLimit = 4096 +) + +type cache struct { + lru *lru.Cache +} + +func newMemoryCache() *cache { + rep, _ := lru.New(memoryCacheLimit) + return &cache{rep} +} + +func (c *cache) Get(ctx context.Context, key string) (string, error) { + if val, ok := c.lru.Get(key); ok { + return val.(string), nil + } + return "", nil +} + +func (c *cache) Put(ctx context.Context, key string, value string) error { + c.lru.Add(key, value) + return nil +} + +type redisCache struct { + rdb *redis.Client + prefix string + ttl time.Duration +} + +func newRedisCache(rdb *redis.Client, prefix string, ttl time.Duration) *redisCache { + return &redisCache{rdb, prefix, ttl} +} + +func (c *redisCache) namespaced(key string) string { + if c.prefix == "" { + return key + } + return strings.Join([]string{c.prefix, key}, ":") +} + +func (c *redisCache) Get(ctx context.Context, key string) (string, error) { + start := time.Now() + val, err := c.rdb.Get(ctx, c.namespaced(key)).Result() + redisCacheDurationSumm.WithLabelValues("GET").Observe(float64(time.Since(start).Milliseconds())) + + if err == redis.Nil { + return "", nil + } else if err != nil { + RecordRedisError("CacheGet") + return "", err + } + return val, nil +} + +func (c *redisCache) Put(ctx context.Context, key string, value string) error { + start := time.Now() + err := c.rdb.SetEx(ctx, c.namespaced(key), value, c.ttl).Err() + redisCacheDurationSumm.WithLabelValues("SETEX").Observe(float64(time.Since(start).Milliseconds())) + + if err != nil { + RecordRedisError("CacheSet") + } + return err 
+} + +type cacheWithCompression struct { + cache Cache +} + +func newCacheWithCompression(cache Cache) *cacheWithCompression { + return &cacheWithCompression{cache} +} + +func (c *cacheWithCompression) Get(ctx context.Context, key string) (string, error) { + encodedVal, err := c.cache.Get(ctx, key) + if err != nil { + return "", err + } + if encodedVal == "" { + return "", nil + } + val, err := snappy.Decode(nil, []byte(encodedVal)) + if err != nil { + return "", err + } + return string(val), nil +} + +func (c *cacheWithCompression) Put(ctx context.Context, key string, value string) error { + encodedVal := snappy.Encode(nil, []byte(value)) + return c.cache.Put(ctx, key, string(encodedVal)) +} + +type RPCCache interface { + GetRPC(ctx context.Context, req *RPCReq) (*RPCRes, error) + PutRPC(ctx context.Context, req *RPCReq, res *RPCRes) error +} + +type rpcCache struct { + cache Cache + handlers map[string]RPCMethodHandler +} + +func newRPCCache(cache Cache) RPCCache { + staticHandler := &StaticMethodHandler{cache: cache} + debugGetRawReceiptsHandler := &StaticMethodHandler{cache: cache, + filterGet: func(req *RPCReq) bool { + // cache only if the request is for a block hash + + var p []rpc.BlockNumberOrHash + err := json.Unmarshal(req.Params, &p) + if err != nil { + return false + } + if len(p) != 1 { + return false + } + return p[0].BlockHash != nil + }, + filterPut: func(req *RPCReq, res *RPCRes) bool { + // don't cache if response contains 0 receipts + rawReceipts, ok := res.Result.([]interface{}) + if !ok { + return false + } + return len(rawReceipts) > 0 + }, + } + handlers := map[string]RPCMethodHandler{ + "eth_chainId": staticHandler, + "net_version": staticHandler, + "eth_getBlockTransactionCountByHash": staticHandler, + "eth_getUncleCountByBlockHash": staticHandler, + "eth_getBlockByHash": staticHandler, + "eth_getTransactionByBlockHashAndIndex": staticHandler, + "eth_getUncleByBlockHashAndIndex": staticHandler, + "debug_getRawReceipts": debugGetRawReceiptsHandler, + } + return &rpcCache{ + cache: cache, + handlers: handlers, + } +} + +func (c *rpcCache) GetRPC(ctx context.Context, req *RPCReq) (*RPCRes, error) { + handler := c.handlers[req.Method] + if handler == nil { + return nil, nil + } + res, err := handler.GetRPCMethod(ctx, req) + if err != nil { + RecordCacheError(req.Method) + return nil, err + } + if res == nil { + RecordCacheMiss(req.Method) + } else { + RecordCacheHit(req.Method) + } + return res, nil +} + +func (c *rpcCache) PutRPC(ctx context.Context, req *RPCReq, res *RPCRes) error { + handler := c.handlers[req.Method] + if handler == nil { + return nil + } + return handler.PutRPCMethod(ctx, req, res) +} diff --git a/proxyd/cache_test.go b/proxyd/cache_test.go new file mode 100644 index 0000000..1a5d543 --- /dev/null +++ b/proxyd/cache_test.go @@ -0,0 +1,213 @@ +package proxyd + +import ( + "context" + "strconv" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestRPCCacheImmutableRPCs(t *testing.T) { + ctx := context.Background() + + cache := newRPCCache(newMemoryCache()) + ID := []byte(strconv.Itoa(1)) + + rpcs := []struct { + req *RPCReq + res *RPCRes + name string + }{ + { + req: &RPCReq{ + JSONRPC: "2.0", + Method: "eth_chainId", + ID: ID, + }, + res: &RPCRes{ + JSONRPC: "2.0", + Result: "0xff", + ID: ID, + }, + name: "eth_chainId", + }, + { + req: &RPCReq{ + JSONRPC: "2.0", + Method: "net_version", + ID: ID, + }, + res: &RPCRes{ + JSONRPC: "2.0", + Result: "9999", + ID: ID, + }, + name: "net_version", + }, + { + req: &RPCReq{ + JSONRPC: "2.0", + 
Method: "eth_getBlockTransactionCountByHash", + Params: mustMarshalJSON([]string{"0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238"}), + ID: ID, + }, + res: &RPCRes{ + JSONRPC: "2.0", + Result: `{"eth_getBlockTransactionCountByHash":"!"}`, + ID: ID, + }, + name: "eth_getBlockTransactionCountByHash", + }, + { + req: &RPCReq{ + JSONRPC: "2.0", + Method: "eth_getUncleCountByBlockHash", + Params: mustMarshalJSON([]string{"0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238"}), + ID: ID, + }, + res: &RPCRes{ + JSONRPC: "2.0", + Result: `{"eth_getUncleCountByBlockHash":"!"}`, + ID: ID, + }, + name: "eth_getUncleCountByBlockHash", + }, + { + req: &RPCReq{ + JSONRPC: "2.0", + Method: "eth_getBlockByHash", + Params: mustMarshalJSON([]string{"0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b", "false"}), + ID: ID, + }, + res: &RPCRes{ + JSONRPC: "2.0", + Result: `{"eth_getBlockByHash":"!"}`, + ID: ID, + }, + name: "eth_getBlockByHash", + }, + { + req: &RPCReq{ + JSONRPC: "2.0", + Method: "eth_getUncleByBlockHashAndIndex", + Params: mustMarshalJSON([]string{"0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238", "0x90"}), + ID: ID, + }, + res: &RPCRes{ + JSONRPC: "2.0", + Result: `{"eth_getUncleByBlockHashAndIndex":"!"}`, + ID: ID, + }, + name: "eth_getUncleByBlockHashAndIndex", + }, + { + req: &RPCReq{ + JSONRPC: "2.0", + Method: "debug_getRawReceipts", + Params: mustMarshalJSON([]string{"0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b"}), + ID: ID, + }, + res: &RPCRes{ + JSONRPC: "2.0", + Result: []interface{}{"a"}, + ID: ID, + }, + name: "debug_getRawReceipts", + }, + } + + for _, rpc := range rpcs { + t.Run(rpc.name, func(t *testing.T) { + err := cache.PutRPC(ctx, rpc.req, rpc.res) + require.NoError(t, err) + + cachedRes, err := cache.GetRPC(ctx, rpc.req) + require.NoError(t, err) + require.Equal(t, rpc.res, cachedRes) + }) + } +} + +func TestRPCCacheUnsupportedMethod(t *testing.T) { + ctx := context.Background() + + cache := newRPCCache(newMemoryCache()) + ID := []byte(strconv.Itoa(1)) + + rpcs := []struct { + req *RPCReq + name string + }{ + { + name: "eth_syncing", + req: &RPCReq{ + JSONRPC: "2.0", + Method: "eth_syncing", + ID: ID, + }, + }, + { + name: "eth_blockNumber", + req: &RPCReq{ + JSONRPC: "2.0", + Method: "eth_blockNumber", + ID: ID, + }, + }, + { + name: "eth_getBlockByNumber", + req: &RPCReq{ + JSONRPC: "2.0", + Method: "eth_getBlockByNumber", + ID: ID, + }, + }, + { + name: "eth_getBlockRange", + req: &RPCReq{ + JSONRPC: "2.0", + Method: "eth_getBlockRange", + ID: ID, + }, + }, + { + name: "eth_gasPrice", + req: &RPCReq{ + JSONRPC: "2.0", + Method: "eth_gasPrice", + ID: ID, + }, + }, + { + name: "eth_call", + req: &RPCReq{ + JSONRPC: "2.0", + Method: "eth_call", + ID: ID, + }, + }, + { + req: &RPCReq{ + JSONRPC: "2.0", + Method: "debug_getRawReceipts", + Params: mustMarshalJSON([]string{"0x100"}), + ID: ID, + }, + name: "debug_getRawReceipts", + }, + } + + for _, rpc := range rpcs { + t.Run(rpc.name, func(t *testing.T) { + fakeval := mustMarshalJSON([]string{rpc.name}) + err := cache.PutRPC(ctx, rpc.req, &RPCRes{Result: fakeval}) + require.NoError(t, err) + + cachedRes, err := cache.GetRPC(ctx, rpc.req) + require.NoError(t, err) + require.Nil(t, cachedRes) + }) + } + +} diff --git a/proxyd/cmd/proxyd/main.go b/proxyd/cmd/proxyd/main.go new file mode 100644 index 0000000..c2b613c --- /dev/null +++ b/proxyd/cmd/proxyd/main.go @@ -0,0 +1,122 @@ +package main + +import ( + "fmt" + "net" + "net/http" + 
"net/http/pprof" + "os" + "os/signal" + "strconv" + "strings" + "syscall" + + "github.com/BurntSushi/toml" + "golang.org/x/exp/slog" + + "github.com/ethereum/go-ethereum/log" + + "github.com/ethereum-optimism/optimism/proxyd" +) + +var ( + GitVersion = "" + GitCommit = "" + GitDate = "" +) + +func main() { + // Set up logger with a default INFO level in case we fail to parse flags. + // Otherwise the final critical log won't show what the parsing error was. + proxyd.SetLogLevel(slog.LevelInfo) + + log.Info("starting proxyd", "version", GitVersion, "commit", GitCommit, "date", GitDate) + + if len(os.Args) < 2 { + log.Crit("must specify a config file on the command line") + } + + config := new(proxyd.Config) + if _, err := toml.DecodeFile(os.Args[1], config); err != nil { + log.Crit("error reading config file", "err", err) + } + + // update log level from config + logLevel, err := LevelFromString(config.Server.LogLevel) + if err != nil { + logLevel = log.LevelInfo + if config.Server.LogLevel != "" { + log.Warn("invalid server.log_level set: " + config.Server.LogLevel) + } + } + proxyd.SetLogLevel(logLevel) + + if config.Server.EnablePprof { + log.Info("starting pprof", "addr", "0.0.0.0", "port", "6060") + pprofSrv := StartPProf("0.0.0.0", 6060) + log.Info("started pprof server", "addr", pprofSrv.Addr) + defer func() { + if err := pprofSrv.Close(); err != nil { + log.Error("failed to stop pprof server", "err", err) + } + }() + } + + _, shutdown, err := proxyd.Start(config) + if err != nil { + log.Crit("error starting proxyd", "err", err) + } + + sig := make(chan os.Signal, 1) + signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM) + recvSig := <-sig + log.Info("caught signal, shutting down", "signal", recvSig) + shutdown() +} + +// LevelFromString returns the appropriate Level from a string name. +// Useful for parsing command line args and configuration files. +// It also converts strings to lowercase. 
+// Note: copied from op-service/log to avoid monorepo dependency +func LevelFromString(lvlString string) (slog.Level, error) { + lvlString = strings.ToLower(lvlString) // ignore case + switch lvlString { + case "trace", "trce": + return log.LevelTrace, nil + case "debug", "dbug": + return log.LevelDebug, nil + case "info": + return log.LevelInfo, nil + case "warn": + return log.LevelWarn, nil + case "error", "eror": + return log.LevelError, nil + case "crit": + return log.LevelCrit, nil + default: + return log.LevelDebug, fmt.Errorf("unknown level: %v", lvlString) + } +} + +func StartPProf(hostname string, port int) *http.Server { + mux := http.NewServeMux() + + // have to do below to support multiple servers, since the + // pprof import only uses DefaultServeMux + mux.Handle("/debug/pprof/", http.HandlerFunc(pprof.Index)) + mux.Handle("/debug/pprof/cmdline", http.HandlerFunc(pprof.Cmdline)) + mux.Handle("/debug/pprof/profile", http.HandlerFunc(pprof.Profile)) + mux.Handle("/debug/pprof/symbol", http.HandlerFunc(pprof.Symbol)) + mux.Handle("/debug/pprof/trace", http.HandlerFunc(pprof.Trace)) + + addr := net.JoinHostPort(hostname, strconv.Itoa(port)) + srv := &http.Server{ + Handler: mux, + Addr: addr, + } + + // nolint:errcheck + go srv.ListenAndServe() + + return srv +} diff --git a/proxyd/config.go b/proxyd/config.go new file mode 100644 index 0000000..4719a55 --- /dev/null +++ b/proxyd/config.go @@ -0,0 +1,184 @@ +package proxyd + +import ( + "fmt" + "math/big" + "os" + "strings" + "time" +) + +type ServerConfig struct { + RPCHost string `toml:"rpc_host"` + RPCPort int `toml:"rpc_port"` + WSHost string `toml:"ws_host"` + WSPort int `toml:"ws_port"` + MaxBodySizeBytes int64 `toml:"max_body_size_bytes"` + MaxConcurrentRPCs int64 `toml:"max_concurrent_rpcs"` + LogLevel string `toml:"log_level"` + + // TimeoutSeconds specifies the maximum time spent serving an HTTP request. 
Note that this isn't used for websocket connections.
+	TimeoutSeconds int `toml:"timeout_seconds"`
+
+	MaxUpstreamBatchSize int `toml:"max_upstream_batch_size"`
+
+	EnableRequestLog      bool `toml:"enable_request_log"`
+	MaxRequestBodyLogLen  int  `toml:"max_request_body_log_len"`
+	EnablePprof           bool `toml:"enable_pprof"`
+	EnableXServedByHeader bool `toml:"enable_served_by_header"`
+	AllowAllOrigins       bool `toml:"allow_all_origins"`
+}
+
+type CacheConfig struct {
+	Enabled bool         `toml:"enabled"`
+	TTL     TOMLDuration `toml:"ttl"`
+}
+
+type RedisConfig struct {
+	URL       string `toml:"url"`
+	Namespace string `toml:"namespace"`
+}
+
+type MetricsConfig struct {
+	Enabled bool   `toml:"enabled"`
+	Host    string `toml:"host"`
+	Port    int    `toml:"port"`
+}
+
+type RateLimitConfig struct {
+	UseRedis         bool                                `toml:"use_redis"`
+	BaseRate         int                                 `toml:"base_rate"`
+	BaseInterval     TOMLDuration                        `toml:"base_interval"`
+	ExemptOrigins    []string                            `toml:"exempt_origins"`
+	ExemptUserAgents []string                            `toml:"exempt_user_agents"`
+	ErrorMessage     string                              `toml:"error_message"`
+	MethodOverrides  map[string]*RateLimitMethodOverride `toml:"method_overrides"`
+	IPHeaderOverride string                              `toml:"ip_header_override"`
+}
+
+type RateLimitMethodOverride struct {
+	Limit    int          `toml:"limit"`
+	Interval TOMLDuration `toml:"interval"`
+	Global   bool         `toml:"global"`
+}
+
+type TOMLDuration time.Duration
+
+func (t *TOMLDuration) UnmarshalText(b []byte) error {
+	d, err := time.ParseDuration(string(b))
+	if err != nil {
+		return err
+	}
+
+	*t = TOMLDuration(d)
+	return nil
+}
+
+type BackendOptions struct {
+	ResponseTimeoutSeconds      int          `toml:"response_timeout_seconds"`
+	MaxResponseSizeBytes        int64        `toml:"max_response_size_bytes"`
+	MaxRetries                  int          `toml:"max_retries"`
+	OutOfServiceSeconds         int          `toml:"out_of_service_seconds"`
+	MaxDegradedLatencyThreshold TOMLDuration `toml:"max_degraded_latency_threshold"`
+	MaxLatencyThreshold         TOMLDuration `toml:"max_latency_threshold"`
+	MaxErrorRateThreshold       float64      `toml:"max_error_rate_threshold"`
+}
+
+type BackendConfig struct {
+	Username         string            `toml:"username"`
+	Password         string            `toml:"password"`
+	RPCURL           string            `toml:"rpc_url"`
+	WSURL            string            `toml:"ws_url"`
+	WSPort           int               `toml:"ws_port"`
+	MaxRPS           int               `toml:"max_rps"`
+	MaxWSConns       int               `toml:"max_ws_conns"`
+	CAFile           string            `toml:"ca_file"`
+	ClientCertFile   string            `toml:"client_cert_file"`
+	ClientKeyFile    string            `toml:"client_key_file"`
+	StripTrailingXFF bool              `toml:"strip_trailing_xff"`
+	Headers          map[string]string `toml:"headers"`
+
+	Weight int `toml:"weight"`
+
+	ConsensusSkipPeerCountCheck bool   `toml:"consensus_skip_peer_count"`
+	ConsensusForcedCandidate    bool   `toml:"consensus_forced_candidate"`
+	ConsensusReceiptsTarget     string `toml:"consensus_receipts_target"`
+}
+
+type BackendsConfig map[string]*BackendConfig
+
+type BackendGroupConfig struct {
+	Backends []string `toml:"backends"`
+
+	WeightedRouting bool `toml:"weighted_routing"`
+
+	ConsensusAware          bool         `toml:"consensus_aware"`
+	ConsensusAsyncHandler   string       `toml:"consensus_handler"`
+	ConsensusPollerInterval TOMLDuration `toml:"consensus_poller_interval"`
+
+	ConsensusBanPeriod          TOMLDuration `toml:"consensus_ban_period"`
+	ConsensusMaxUpdateThreshold TOMLDuration `toml:"consensus_max_update_threshold"`
+	ConsensusMaxBlockLag        uint64       `toml:"consensus_max_block_lag"`
+	ConsensusMaxBlockRange      uint64       `toml:"consensus_max_block_range"`
+	ConsensusMinPeerCount       int          `toml:"consensus_min_peer_count"`
+
+	ConsensusHA                  bool         `toml:"consensus_ha"`
+	ConsensusHAHeartbeatInterval TOMLDuration 
`toml:"consensus_ha_heartbeat_interval"`
+	ConsensusHALockPeriod        TOMLDuration `toml:"consensus_ha_lock_period"`
+	ConsensusHARedis             RedisConfig  `toml:"consensus_ha_redis"`
+
+	Fallbacks []string `toml:"fallbacks"`
+}
+
+type BackendGroupsConfig map[string]*BackendGroupConfig
+
+type MethodMappingsConfig map[string]string
+
+type BatchConfig struct {
+	MaxSize      int    `toml:"max_size"`
+	ErrorMessage string `toml:"error_message"`
+}
+
+// SenderRateLimitConfig configures the sender-based rate limiter
+// for eth_sendRawTransaction requests.
+// To enable pre-eip155 transactions, add '0' to allowed_chain_ids.
+type SenderRateLimitConfig struct {
+	Enabled         bool
+	Interval        TOMLDuration
+	Limit           int
+	AllowedChainIds []*big.Int `toml:"allowed_chain_ids"`
+}
+
+type Config struct {
+	WSBackendGroup        string                `toml:"ws_backend_group"`
+	Server                ServerConfig          `toml:"server"`
+	Cache                 CacheConfig           `toml:"cache"`
+	Redis                 RedisConfig           `toml:"redis"`
+	Metrics               MetricsConfig         `toml:"metrics"`
+	RateLimit             RateLimitConfig       `toml:"rate_limit"`
+	BackendOptions        BackendOptions        `toml:"backend"`
+	Backends              BackendsConfig        `toml:"backends"`
+	BatchConfig           BatchConfig           `toml:"batch"`
+	Authentication        map[string]string     `toml:"authentication"`
+	BackendGroups         BackendGroupsConfig   `toml:"backend_groups"`
+	RPCMethodMappings     map[string]string     `toml:"rpc_method_mappings"`
+	WSMethodWhitelist     []string              `toml:"ws_method_whitelist"`
+	WhitelistErrorMessage string                `toml:"whitelist_error_message"`
+	SenderRateLimit       SenderRateLimitConfig `toml:"sender_rate_limit"`
+}
+
+func ReadFromEnvOrConfig(value string) (string, error) {
+	if strings.HasPrefix(value, "$") {
+		envValue := os.Getenv(strings.TrimPrefix(value, "$"))
+		if envValue == "" {
+			return "", fmt.Errorf("config env var %s not found", value)
+		}
+		return envValue, nil
+	}
+
+	if strings.HasPrefix(value, "\\") {
+		return strings.TrimPrefix(value, "\\"), nil
+	}
+
+	return value, nil
+}
diff --git a/proxyd/consensus_poller.go b/proxyd/consensus_poller.go
new file mode 100644
index 0000000..90af41d
--- /dev/null
+++ b/proxyd/consensus_poller.go
@@ -0,0 +1,746 @@
+package proxyd
+
+import (
+	"context"
+	"fmt"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common/hexutil"
+
+	"github.com/ethereum/go-ethereum/log"
+)
+
+const (
+	DefaultPollerInterval = 1 * time.Second
+)
+
+type OnConsensusBroken func()
+
+// ConsensusPoller checks the consensus state for each member of a BackendGroup,
+// resolves the highest common block for multiple nodes, and reconciles the consensus
+// in case of block hash divergence to minimize re-orgs.
+type ConsensusPoller struct {
+	ctx        context.Context
+	cancelFunc context.CancelFunc
+	listeners  []OnConsensusBroken
+
+	backendGroup      *BackendGroup
+	backendState      map[*Backend]*backendState
+	consensusGroupMux sync.Mutex
+	consensusGroup    []*Backend
+
+	tracker      ConsensusTracker
+	asyncHandler ConsensusAsyncHandler
+
+	minPeerCount       uint64
+	banPeriod          time.Duration
+	maxUpdateThreshold time.Duration
+	maxBlockLag        uint64
+	maxBlockRange      uint64
+	interval           time.Duration
+}
+
+type backendState struct {
+	backendStateMux sync.Mutex
+
+	latestBlockNumber    hexutil.Uint64
+	latestBlockHash      string
+	safeBlockNumber      hexutil.Uint64
+	finalizedBlockNumber hexutil.Uint64
+
+	peerCount uint64
+	inSync    bool
+
+	lastUpdate time.Time
+
+	bannedUntil time.Time
+}
+
+func (bs *backendState) IsBanned() bool {
+	return time.Now().Before(bs.bannedUntil)
+}
+
+// GetConsensusGroup returns the backend members that are agreeing in a consensus
+func (cp 
*ConsensusPoller) GetConsensusGroup() []*Backend {
+	defer cp.consensusGroupMux.Unlock()
+	cp.consensusGroupMux.Lock()
+
+	g := make([]*Backend, len(cp.consensusGroup))
+	copy(g, cp.consensusGroup)
+
+	return g
+}
+
+// GetLatestBlockNumber returns the `latest` agreed block number in a consensus
+func (ct *ConsensusPoller) GetLatestBlockNumber() hexutil.Uint64 {
+	return ct.tracker.GetLatestBlockNumber()
+}
+
+// GetSafeBlockNumber returns the `safe` agreed block number in a consensus
+func (ct *ConsensusPoller) GetSafeBlockNumber() hexutil.Uint64 {
+	return ct.tracker.GetSafeBlockNumber()
+}
+
+// GetFinalizedBlockNumber returns the `finalized` agreed block number in a consensus
+func (ct *ConsensusPoller) GetFinalizedBlockNumber() hexutil.Uint64 {
+	return ct.tracker.GetFinalizedBlockNumber()
+}
+
+func (cp *ConsensusPoller) Shutdown() {
+	cp.asyncHandler.Shutdown()
+}
+
+// ConsensusAsyncHandler controls the asynchronous polling mechanism, interval and shutdown
+type ConsensusAsyncHandler interface {
+	Init()
+	Shutdown()
+}
+
+// NoopAsyncHandler allows fine-grained control over updating the consensus
+type NoopAsyncHandler struct{}
+
+func NewNoopAsyncHandler() ConsensusAsyncHandler {
+	log.Warn("using NewNoopAsyncHandler")
+	return &NoopAsyncHandler{}
+}
+func (ah *NoopAsyncHandler) Init()     {}
+func (ah *NoopAsyncHandler) Shutdown() {}
+
+// PollerAsyncHandler asynchronously updates each individual backend and the group consensus
+type PollerAsyncHandler struct {
+	ctx context.Context
+	cp  *ConsensusPoller
+}
+
+func NewPollerAsyncHandler(ctx context.Context, cp *ConsensusPoller) ConsensusAsyncHandler {
+	return &PollerAsyncHandler{
+		ctx: ctx,
+		cp:  cp,
+	}
+}
+func (ah *PollerAsyncHandler) Init() {
+	// create the individual backend pollers.
+	log.Info("total number of primary candidates", "primaries", len(ah.cp.backendGroup.Primaries()))
+	log.Info("total number of fallback candidates", "fallbacks", len(ah.cp.backendGroup.Fallbacks()))
+
+	for _, be := range ah.cp.backendGroup.Primaries() {
+		go func(be *Backend) {
+			for {
+				timer := time.NewTimer(ah.cp.interval)
+				ah.cp.UpdateBackend(ah.ctx, be)
+				select {
+				case <-timer.C:
+				case <-ah.ctx.Done():
+					timer.Stop()
+					return
+				}
+			}
+		}(be)
+	}
+
+	for _, be := range ah.cp.backendGroup.Fallbacks() {
+		go func(be *Backend) {
+			for {
+				timer := time.NewTimer(ah.cp.interval)
+
+				healthyCandidates := ah.cp.FilterCandidates(ah.cp.backendGroup.Primaries())
+
+				log.Info("number of healthy primary candidates", "healthy_candidates", len(healthyCandidates))
+				if len(healthyCandidates) == 0 {
+					log.Debug("zero healthy candidates, querying fallback backend",
+						"backend_name", be.Name)
+					ah.cp.UpdateBackend(ah.ctx, be)
+				}
+
+				select {
+				case <-timer.C:
+				case <-ah.ctx.Done():
+					timer.Stop()
+					return
+				}
+			}
+		}(be)
+	}
+
+	// create the group consensus poller
+	go func() {
+		for {
+			timer := time.NewTimer(ah.cp.interval)
+			log.Info("updating backend group consensus")
+			ah.cp.UpdateBackendGroupConsensus(ah.ctx)
+
+			select {
+			case <-timer.C:
+			case <-ah.ctx.Done():
+				timer.Stop()
+				return
+			}
+		}
+	}()
+}
+func (ah *PollerAsyncHandler) Shutdown() {
+	ah.cp.cancelFunc()
+}
+
+type ConsensusOpt func(cp *ConsensusPoller)
+
+func WithTracker(tracker ConsensusTracker) ConsensusOpt {
+	return func(cp *ConsensusPoller) {
+		cp.tracker = tracker
+	}
+}
+
+func WithAsyncHandler(asyncHandler ConsensusAsyncHandler) ConsensusOpt {
+	return func(cp *ConsensusPoller) {
+		cp.asyncHandler = asyncHandler
+	}
+}
+
+func WithListener(listener OnConsensusBroken) ConsensusOpt {
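+	// Listeners are invoked by UpdateBackendGroupConsensus whenever it detects
+	// a broken consensus, e.g. so a cache invalidator can react to a re-org.
+	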
return func(cp *ConsensusPoller) {
+		cp.AddListener(listener)
+	}
+}
+
+func (cp *ConsensusPoller) AddListener(listener OnConsensusBroken) {
+	cp.listeners = append(cp.listeners, listener)
+}
+
+func (cp *ConsensusPoller) ClearListeners() {
+	cp.listeners = []OnConsensusBroken{}
+}
+
+func WithBanPeriod(banPeriod time.Duration) ConsensusOpt {
+	return func(cp *ConsensusPoller) {
+		cp.banPeriod = banPeriod
+	}
+}
+
+func WithMaxUpdateThreshold(maxUpdateThreshold time.Duration) ConsensusOpt {
+	return func(cp *ConsensusPoller) {
+		cp.maxUpdateThreshold = maxUpdateThreshold
+	}
+}
+
+func WithMaxBlockLag(maxBlockLag uint64) ConsensusOpt {
+	return func(cp *ConsensusPoller) {
+		cp.maxBlockLag = maxBlockLag
+	}
+}
+
+func WithMaxBlockRange(maxBlockRange uint64) ConsensusOpt {
+	return func(cp *ConsensusPoller) {
+		cp.maxBlockRange = maxBlockRange
+	}
+}
+
+func WithMinPeerCount(minPeerCount uint64) ConsensusOpt {
+	return func(cp *ConsensusPoller) {
+		cp.minPeerCount = minPeerCount
+	}
+}
+
+func WithPollerInterval(interval time.Duration) ConsensusOpt {
+	return func(cp *ConsensusPoller) {
+		cp.interval = interval
+	}
+}
+
+func NewConsensusPoller(bg *BackendGroup, opts ...ConsensusOpt) *ConsensusPoller {
+	ctx, cancelFunc := context.WithCancel(context.Background())
+
+	state := make(map[*Backend]*backendState, len(bg.Backends))
+
+	cp := &ConsensusPoller{
+		ctx:          ctx,
+		cancelFunc:   cancelFunc,
+		backendGroup: bg,
+		backendState: state,
+
+		banPeriod:          5 * time.Minute,
+		maxUpdateThreshold: 30 * time.Second,
+		maxBlockLag:        8, // 8*12 seconds = 96 seconds ~ 1.6 minutes
+		minPeerCount:       3,
+		interval:           DefaultPollerInterval,
+	}
+
+	for _, opt := range opts {
+		opt(cp)
+	}
+
+	if cp.tracker == nil {
+		cp.tracker = NewInMemoryConsensusTracker()
+	}
+
+	if cp.asyncHandler == nil {
+		cp.asyncHandler = NewPollerAsyncHandler(ctx, cp)
+	}
+
+	cp.Reset()
+	cp.asyncHandler.Init()
+
+	return cp
+}
+
+// UpdateBackend refreshes the consensus state of a single backend
+func (cp *ConsensusPoller) UpdateBackend(ctx context.Context, be *Backend) {
+	bs := cp.getBackendState(be)
+	RecordConsensusBackendBanned(be, bs.IsBanned())
+
+	if bs.IsBanned() {
+		log.Debug("skipping backend - banned", "backend", be.Name)
+		return
+	}
+
+	// if the backend is not in a healthy state, we'll only resume checking it after the ban
+	if !be.IsHealthy() && !be.forcedCandidate {
+		log.Warn("backend banned - not healthy", "backend", be.Name)
+		cp.Ban(be)
+		return
+	}
+
+	inSync, err := cp.isInSync(ctx, be)
+	RecordConsensusBackendInSync(be, err == nil && inSync)
+	if err != nil {
+		log.Warn("error updating backend sync state", "name", be.Name, "err", err)
+	}
+
+	var peerCount uint64
+	if !be.skipPeerCountCheck {
+		peerCount, err = cp.getPeerCount(ctx, be)
+		if err != nil {
+			log.Warn("error updating backend peer count", "name", be.Name, "err", err)
+		}
+		RecordConsensusBackendPeerCount(be, peerCount)
+	}
+
+	latestBlockNumber, latestBlockHash, err := cp.fetchBlock(ctx, be, "latest")
+	if err != nil {
+		log.Warn("error updating backend - latest block", "name", be.Name, "err", err)
+	}
+
+	safeBlockNumber, _, err := cp.fetchBlock(ctx, be, "safe")
+	if err != nil {
+		log.Warn("error updating backend - safe block", "name", be.Name, "err", err)
+	}
+
+	finalizedBlockNumber, _, err := cp.fetchBlock(ctx, be, "finalized")
+	if err != nil {
+		log.Warn("error updating backend - finalized block", "name", be.Name, "err", err)
+	}
+
+	RecordConsensusBackendUpdateDelay(be, bs.lastUpdate)
+
+	changed := cp.setBackendState(be, peerCount, inSync,
+		
latestBlockNumber, latestBlockHash,
+		safeBlockNumber, finalizedBlockNumber)
+
+	RecordBackendLatestBlock(be, latestBlockNumber)
+	RecordBackendSafeBlock(be, safeBlockNumber)
+	RecordBackendFinalizedBlock(be, finalizedBlockNumber)
+
+	if changed {
+		log.Debug("backend state updated",
+			"name", be.Name,
+			"peerCount", peerCount,
+			"inSync", inSync,
+			"latestBlockNumber", latestBlockNumber,
+			"latestBlockHash", latestBlockHash,
+			"safeBlockNumber", safeBlockNumber,
+			"finalizedBlockNumber", finalizedBlockNumber,
+			"lastUpdate", bs.lastUpdate)
+	}
+
+	// sanity check for latest, safe and finalized block tags
+	expectedBlockTags := cp.checkExpectedBlockTags(
+		latestBlockNumber,
+		bs.safeBlockNumber, safeBlockNumber,
+		bs.finalizedBlockNumber, finalizedBlockNumber)
+
+	RecordBackendUnexpectedBlockTags(be, !expectedBlockTags)
+
+	if !expectedBlockTags && !be.forcedCandidate {
+		log.Warn("backend banned - unexpected block tags",
+			"backend", be.Name,
+			"oldFinalized", bs.finalizedBlockNumber,
+			"finalizedBlockNumber", finalizedBlockNumber,
+			"oldSafe", bs.safeBlockNumber,
+			"safeBlockNumber", safeBlockNumber,
+			"latestBlockNumber", latestBlockNumber,
+		)
+		cp.Ban(be)
+	}
+}
+
+// checkExpectedBlockTags checks for unexpected conditions on block tags
+// - finalized block number should never decrease
+// - safe block number should never decrease
+// - finalized block should be <= safe block <= latest block
+func (cp *ConsensusPoller) checkExpectedBlockTags(
+	currentLatest hexutil.Uint64,
+	oldSafe hexutil.Uint64, currentSafe hexutil.Uint64,
+	oldFinalized hexutil.Uint64, currentFinalized hexutil.Uint64) bool {
+	return currentFinalized >= oldFinalized &&
+		currentSafe >= oldSafe &&
+		currentFinalized <= currentSafe &&
+		currentSafe <= currentLatest
+}
+
+// UpdateBackendGroupConsensus resolves the current group consensus based on the state of the backends
+func (cp *ConsensusPoller) UpdateBackendGroupConsensus(ctx context.Context) {
+	// get the latest block number from the tracker
+	currentConsensusBlockNumber := cp.GetLatestBlockNumber()
+
+	// get the candidates for the consensus group
+	candidates := cp.getConsensusCandidates()
+
+	// track the lowest latest block number and hash,
+	// the lowest safe block number,
+	// and the lowest finalized block number
+	var lowestLatestBlock hexutil.Uint64
+	var lowestLatestBlockHash string
+	var lowestFinalizedBlock hexutil.Uint64
+	var lowestSafeBlock hexutil.Uint64
+	for _, bs := range candidates {
+		if lowestLatestBlock == 0 || bs.latestBlockNumber < lowestLatestBlock {
+			lowestLatestBlock = bs.latestBlockNumber
+			lowestLatestBlockHash = bs.latestBlockHash
+		}
+		if lowestFinalizedBlock == 0 || bs.finalizedBlockNumber < lowestFinalizedBlock {
+			lowestFinalizedBlock = bs.finalizedBlockNumber
+		}
+		if lowestSafeBlock == 0 || bs.safeBlockNumber < lowestSafeBlock {
+			lowestSafeBlock = bs.safeBlockNumber
+		}
+	}
+
+	// find the proposed block among the candidates;
+	// the proposed block needs to have the same hash in the entire consensus group
+	proposedBlock := lowestLatestBlock
+	proposedBlockHash := lowestLatestBlockHash
+	hasConsensus := false
+	broken := false
+
+	if lowestLatestBlock > currentConsensusBlockNumber {
+		log.Debug("validating consensus on block", "lowestLatestBlock", lowestLatestBlock)
+	}
+
+	// if there is a block to propose, check if it is the same in all backends
+	if proposedBlock > 0 {
+		for !hasConsensus {
+			allAgreed := true
+			for be := range candidates {
+				actualBlockNumber, actualBlockHash, err := cp.fetchBlock(ctx, be, 
proposedBlock.String())
+				if err != nil {
+					log.Warn("error updating backend", "name", be.Name, "err", err)
+					continue
+				}
+				if proposedBlockHash == "" {
+					proposedBlockHash = actualBlockHash
+				}
+				blocksDontMatch := (actualBlockNumber != proposedBlock) || (actualBlockHash != proposedBlockHash)
+				if blocksDontMatch {
+					if currentConsensusBlockNumber >= actualBlockNumber {
+						log.Warn("backend broke consensus",
+							"name", be.Name,
+							"actualBlockNumber", actualBlockNumber,
+							"actualBlockHash", actualBlockHash,
+							"proposedBlock", proposedBlock,
+							"proposedBlockHash", proposedBlockHash)
+						broken = true
+					}
+					allAgreed = false
+					break
+				}
+			}
+			if allAgreed {
+				hasConsensus = true
+			} else {
+				// walk one block behind and try again
+				proposedBlock -= 1
+				proposedBlockHash = ""
+				log.Debug("no consensus, now trying", "block", proposedBlock)
+			}
+		}
+	}
+
+	if broken {
+		// propagate the event to other interested parties, such as the cache invalidator
+		for _, l := range cp.listeners {
+			l()
+		}
+		log.Info("consensus broken",
+			"currentConsensusBlockNumber", currentConsensusBlockNumber,
+			"proposedBlock", proposedBlock,
+			"proposedBlockHash", proposedBlockHash)
+	}
+
+	// update tracker
+	cp.tracker.SetLatestBlockNumber(proposedBlock)
+	cp.tracker.SetSafeBlockNumber(lowestSafeBlock)
+	cp.tracker.SetFinalizedBlockNumber(lowestFinalizedBlock)
+
+	// update consensus group
+	group := make([]*Backend, 0, len(candidates))
+	consensusBackendsNames := make([]string, 0, len(candidates))
+	filteredBackendsNames := make([]string, 0, len(cp.backendGroup.Backends))
+	for _, be := range cp.backendGroup.Backends {
+		_, exist := candidates[be]
+		if exist {
+			group = append(group, be)
+			consensusBackendsNames = append(consensusBackendsNames, be.Name)
+		} else {
+			filteredBackendsNames = append(filteredBackendsNames, be.Name)
+		}
+	}
+
+	cp.consensusGroupMux.Lock()
+	cp.consensusGroup = group
+	cp.consensusGroupMux.Unlock()
+
+	RecordGroupConsensusLatestBlock(cp.backendGroup, proposedBlock)
+	RecordGroupConsensusSafeBlock(cp.backendGroup, lowestSafeBlock)
+	RecordGroupConsensusFinalizedBlock(cp.backendGroup, lowestFinalizedBlock)
+
+	RecordGroupConsensusCount(cp.backendGroup, len(group))
+	RecordGroupConsensusFilteredCount(cp.backendGroup, len(filteredBackendsNames))
+	RecordGroupTotalCount(cp.backendGroup, len(cp.backendGroup.Backends))
+
+	log.Debug("group state",
+		"proposedBlock", proposedBlock,
+		"consensusBackends", strings.Join(consensusBackendsNames, ", "),
+		"filteredBackends", strings.Join(filteredBackendsNames, ", "))
+}
+
+// IsBanned checks if a specific backend is banned
+func (cp *ConsensusPoller) IsBanned(be *Backend) bool {
+	bs := cp.backendState[be]
+	defer bs.backendStateMux.Unlock()
+	bs.backendStateMux.Lock()
+	return bs.IsBanned()
+}
+
+// Ban bans a specific backend
+func (cp *ConsensusPoller) Ban(be *Backend) {
+	if be.forcedCandidate {
+		return
+	}
+
+	bs := cp.backendState[be]
+	defer bs.backendStateMux.Unlock()
+	bs.backendStateMux.Lock()
+	bs.bannedUntil = time.Now().Add(cp.banPeriod)
+
+	// when we ban a node, we give it the chance to start from any block when it is back
+	bs.latestBlockNumber = 0
+	bs.safeBlockNumber = 0
+	bs.finalizedBlockNumber = 0
+}
+
+// Unban removes any ban from the backend
+func (cp *ConsensusPoller) Unban(be *Backend) {
+	bs := cp.backendState[be]
+	defer bs.backendStateMux.Unlock()
+	bs.backendStateMux.Lock()
+	bs.bannedUntil = time.Now().Add(-10 * time.Hour)
+}
+
+// Reset resets all backend states
+func (cp *ConsensusPoller) Reset() {
+	for _, be := range 
cp.backendGroup.Backends {
+		cp.backendState[be] = &backendState{}
+	}
+}
+
+// fetchBlock is a convenience wrapper to make a request to get a block directly from the backend
+func (cp *ConsensusPoller) fetchBlock(ctx context.Context, be *Backend, block string) (blockNumber hexutil.Uint64, blockHash string, err error) {
+	var rpcRes RPCRes
+	err = be.ForwardRPC(ctx, &rpcRes, "67", "eth_getBlockByNumber", block, false)
+	if err != nil {
+		return 0, "", err
+	}
+
+	jsonMap, ok := rpcRes.Result.(map[string]interface{})
+	if !ok {
+		return 0, "", fmt.Errorf("unexpected response to eth_getBlockByNumber on backend %s", be.Name)
+	}
+	blockNumber = hexutil.Uint64(hexutil.MustDecodeUint64(jsonMap["number"].(string)))
+	blockHash = jsonMap["hash"].(string)
+
+	return
+}
+
+// getPeerCount is a convenience wrapper to retrieve the current peer count from the backend
+func (cp *ConsensusPoller) getPeerCount(ctx context.Context, be *Backend) (count uint64, err error) {
+	var rpcRes RPCRes
+	err = be.ForwardRPC(ctx, &rpcRes, "67", "net_peerCount")
+	if err != nil {
+		return 0, err
+	}
+
+	jsonMap, ok := rpcRes.Result.(string)
+	if !ok {
+		return 0, fmt.Errorf("unexpected response to net_peerCount on backend %s", be.Name)
+	}
+
+	count = hexutil.MustDecodeUint64(jsonMap)
+
+	return count, nil
+}
+
+// isInSync is a convenience wrapper to check if the backend is in sync with the network
+func (cp *ConsensusPoller) isInSync(ctx context.Context, be *Backend) (result bool, err error) {
+	var rpcRes RPCRes
+	err = be.ForwardRPC(ctx, &rpcRes, "67", "eth_syncing")
+	if err != nil {
+		return false, err
+	}
+
+	var res bool
+	switch typed := rpcRes.Result.(type) {
+	case bool:
+		syncing := typed
+		res = !syncing
+	case string:
+		syncing, err := strconv.ParseBool(typed)
+		if err != nil {
+			return false, err
+		}
+		res = !syncing
+	default:
+		// the result is a JSON object while the node is still syncing
+		res = false
+	}
+
+	return res, nil
+}
+
+// getBackendState creates a copy of the backend state so that the caller can use it without locking
+func (cp *ConsensusPoller) getBackendState(be *Backend) *backendState {
+	bs := cp.backendState[be]
+	defer bs.backendStateMux.Unlock()
+	bs.backendStateMux.Lock()
+
+	return &backendState{
+		latestBlockNumber:    bs.latestBlockNumber,
+		latestBlockHash:      bs.latestBlockHash,
+		safeBlockNumber:      bs.safeBlockNumber,
+		finalizedBlockNumber: bs.finalizedBlockNumber,
+		peerCount:            bs.peerCount,
+		inSync:               bs.inSync,
+		lastUpdate:           bs.lastUpdate,
+		bannedUntil:          bs.bannedUntil,
+	}
+}
+
+func (cp *ConsensusPoller) GetLastUpdate(be *Backend) time.Time {
+	bs := cp.backendState[be]
+	defer bs.backendStateMux.Unlock()
+	bs.backendStateMux.Lock()
+	return bs.lastUpdate
+}
+
+func (cp *ConsensusPoller) setBackendState(be *Backend, peerCount uint64, inSync bool,
+	latestBlockNumber hexutil.Uint64, latestBlockHash string,
+	safeBlockNumber hexutil.Uint64,
+	finalizedBlockNumber hexutil.Uint64) bool {
+	bs := cp.backendState[be]
+	bs.backendStateMux.Lock()
+	changed := bs.latestBlockHash != latestBlockHash
+	bs.peerCount = peerCount
+	bs.inSync = inSync
+	bs.latestBlockNumber = latestBlockNumber
+	bs.latestBlockHash = latestBlockHash
+	bs.finalizedBlockNumber = finalizedBlockNumber
+	bs.safeBlockNumber = safeBlockNumber
+	bs.lastUpdate = time.Now()
+	bs.backendStateMux.Unlock()
+	return changed
+}
+
+// getConsensusCandidates searches for candidates in the primary group;
+// if there are none, it searches for candidates in the fallback group
+func (cp *ConsensusPoller) getConsensusCandidates() map[*Backend]*backendState {
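+	// Prefer healthy primary backends; fall back only when none qualify.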
+	healthyPrimaries := cp.FilterCandidates(cp.backendGroup.Primaries())
+
+	RecordHealthyCandidates(cp.backendGroup, len(healthyPrimaries))
+	if len(healthyPrimaries) > 0 {
+		return healthyPrimaries
+	}
+
+	return cp.FilterCandidates(cp.backendGroup.Fallbacks())
+}
+
+// FilterCandidates finds out which backends are candidates to be in the consensus group
+// and creates a copy of their current state
+//
+// a candidate is a serving node that meets the following conditions:
+// - not banned
+// - healthy (network latency and error rate)
+// - with minimum peer count
+// - in sync
+// - updated recently
+// - not lagging latest block
+func (cp *ConsensusPoller) FilterCandidates(backends []*Backend) map[*Backend]*backendState {
+
+	candidates := make(map[*Backend]*backendState, len(cp.backendGroup.Backends))
+
+	for _, be := range backends {
+
+		bs := cp.getBackendState(be)
+		if be.forcedCandidate {
+			candidates[be] = bs
+			continue
+		}
+		if bs.IsBanned() {
+			continue
+		}
+		if !be.IsHealthy() {
+			continue
+		}
+		if !be.skipPeerCountCheck && bs.peerCount < cp.minPeerCount {
+			log.Debug("backend peer count too low for inclusion in consensus",
+				"backend_name", be.Name,
+				"peer_count", bs.peerCount,
+				"min_peer_count", cp.minPeerCount,
+			)
+			continue
+		}
+		if !bs.inSync {
+			continue
+		}
+		if bs.lastUpdate.Add(cp.maxUpdateThreshold).Before(time.Now()) {
+			continue
+		}
+
+		candidates[be] = bs
+	}
+
+	// find the highest block, in order to use it when defining the highest non-lagging ancestor block
+	var highestLatestBlock hexutil.Uint64
+	for _, bs := range candidates {
+		if bs.latestBlockNumber > highestLatestBlock {
+			highestLatestBlock = bs.latestBlockNumber
+		}
+	}
+
+	// find the highest common ancestor block
+	lagging := make([]*Backend, 0, len(candidates))
+	for be, bs := range candidates {
+		// check if the backend is lagging behind the highest block
+		if uint64(highestLatestBlock-bs.latestBlockNumber) > cp.maxBlockLag {
+			lagging = append(lagging, be)
+		}
+	}
+
+	// remove lagging backends from the candidates
+	for _, be := range lagging {
+		delete(candidates, be)
+	}
+
+	return candidates
+}
diff --git a/proxyd/consensus_tracker.go b/proxyd/consensus_tracker.go
new file mode 100644
index 0000000..77e0fdb
--- /dev/null
+++ b/proxyd/consensus_tracker.go
@@ -0,0 +1,356 @@
+package proxyd
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"os"
+	"sync"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common/hexutil"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/go-redsync/redsync/v4"
+	"github.com/go-redsync/redsync/v4/redis/goredis/v9"
+	"github.com/redis/go-redis/v9"
+)
+
+// ConsensusTracker abstracts how we store and retrieve the current consensus,
+// allowing it to be stored locally in-memory or in a shared Redis cluster
+type ConsensusTracker interface {
+	GetLatestBlockNumber() hexutil.Uint64
+	SetLatestBlockNumber(blockNumber hexutil.Uint64)
+	GetSafeBlockNumber() hexutil.Uint64
+	SetSafeBlockNumber(blockNumber hexutil.Uint64)
+	GetFinalizedBlockNumber() hexutil.Uint64
+	SetFinalizedBlockNumber(blockNumber hexutil.Uint64)
+}
+
+// DTO to hold the current consensus state
+type ConsensusTrackerState struct {
+	Latest    hexutil.Uint64 `json:"latest"`
+	Safe      hexutil.Uint64 `json:"safe"`
+	Finalized hexutil.Uint64 `json:"finalized"`
+}
+
+func (ct *InMemoryConsensusTracker) update(o *ConsensusTrackerState) {
+	ct.mutex.Lock()
+	defer ct.mutex.Unlock()
+
+	ct.state.Latest = o.Latest
+	ct.state.Safe = o.Safe
+	ct.state.Finalized = o.Finalized
+}
+
+// InMemoryConsensusTracker stores and retrieves the consensus state in 
memory; safe for concurrent use
+type InMemoryConsensusTracker struct {
+	mutex sync.Mutex
+	state *ConsensusTrackerState
+}
+
+func NewInMemoryConsensusTracker() ConsensusTracker {
+	return &InMemoryConsensusTracker{
+		mutex: sync.Mutex{},
+		state: &ConsensusTrackerState{},
+	}
+}
+
+func (ct *InMemoryConsensusTracker) Valid() bool {
+	return ct.GetLatestBlockNumber() > 0 &&
+		ct.GetSafeBlockNumber() > 0 &&
+		ct.GetFinalizedBlockNumber() > 0
+}
+
+func (ct *InMemoryConsensusTracker) Behind(other *InMemoryConsensusTracker) bool {
+	return ct.GetLatestBlockNumber() < other.GetLatestBlockNumber() ||
+		ct.GetSafeBlockNumber() < other.GetSafeBlockNumber() ||
+		ct.GetFinalizedBlockNumber() < other.GetFinalizedBlockNumber()
+}
+
+func (ct *InMemoryConsensusTracker) GetLatestBlockNumber() hexutil.Uint64 {
+	defer ct.mutex.Unlock()
+	ct.mutex.Lock()
+
+	return ct.state.Latest
+}
+
+func (ct *InMemoryConsensusTracker) SetLatestBlockNumber(blockNumber hexutil.Uint64) {
+	defer ct.mutex.Unlock()
+	ct.mutex.Lock()
+
+	ct.state.Latest = blockNumber
+}
+
+func (ct *InMemoryConsensusTracker) GetSafeBlockNumber() hexutil.Uint64 {
+	defer ct.mutex.Unlock()
+	ct.mutex.Lock()
+
+	return ct.state.Safe
+}
+
+func (ct *InMemoryConsensusTracker) SetSafeBlockNumber(blockNumber hexutil.Uint64) {
+	defer ct.mutex.Unlock()
+	ct.mutex.Lock()
+
+	ct.state.Safe = blockNumber
+}
+
+func (ct *InMemoryConsensusTracker) GetFinalizedBlockNumber() hexutil.Uint64 {
+	defer ct.mutex.Unlock()
+	ct.mutex.Lock()
+
+	return ct.state.Finalized
+}
+
+func (ct *InMemoryConsensusTracker) SetFinalizedBlockNumber(blockNumber hexutil.Uint64) {
+	defer ct.mutex.Unlock()
+	ct.mutex.Lock()
+
+	ct.state.Finalized = blockNumber
+}
+
+// RedisConsensusTracker stores and retrieves the consensus state in a shared Redis cluster, with leader election
+type RedisConsensusTracker struct {
+	ctx          context.Context
+	client       *redis.Client
+	namespace    string
+	backendGroup *BackendGroup
+
+	redlock           *redsync.Mutex
+	lockPeriod        time.Duration
+	heartbeatInterval time.Duration
+
+	leader     bool
+	leaderName string
+
+	// holds the state collected by local pollers
+	local *InMemoryConsensusTracker
+
+	// holds a copy of the remote shared state;
+	// when leader, updates the remote with the local state
+	remote *InMemoryConsensusTracker
+}
+
+type RedisConsensusTrackerOpt func(cp *RedisConsensusTracker)
+
+func WithLockPeriod(lockPeriod time.Duration) RedisConsensusTrackerOpt {
+	return func(ct *RedisConsensusTracker) {
+		ct.lockPeriod = lockPeriod
+	}
+}
+
+func WithHeartbeatInterval(heartbeatInterval time.Duration) RedisConsensusTrackerOpt {
+	return func(ct *RedisConsensusTracker) {
+		ct.heartbeatInterval = heartbeatInterval
+	}
+}
+func NewRedisConsensusTracker(ctx context.Context,
+	redisClient *redis.Client,
+	bg *BackendGroup,
+	namespace string,
+	opts ...RedisConsensusTrackerOpt) ConsensusTracker {
+
+	tracker := &RedisConsensusTracker{
+		ctx:          ctx,
+		client:       redisClient,
+		backendGroup: bg,
+		namespace:    namespace,
+
+		lockPeriod:        30 * time.Second,
+		heartbeatInterval: 2 * time.Second,
+		local:             NewInMemoryConsensusTracker().(*InMemoryConsensusTracker),
+		remote:            NewInMemoryConsensusTracker().(*InMemoryConsensusTracker),
+	}
+
+	for _, opt := range opts {
+		opt(tracker)
+	}
+
+	return tracker
+}
+
+func (ct *RedisConsensusTracker) Init() {
+	go func() {
+		for {
+			timer := time.NewTimer(ct.heartbeatInterval)
+			ct.stateHeartbeat()
+
+			select {
+			case <-timer.C:
+				continue
+			case <-ct.ctx.Done():
+				timer.Stop()
+				return
+			}
+		}
+	}()
+}
+
+func (ct *RedisConsensusTracker) stateHeartbeat() {
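+	// Each heartbeat either extends the leader lock and republishes the local
+	// state, follows the current leader by copying the shared state, or tries
+	// to acquire the lock when nobody holds it.
+	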
pool := goredis.NewPool(ct.client)
+	rs := redsync.New(pool)
+	key := ct.key("mutex")
+
+	val, err := ct.client.Get(ct.ctx, key).Result()
+	if err != nil && err != redis.Nil {
+		log.Error("failed to read the lock", "err", err)
+		RecordGroupConsensusError(ct.backendGroup, "read_lock", err)
+		if ct.leader {
+			ok, err := ct.redlock.Unlock()
+			if err != nil || !ok {
+				log.Error("failed to release the lock after error", "err", err)
+				RecordGroupConsensusError(ct.backendGroup, "leader_release_lock", err)
+				return
+			}
+			ct.leader = false
+		}
+		return
+	}
+	if val != "" {
+		if ct.leader {
+			log.Debug("extending lock")
+			ok, err := ct.redlock.Extend()
+			if err != nil || !ok {
+				log.Error("failed to extend lock", "err", err, "mutex", ct.redlock.Name(), "val", ct.redlock.Value())
+				RecordGroupConsensusError(ct.backendGroup, "leader_extend_lock", err)
+				ok, err := ct.redlock.Unlock()
+				if err != nil || !ok {
+					log.Error("failed to release the lock after error", "err", err)
+					RecordGroupConsensusError(ct.backendGroup, "leader_release_lock", err)
+					return
+				}
+				ct.leader = false
+				return
+			}
+			ct.postPayload(val)
+		} else {
+			// retrieve the current leader
+			leaderName, err := ct.client.Get(ct.ctx, ct.key(fmt.Sprintf("leader:%s", val))).Result()
+			if err != nil && err != redis.Nil {
+				log.Error("failed to read the remote leader", "err", err)
+				RecordGroupConsensusError(ct.backendGroup, "read_leader", err)
+				return
+			}
+			ct.leaderName = leaderName
+			log.Debug("following", "val", val, "leader", leaderName)
+			// retrieve the payload
+			val, err := ct.client.Get(ct.ctx, ct.key(fmt.Sprintf("state:%s", val))).Result()
+			if err != nil && err != redis.Nil {
+				log.Error("failed to read the remote state", "err", err)
+				RecordGroupConsensusError(ct.backendGroup, "read_state", err)
+				return
+			}
+			if val == "" {
+				log.Error("remote state is missing (recent leader election maybe?)")
+				RecordGroupConsensusError(ct.backendGroup, "read_state_missing", err)
+				return
+			}
+			state := &ConsensusTrackerState{}
+			err = json.Unmarshal([]byte(val), state)
+			if err != nil {
+				log.Error("failed to unmarshal the remote state", "err", err)
+				RecordGroupConsensusError(ct.backendGroup, "read_unmarshal_state", err)
+				return
+			}
+
+			ct.remote.update(state)
+			log.Debug("updated state from remote", "state", val, "leader", leaderName)
+
+			RecordGroupConsensusHALatestBlock(ct.backendGroup, leaderName, ct.remote.state.Latest)
+			RecordGroupConsensusHASafeBlock(ct.backendGroup, leaderName, ct.remote.state.Safe)
+			RecordGroupConsensusHAFinalizedBlock(ct.backendGroup, leaderName, ct.remote.state.Finalized)
+		}
+	} else {
+		if !ct.local.Valid() {
+			log.Warn("local state is not valid, skipping")
+			return
+		}
+		if ct.remote.Valid() && ct.local.Behind(ct.remote) {
+			log.Warn("local state is behind remote, skipping")
+			return
+		}
+
+		log.Info("lock not found, creating a new one")
+
+		mutex := rs.NewMutex(key,
+			redsync.WithExpiry(ct.lockPeriod),
+			redsync.WithFailFast(true),
+			redsync.WithTries(1))
+
+		// nosemgrep: missing-unlock-before-return
+		// this lock is held indefinitely; it is extended until the leader dies
+		if err := mutex.Lock(); err != nil {
+			log.Debug("failed to obtain lock", "err", err)
+			ct.leader = false
+			return
+		}
+
+		log.Info("lock acquired", "mutex", mutex.Name(), "val", mutex.Value())
+		ct.redlock = mutex
+		ct.leader = true
+		ct.postPayload(mutex.Value())
+	}
+}
+
+func (ct *RedisConsensusTracker) key(tag string) string {
+	return fmt.Sprintf("consensus:%s:%s", ct.namespace, tag)
+}
+
+func (ct 
*RedisConsensusTracker) GetLatestBlockNumber() hexutil.Uint64 { + return ct.remote.GetLatestBlockNumber() +} + +func (ct *RedisConsensusTracker) SetLatestBlockNumber(blockNumber hexutil.Uint64) { + ct.local.SetLatestBlockNumber(blockNumber) +} + +func (ct *RedisConsensusTracker) GetSafeBlockNumber() hexutil.Uint64 { + return ct.remote.GetSafeBlockNumber() +} + +func (ct *RedisConsensusTracker) SetSafeBlockNumber(blockNumber hexutil.Uint64) { + ct.local.SetSafeBlockNumber(blockNumber) +} + +func (ct *RedisConsensusTracker) GetFinalizedBlockNumber() hexutil.Uint64 { + return ct.remote.GetFinalizedBlockNumber() +} + +func (ct *RedisConsensusTracker) SetFinalizedBlockNumber(blockNumber hexutil.Uint64) { + ct.local.SetFinalizedBlockNumber(blockNumber) +} + +func (ct *RedisConsensusTracker) postPayload(mutexVal string) { + jsonState, err := json.Marshal(ct.local.state) + if err != nil { + log.Error("failed to marshal local", "err", err) + RecordGroupConsensusError(ct.backendGroup, "leader_marshal_local_state", err) + ct.leader = false + return + } + err = ct.client.Set(ct.ctx, ct.key(fmt.Sprintf("state:%s", mutexVal)), jsonState, ct.lockPeriod).Err() + if err != nil { + log.Error("failed to post the state", "err", err) + RecordGroupConsensusError(ct.backendGroup, "leader_post_state", err) + ct.leader = false + return + } + + leader, _ := os.LookupEnv("HOSTNAME") + err = ct.client.Set(ct.ctx, ct.key(fmt.Sprintf("leader:%s", mutexVal)), leader, ct.lockPeriod).Err() + if err != nil { + log.Error("failed to post the leader", "err", err) + RecordGroupConsensusError(ct.backendGroup, "leader_post_leader", err) + ct.leader = false + return + } + + log.Debug("posted state", "state", string(jsonState), "leader", leader) + + ct.leaderName = leader + ct.remote.update(ct.local.state) + + RecordGroupConsensusHALatestBlock(ct.backendGroup, leader, ct.remote.state.Latest) + RecordGroupConsensusHASafeBlock(ct.backendGroup, leader, ct.remote.state.Safe) + RecordGroupConsensusHAFinalizedBlock(ct.backendGroup, leader, ct.remote.state.Finalized) +} diff --git a/proxyd/entrypoint.sh b/proxyd/entrypoint.sh new file mode 100644 index 0000000..ef83fa8 --- /dev/null +++ b/proxyd/entrypoint.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +echo "Updating CA certificates." +update-ca-certificates +echo "Running CMD." +exec "$@" \ No newline at end of file diff --git a/proxyd/errors.go b/proxyd/errors.go new file mode 100644 index 0000000..51f8df6 --- /dev/null +++ b/proxyd/errors.go @@ -0,0 +1,7 @@ +package proxyd + +import "fmt" + +func wrapErr(err error, msg string) error { + return fmt.Errorf("%s %w", msg, err) +} diff --git a/proxyd/example.config.toml b/proxyd/example.config.toml new file mode 100644 index 0000000..b54b342 --- /dev/null +++ b/proxyd/example.config.toml @@ -0,0 +1,123 @@ +# List of WS methods to whitelist. +ws_method_whitelist = [ + "eth_subscribe", + "eth_call", + "eth_chainId" +] +# Enable WS on this backend group. There can only be one WS-enabled backend group. +ws_backend_group = "main" + +[server] +# Host for the proxyd RPC server to listen on. +rpc_host = "0.0.0.0" +# Port for the above. +rpc_port = 8080 +# Host for the proxyd WS server to listen on. +ws_host = "0.0.0.0" +# Port for the above +# Set the ws_port to 0 to disable WS +ws_port = 8085 +# Maximum client body size, in bytes, that the server will accept. +max_body_size_bytes = 10485760 +max_concurrent_rpcs = 1000 +# Server log level +log_level = "info" + +[redis] +# URL to a Redis instance. 
+url = "redis://localhost:6379"
+
+[metrics]
+# Whether or not to enable Prometheus metrics.
+enabled = true
+# Host for the Prometheus metrics endpoint to listen on.
+host = "0.0.0.0"
+# Port for the above.
+port = 9761
+
+[backend]
+# How long proxyd should wait for a backend response before timing out.
+response_timeout_seconds = 5
+# Maximum response size, in bytes, that proxyd will accept from a backend.
+max_response_size_bytes = 5242880
+# Maximum number of times proxyd will try a backend before giving up.
+max_retries = 3
+# Number of seconds to wait before trying an unhealthy backend again.
+out_of_service_seconds = 600
+# Maximum latency accepted to serve requests, default 10s
+max_latency_threshold = "30s"
+# Maximum latency accepted before the backend is considered degraded, default 5s
+max_degraded_latency_threshold = "10s"
+# Maximum error rate accepted to serve requests, default 0.5 (i.e. 50%)
+max_error_rate_threshold = 0.3
+
+[backends]
+# A map of backends by name.
+[backends.infura]
+# The URL to contact the backend at. Will be read from the environment
+# if an environment variable prefixed with $ is provided.
+rpc_url = ""
+# The WS URL to contact the backend at. Will be read from the environment
+# if an environment variable prefixed with $ is provided.
+ws_url = ""
+username = ""
+# An HTTP Basic password to authenticate with the backend. Will be read from
+# the environment if an environment variable prefixed with $ is provided.
+password = ""
+max_rps = 3
+max_ws_conns = 1
+# Path to a custom root CA.
+ca_file = ""
+# Path to a custom client cert file.
+client_cert_file = ""
+# Path to a custom client key file.
+client_key_file = ""
+# Allows backends to skip peer count checking, default false
+# consensus_skip_peer_count = true
+# Specifies the target method to get receipts, default "debug_getRawReceipts"
+# See https://github.com/ethereum-optimism/optimism/blob/186e46a47647a51a658e699e9ff047d39444c2de/op-node/sources/receipts.go#L186-L253
+consensus_receipts_target = "eth_getBlockReceipts"
+
+[backends.alchemy]
+rpc_url = ""
+ws_url = ""
+username = ""
+password = ""
+max_rps = 3
+max_ws_conns = 1
+consensus_receipts_target = "alchemy_getTransactionReceipts"
+
+[backend_groups]
+[backend_groups.main]
+backends = ["infura"]
+# Enable consensus awareness for the backend group, making it act as a load balancer, default false
+# consensus_aware = true
+# Period during which a banned backend won't serve requests, default 5m
+# consensus_ban_period = "1m"
+# Maximum delay for updating the backend, default 30s
+# consensus_max_update_threshold = "20s"
+# Maximum block lag, default 8
+# consensus_max_block_lag = 16
+# Maximum block range (for the eth_getLogs method), no default
+# consensus_max_block_range = 20000
+# Minimum peer count, default 3
+# consensus_min_peer_count = 4
+
+[backend_groups.alchemy]
+backends = ["alchemy"]
+
+# If the authentication group below is in the config,
+# proxyd will only accept authenticated requests.
+[authentication]
+# Mapping of auth key to alias. The alias is used to provide a human-
+# readable name for the auth key in monitoring. The auth key will be
+# read from the environment if an environment variable prefixed with $
+# is provided. Note that you will need to quote the environment variable
+# in order for it to be valid TOML, e.g. "$FOO_AUTH_KEY" = "foo_alias".
+secret = "test"
+
+# Mapping of methods to backend groups.
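+# Note: requests for methods not listed here are rejected with
+# whitelist_error_message.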
+[rpc_method_mappings]
+eth_call = "main"
+eth_chainId = "main"
+eth_blockNumber = "alchemy"
diff --git a/proxyd/frontend_rate_limiter.go b/proxyd/frontend_rate_limiter.go
new file mode 100644
index 0000000..d0590f0
--- /dev/null
+++ b/proxyd/frontend_rate_limiter.go
@@ -0,0 +1,139 @@
+package proxyd
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/redis/go-redis/v9"
+)
+
+type FrontendRateLimiter interface {
+	// Take consumes a key, and a maximum number of requests
+	// per time interval. It returns a boolean denoting if
+	// the limit could be taken, or an error if a failure
+	// occurred in the backing rate limit implementation.
+	//
+	// No error will be returned if the limit could not be taken
+	// as a result of the requestor being over the limit.
+	Take(ctx context.Context, key string) (bool, error)
+}
+
+// limitedKeys is a wrapper around a map that stores a truncated
+// timestamp and a mutex. The map is used to keep track of rate
+// limit keys and their used limits.
+type limitedKeys struct {
+	truncTS int64
+	keys    map[string]int
+	mtx     sync.Mutex
+}
+
+func newLimitedKeys(t int64) *limitedKeys {
+	return &limitedKeys{
+		truncTS: t,
+		keys:    make(map[string]int),
+	}
+}
+
+func (l *limitedKeys) Take(key string, max int) bool {
+	l.mtx.Lock()
+	defer l.mtx.Unlock()
+	val, ok := l.keys[key]
+	if !ok {
+		l.keys[key] = 0
+		val = 0
+	}
+	l.keys[key] = val + 1
+	return val < max
+}
+
+// MemoryFrontendRateLimiter is a rate limiter that stores
+// all rate limiting information in local memory. It works
+// by storing a limitedKeys struct that references the
+// truncated timestamp at which the struct was created. If
+// the current truncated timestamp doesn't match what's
+// referenced, the limit is reset. Otherwise, values in
+// a map are incremented to represent the limit.
+type MemoryFrontendRateLimiter struct {
+	currGeneration *limitedKeys
+	dur            time.Duration
+	max            int
+	mtx            sync.Mutex
+}
+
+func NewMemoryFrontendRateLimit(dur time.Duration, max int) FrontendRateLimiter {
+	return &MemoryFrontendRateLimiter{
+		dur: dur,
+		max: max,
+	}
+}
+
+func (m *MemoryFrontendRateLimiter) Take(ctx context.Context, key string) (bool, error) {
+	m.mtx.Lock()
+	// Create truncated timestamp
+	truncTS := truncateNow(m.dur)
+
+	// If there is no current rate limit map or the rate limit map references
+	// a different timestamp, reset limits.
+	if m.currGeneration == nil || m.currGeneration.truncTS != truncTS {
+		m.currGeneration = newLimitedKeys(truncTS)
+	}
+
+	// Pull out the limiter so we can unlock before incrementing the limit.
+	limiter := m.currGeneration
+
+	m.mtx.Unlock()
+
+	return limiter.Take(key, m.max), nil
+}
+
+// RedisFrontendRateLimiter is a rate limiter that stores data in Redis.
+// It uses the basic rate limiter pattern described on the Redis best
+// practices website: https://redis.com/redis-best-practices/basic-rate-limiting/.
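+// Each (prefix, key) pair maps to a counter whose Redis key embeds the
+// interval-truncated timestamp; INCR and PEXPIRE run in a single pipeline,
+// so per-interval buckets expire on their own.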
+type RedisFrontendRateLimiter struct { + r *redis.Client + dur time.Duration + max int + prefix string +} + +func NewRedisFrontendRateLimiter(r *redis.Client, dur time.Duration, max int, prefix string) FrontendRateLimiter { + return &RedisFrontendRateLimiter{ + r: r, + dur: dur, + max: max, + prefix: prefix, + } +} + +func (r *RedisFrontendRateLimiter) Take(ctx context.Context, key string) (bool, error) { + var incr *redis.IntCmd + truncTS := truncateNow(r.dur) + fullKey := fmt.Sprintf("rate_limit:%s:%s:%d", r.prefix, key, truncTS) + _, err := r.r.Pipelined(ctx, func(pipe redis.Pipeliner) error { + incr = pipe.Incr(ctx, fullKey) + pipe.PExpire(ctx, fullKey, r.dur-time.Millisecond) + return nil + }) + if err != nil { + frontendRateLimitTakeErrors.Inc() + return false, err + } + + return incr.Val()-1 < int64(r.max), nil +} + +type noopFrontendRateLimiter struct{} + +var NoopFrontendRateLimiter = &noopFrontendRateLimiter{} + +func (n *noopFrontendRateLimiter) Take(ctx context.Context, key string) (bool, error) { + return true, nil +} + +// truncateNow truncates the current timestamp +// to the specified duration. +func truncateNow(dur time.Duration) int64 { + return time.Now().Truncate(dur).Unix() +} diff --git a/proxyd/frontend_rate_limiter_test.go b/proxyd/frontend_rate_limiter_test.go new file mode 100644 index 0000000..fb5f808 --- /dev/null +++ b/proxyd/frontend_rate_limiter_test.go @@ -0,0 +1,53 @@ +package proxyd + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/alicebob/miniredis" + "github.com/redis/go-redis/v9" + "github.com/stretchr/testify/require" +) + +func TestFrontendRateLimiter(t *testing.T) { + redisServer, err := miniredis.Run() + require.NoError(t, err) + defer redisServer.Close() + + redisClient := redis.NewClient(&redis.Options{ + Addr: fmt.Sprintf("127.0.0.1:%s", redisServer.Port()), + }) + + max := 2 + lims := []struct { + name string + frl FrontendRateLimiter + }{ + {"memory", NewMemoryFrontendRateLimit(2*time.Second, max)}, + {"redis", NewRedisFrontendRateLimiter(redisClient, 2*time.Second, max, "")}, + } + + for _, cfg := range lims { + frl := cfg.frl + ctx := context.Background() + t.Run(cfg.name, func(t *testing.T) { + for i := 0; i < 4; i++ { + ok, err := frl.Take(ctx, "foo") + require.NoError(t, err) + require.Equal(t, i < max, ok) + ok, err = frl.Take(ctx, "bar") + require.NoError(t, err) + require.Equal(t, i < max, ok) + } + time.Sleep(2 * time.Second) + for i := 0; i < 4; i++ { + ok, _ := frl.Take(ctx, "foo") + require.Equal(t, i < max, ok) + ok, _ = frl.Take(ctx, "bar") + require.Equal(t, i < max, ok) + } + }) + } +} diff --git a/proxyd/go.mod b/proxyd/go.mod new file mode 100644 index 0000000..088bf9b --- /dev/null +++ b/proxyd/go.mod @@ -0,0 +1,86 @@ +module github.com/ethereum-optimism/optimism/proxyd + +go 1.21 + +require ( + github.com/BurntSushi/toml v1.3.2 + github.com/alicebob/miniredis v2.5.0+incompatible + github.com/emirpasic/gods v1.18.1 + github.com/ethereum/go-ethereum v1.13.15 + github.com/go-redsync/redsync/v4 v4.10.0 + github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb + github.com/gorilla/mux v1.8.0 + github.com/gorilla/websocket v1.5.0 + github.com/hashicorp/golang-lru v1.0.2 + github.com/pkg/errors v0.9.1 + github.com/prometheus/client_golang v1.17.0 + github.com/redis/go-redis/v9 v9.2.1 + github.com/rs/cors v1.10.1 + github.com/stretchr/testify v1.8.4 + github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 + github.com/xaionaro-go/weightedshuffle v0.0.0-20211213010739-6a74fbc7d24a + 
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa + golang.org/x/sync v0.5.0 + gopkg.in/yaml.v3 v3.0.1 +) + +require ( + github.com/DataDog/zstd v1.5.5 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/VictoriaMetrics/fastcache v1.12.1 // indirect + github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bits-and-blooms/bitset v1.10.0 // indirect + github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cockroachdb/errors v1.11.1 // indirect + github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect + github.com/cockroachdb/pebble v0.0.0-20231020221949-babd592d2360 // indirect + github.com/cockroachdb/redact v1.1.5 // indirect + github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect + github.com/consensys/bavard v0.1.13 // indirect + github.com/consensys/gnark-crypto v0.12.1 // indirect + github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 // indirect + github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/deckarep/golang-set/v2 v2.3.1 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect + github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect + github.com/ethereum/c-kzg-4844 v0.4.0 // indirect + github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 // indirect + github.com/getsentry/sentry-go v0.25.0 // indirect + github.com/go-ole/go-ole v1.3.0 // indirect + github.com/gofrs/flock v0.8.1 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/gomodule/redigo v1.8.9 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/holiman/bloomfilter/v2 v2.0.3 // indirect + github.com/holiman/uint256 v1.2.4 // indirect + github.com/klauspost/compress v1.17.1 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/mattn/go-runewidth v0.0.15 // indirect + github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect + github.com/mmcloughlin/addchain v0.4.0 // indirect + github.com/olekukonko/tablewriter v0.0.5 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/common v0.45.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect + github.com/rivo/uniseg v0.4.4 // indirect + github.com/rogpeppe/go-internal v1.11.0 // indirect + github.com/shirou/gopsutil v3.21.11+incompatible // indirect + github.com/supranational/blst v0.3.11 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect + github.com/yuin/gopher-lua v1.1.0 // indirect + github.com/yusufpapurcu/wmi v1.2.3 // indirect + golang.org/x/crypto v0.17.0 // indirect + golang.org/x/mod v0.14.0 // indirect + golang.org/x/sys v0.16.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/tools v0.15.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect + rsc.io/tmplfunc v0.0.3 // indirect +) diff --git a/proxyd/go.sum b/proxyd/go.sum new file mode 100644 index 0000000..11a684f --- /dev/null +++ b/proxyd/go.sum @@ -0,0 +1,290 @@ +github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= +github.com/BurntSushi/toml v1.3.2/go.mod 
h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= +github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40= +github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o= +github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302 h1:uvdUDbHQHO85qeSydJtItA4T55Pw6BtAejd0APRJOCE= +github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= +github.com/alicebob/miniredis v2.5.0+incompatible h1:yBHoLpsyjupjz3NL3MhKMVkR41j82Yjf3KFv7ApYzUI= +github.com/alicebob/miniredis v2.5.0+incompatible/go.mod h1:8HZjEj4yU0dwhYHky+DxYx+6BMjkBbe5ONFIF1MXffk= +github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= +github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bits-and-blooms/bitset v1.10.0 h1:ePXTeiPEazB5+opbv5fr8umg2R/1NlzgDsyepwsSr88= +github.com/bits-and-blooms/bitset v1.10.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= +github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= +github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= +github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= +github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= +github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/cockroachdb/errors v1.11.1 h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8= +github.com/cockroachdb/errors v1.11.1/go.mod h1:8MUxA3Gi6b25tYlFEBGLf+D8aISL+M4MIpiWMSNRfxw= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/pebble v0.0.0-20231020221949-babd592d2360 h1:x1dzGu9e1FYmkG8mL9emtdWD1EzH/17SijnoLvKvPiM= +github.com/cockroachdb/pebble v0.0.0-20231020221949-babd592d2360/go.mod h1:sEHm5NOXxyiAoKWhoFxT8xMgd/f3RA6qUqQ1BXKrh2E= +github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= +github.com/cockroachdb/redact 
v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= +github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ= +github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= +github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M= +github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY= +github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 h1:d28BXYi+wUpz1KBmiF9bWrjEMacUEREV6MBi2ODnrfQ= +github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233/go.mod h1:geZJZH3SzKCqnz5VT0q/DyIG/tvu/dZk+VIfXicupJs= +github.com/crate-crypto/go-kzg-4844 v0.7.0 h1:C0vgZRk4q4EZ/JgPfzuSoxdCq3C3mOZMBShovmncxvA= +github.com/crate-crypto/go-kzg-4844 v0.7.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/deckarep/golang-set/v2 v2.3.1 h1:vjmkvJt/IV27WXPyYQpAh4bRyWJc5Y435D17XQ9QU5A= +github.com/deckarep/golang-set/v2 v2.3.1/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= +github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= +github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= +github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= +github.com/ethereum/c-kzg-4844 v0.4.0 h1:3MS1s4JtA868KpJxroZoepdV0ZKBp3u/O5HcZ7R3nlY= +github.com/ethereum/c-kzg-4844 v0.4.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= +github.com/ethereum/go-ethereum v1.13.15 h1:U7sSGYGo4SPjP6iNIifNoyIAiNjrmQkz6EwQG+/EZWo= +github.com/ethereum/go-ethereum v1.13.15/go.mod h1:TN8ZiHrdJwSe8Cb6x+p0hs5CxhJZPbqB7hHkaUXcmIU= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 h1:BAIP2GihuqhwdILrV+7GJel5lyPV3u1+PgzrWLc0TkE= +github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46/go.mod h1:QNpY22eby74jVhqH4WhDLDwxc/vqsern6pW+u2kbkpc= +github.com/getsentry/sentry-go v0.25.0 h1:q6Eo+hS+yoJlTO3uu/azhQadsD8V+jQn2D8VvX1eOyI= +github.com/getsentry/sentry-go v0.25.0/go.mod 
h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= +github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= +github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg= +github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= +github.com/go-redis/redis/v7 v7.4.0 h1:7obg6wUoj05T0EpY0o8B59S9w5yeMWql7sw2kwNW1x4= +github.com/go-redis/redis/v7 v7.4.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg= +github.com/go-redis/redis/v8 v8.11.4 h1:kHoYkfZP6+pe04aFTnhDH6GDROa5yJdHJVNxV3F46Tg= +github.com/go-redis/redis/v8 v8.11.4/go.mod h1:2Z2wHZXdQpCDXEGzqMockDpNyYvi2l4Pxt6RJr792+w= +github.com/go-redsync/redsync/v4 v4.10.0 h1:hTeAak4C73mNBQSTq6KCKDFaiIlfC+z5yTTl8fCJuBs= +github.com/go-redsync/redsync/v4 v4.10.0/go.mod h1:ZfayzutkgeBmEmBlUR3j+rF6kN44UUGtEdfzhBFZTPc= +github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= +github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/gomodule/redigo v1.8.9 h1:Sl3u+2BI/kk+VEatbj0scLdrFhjPmbxOc1myhDP41ws= +github.com/gomodule/redigo v1.8.9/go.mod h1:7ArFNvsTjH8GMMzB4uy1snslv2BwmginuMs06a1uzZE= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= 
+github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= +github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= +github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= +github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU= +github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.1 h1:NE3C767s2ak2bweCZo3+rdP4U/HoyVXLv/X9f2gPS5g= +github.com/klauspost/compress v1.17.1/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= +github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= +github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= +github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= +github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= +github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0 
h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= +github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= +github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/redis/go-redis/v9 v9.2.1 h1:WlYJg71ODF0dVspZZCpYmoF1+U1Jjk9Rwd7pq6QmlCg= +github.com/redis/go-redis/v9 v9.2.1/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M= +github.com/redis/rueidis v1.0.19 h1:s65oWtotzlIFN8eMPhyYwxlwLR1lUdhza2KtWprKYSo= +github.com/redis/rueidis v1.0.19/go.mod h1:8B+r5wdnjwK3lTFml5VtxjzGOQAC+5UmujoD12pDrEo= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= +github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo= +github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= +github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203 h1:QVqDTf3h2WHt08YuiTGPZLls0Wq99X9bWd0Q5ZSBesM= 
+github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203/go.mod h1:oqN97ltKNihBbwlX8dLpwxCl3+HnXKV/R0e+sRLd9C8= +github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4= +github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/xaionaro-go/weightedshuffle v0.0.0-20211213010739-6a74fbc7d24a h1:WS5nQycV+82Ndezq0UcMcGVG416PZgcJPqI/bLM824A= +github.com/xaionaro-go/weightedshuffle v0.0.0-20211213010739-6a74fbc7d24a/go.mod h1:0KAUfC65le2kMu4fnBxm7Xj3PkQ3MBpJbF5oMmqufBc= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/gopher-lua v1.1.0 h1:BojcDhfyDWgU2f2TOzYK/g5p2gxMrku8oupLDqlnSqE= +github.com/yuin/gopher-lua v1.1.0/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= +github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= +github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ= +golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg= +golang.org/x/net v0.18.0/go.mod 
h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.15.0 h1:zdAyfUGbYmuVokhzVmghFl2ZJh5QhcfebBgmVPFYA+8= +golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors 
v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= +rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= diff --git a/proxyd/integration_tests/batch_timeout_test.go b/proxyd/integration_tests/batch_timeout_test.go new file mode 100644 index 0000000..4906c1d --- /dev/null +++ b/proxyd/integration_tests/batch_timeout_test.go @@ -0,0 +1,42 @@ +package integration_tests + +import ( + "net/http" + "os" + "testing" + "time" + + "github.com/ethereum-optimism/optimism/proxyd" + "github.com/stretchr/testify/require" +) + +const ( + batchTimeoutResponse = `{"error":{"code":-32015,"message":"gateway timeout"},"id":null,"jsonrpc":"2.0"}` +) + +func TestBatchTimeout(t *testing.T) { + slowBackend := NewMockBackend(nil) + defer slowBackend.Close() + + require.NoError(t, os.Setenv("SLOW_BACKEND_RPC_URL", slowBackend.URL())) + + config := ReadConfig("batch_timeout") + client := NewProxydClient("http://127.0.0.1:8545") + _, shutdown, err := proxyd.Start(config) + require.NoError(t, err) + defer shutdown() + + slowBackend.SetHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // check the config. 
The sleep duration should be at least double the server.timeout_seconds config to prevent flakes + time.Sleep(time.Second * 2) + BatchedResponseHandler(200, goodResponse)(w, r) + })) + res, statusCode, err := client.SendBatchRPC( + NewRPCReq("1", "eth_chainId", nil), + NewRPCReq("1", "eth_chainId", nil), + ) + require.NoError(t, err) + require.Equal(t, 504, statusCode) + RequireEqualJSON(t, []byte(batchTimeoutResponse), res) + require.Equal(t, 1, len(slowBackend.Requests())) +} diff --git a/proxyd/integration_tests/batching_test.go b/proxyd/integration_tests/batching_test.go new file mode 100644 index 0000000..c1f8b38 --- /dev/null +++ b/proxyd/integration_tests/batching_test.go @@ -0,0 +1,188 @@ +package integration_tests + +import ( + "net/http" + "os" + "testing" + + "github.com/ethereum-optimism/optimism/proxyd" + "github.com/stretchr/testify/require" +) + +func TestBatching(t *testing.T) { + config := ReadConfig("batching") + + chainIDResponse1 := `{"jsonrpc": "2.0", "result": "hello1", "id": 1}` + chainIDResponse2 := `{"jsonrpc": "2.0", "result": "hello2", "id": 2}` + chainIDResponse3 := `{"jsonrpc": "2.0", "result": "hello3", "id": 3}` + netVersionResponse1 := `{"jsonrpc": "2.0", "result": "1.0", "id": 1}` + callResponse1 := `{"jsonrpc": "2.0", "result": "ekans1", "id": 1}` + + ethAccountsResponse2 := `{"jsonrpc": "2.0", "result": [], "id": 2}` + + backendResTooLargeResponse1 := `{"error":{"code":-32020,"message":"backend response too large"},"id":1,"jsonrpc":"2.0"}` + backendResTooLargeResponse2 := `{"error":{"code":-32020,"message":"backend response too large"},"id":2,"jsonrpc":"2.0"}` + + type mockResult struct { + method string + id string + result interface{} + } + + chainIDMock1 := mockResult{"eth_chainId", "1", "hello1"} + chainIDMock2 := mockResult{"eth_chainId", "2", "hello2"} + chainIDMock3 := mockResult{"eth_chainId", "3", "hello3"} + netVersionMock1 := mockResult{"net_version", "1", "1.0"} + callMock1 := mockResult{"eth_call", "1", "ekans1"} + + tests := []struct { + name string + handler http.Handler + mocks []mockResult + reqs []*proxyd.RPCReq + expectedRes string + maxUpstreamBatchSize int + numExpectedForwards int + maxResponseSizeBytes int64 + }{ + { + name: "backend returns batches out of order", + mocks: []mockResult{chainIDMock1, chainIDMock2, chainIDMock3}, + reqs: []*proxyd.RPCReq{ + NewRPCReq("1", "eth_chainId", nil), + NewRPCReq("2", "eth_chainId", nil), + NewRPCReq("3", "eth_chainId", nil), + }, + expectedRes: asArray(chainIDResponse1, chainIDResponse2, chainIDResponse3), + maxUpstreamBatchSize: 2, + numExpectedForwards: 2, + }, + { + // infura behavior + name: "backend returns single RPC response object as error", + handler: SingleResponseHandler(500, `{"jsonrpc":"2.0","error":{"code":-32001,"message":"internal server error"},"id":1}`), + reqs: []*proxyd.RPCReq{ + NewRPCReq("1", "eth_chainId", nil), + NewRPCReq("2", "eth_chainId", nil), + }, + expectedRes: asArray( + `{"error":{"code":-32011,"message":"no backends available for method"},"id":1,"jsonrpc":"2.0"}`, + `{"error":{"code":-32011,"message":"no backends available for method"},"id":2,"jsonrpc":"2.0"}`, + ), + maxUpstreamBatchSize: 10, + numExpectedForwards: 1, + }, + { + name: "backend returns single RPC response object for minibatches", + handler: SingleResponseHandler(500, `{"jsonrpc":"2.0","error":{"code":-32001,"message":"internal server error"},"id":1}`), + reqs: []*proxyd.RPCReq{ + NewRPCReq("1", "eth_chainId", nil), + NewRPCReq("2", "eth_chainId", nil), + }, + expectedRes: asArray( + 
`{"error":{"code":-32011,"message":"no backends available for method"},"id":1,"jsonrpc":"2.0"}`, + `{"error":{"code":-32011,"message":"no backends available for method"},"id":2,"jsonrpc":"2.0"}`, + ), + maxUpstreamBatchSize: 1, + numExpectedForwards: 2, + }, + { + name: "duplicate request ids are on distinct batches", + mocks: []mockResult{ + netVersionMock1, + chainIDMock2, + chainIDMock1, + callMock1, + }, + reqs: []*proxyd.RPCReq{ + NewRPCReq("1", "net_version", nil), + NewRPCReq("2", "eth_chainId", nil), + NewRPCReq("1", "eth_chainId", nil), + NewRPCReq("1", "eth_call", nil), + }, + expectedRes: asArray(netVersionResponse1, chainIDResponse2, chainIDResponse1, callResponse1), + maxUpstreamBatchSize: 2, + numExpectedForwards: 3, + }, + { + name: "over max size", + mocks: []mockResult{}, + reqs: []*proxyd.RPCReq{ + NewRPCReq("1", "net_version", nil), + NewRPCReq("2", "eth_chainId", nil), + NewRPCReq("3", "eth_chainId", nil), + NewRPCReq("4", "eth_call", nil), + NewRPCReq("5", "eth_call", nil), + NewRPCReq("6", "eth_call", nil), + }, + expectedRes: "{\"error\":{\"code\":-32014,\"message\":\"over batch size custom message\"},\"id\":null,\"jsonrpc\":\"2.0\"}", + maxUpstreamBatchSize: 2, + numExpectedForwards: 0, + }, + { + name: "eth_accounts does not get forwarded", + mocks: []mockResult{ + callMock1, + }, + reqs: []*proxyd.RPCReq{ + NewRPCReq("1", "eth_call", nil), + NewRPCReq("2", "eth_accounts", nil), + }, + expectedRes: asArray(callResponse1, ethAccountsResponse2), + maxUpstreamBatchSize: 2, + numExpectedForwards: 1, + }, + { + name: "large upstream response gets dropped", + mocks: []mockResult{chainIDMock1, chainIDMock2}, + reqs: []*proxyd.RPCReq{ + NewRPCReq("1", "eth_chainId", nil), + NewRPCReq("2", "eth_chainId", nil), + }, + expectedRes: asArray(backendResTooLargeResponse1, backendResTooLargeResponse2), + maxUpstreamBatchSize: 2, + numExpectedForwards: 1, + maxResponseSizeBytes: 1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + config.Server.MaxUpstreamBatchSize = tt.maxUpstreamBatchSize + config.BackendOptions.MaxResponseSizeBytes = tt.maxResponseSizeBytes + + handler := tt.handler + if handler == nil { + router := NewBatchRPCResponseRouter() + for _, mock := range tt.mocks { + router.SetRoute(mock.method, mock.id, mock.result) + } + handler = router + } + + goodBackend := NewMockBackend(handler) + defer goodBackend.Close() + require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL())) + + client := NewProxydClient("http://127.0.0.1:8545") + _, shutdown, err := proxyd.Start(config) + require.NoError(t, err) + defer shutdown() + + res, statusCode, err := client.SendBatchRPC(tt.reqs...) 
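+			// Even when individual sub-requests fail, these batch cases expect
+			// HTTP 200; per-request failures surface as JSON-RPC error objects
+			// inside the response body (see expectedRes above).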
+ require.NoError(t, err) + require.Equal(t, http.StatusOK, statusCode) + RequireEqualJSON(t, []byte(tt.expectedRes), res) + + if tt.numExpectedForwards != 0 { + require.Equal(t, tt.numExpectedForwards, len(goodBackend.Requests())) + } + + if handler, ok := handler.(*BatchRPCResponseRouter); ok { + for i, mock := range tt.mocks { + require.Equal(t, 1, handler.GetNumCalls(mock.method, mock.id), i) + } + } + }) + } +} diff --git a/proxyd/integration_tests/caching_test.go b/proxyd/integration_tests/caching_test.go new file mode 100644 index 0000000..e74b85b --- /dev/null +++ b/proxyd/integration_tests/caching_test.go @@ -0,0 +1,275 @@ +package integration_tests + +import ( + "bytes" + "fmt" + "os" + "testing" + "time" + + "github.com/alicebob/miniredis" + "github.com/ethereum-optimism/optimism/proxyd" + "github.com/stretchr/testify/require" +) + +func TestCaching(t *testing.T) { + redis, err := miniredis.Run() + require.NoError(t, err) + defer redis.Close() + + hdlr := NewBatchRPCResponseRouter() + /* cacheable */ + hdlr.SetRoute("eth_chainId", "999", "0x420") + hdlr.SetRoute("net_version", "999", "0x1234") + hdlr.SetRoute("eth_getBlockTransactionCountByHash", "999", "eth_getBlockTransactionCountByHash") + hdlr.SetRoute("eth_getBlockByHash", "999", "eth_getBlockByHash") + hdlr.SetRoute("eth_getTransactionByHash", "999", "eth_getTransactionByHash") + hdlr.SetRoute("eth_getTransactionByBlockHashAndIndex", "999", "eth_getTransactionByBlockHashAndIndex") + hdlr.SetRoute("eth_getUncleByBlockHashAndIndex", "999", "eth_getUncleByBlockHashAndIndex") + hdlr.SetRoute("eth_getTransactionReceipt", "999", "eth_getTransactionReceipt") + hdlr.SetRoute("debug_getRawReceipts", "999", "debug_getRawReceipts") + /* not cacheable */ + hdlr.SetRoute("eth_getBlockByNumber", "999", "eth_getBlockByNumber") + hdlr.SetRoute("eth_blockNumber", "999", "eth_blockNumber") + hdlr.SetRoute("eth_call", "999", "eth_call") + + backend := NewMockBackend(hdlr) + defer backend.Close() + + require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", backend.URL())) + require.NoError(t, os.Setenv("REDIS_URL", fmt.Sprintf("redis://127.0.0.1:%s", redis.Port()))) + config := ReadConfig("caching") + client := NewProxydClient("http://127.0.0.1:8545") + _, shutdown, err := proxyd.Start(config) + require.NoError(t, err) + defer shutdown() + + // allow time for the block number fetcher to fire + time.Sleep(1500 * time.Millisecond) + + tests := []struct { + method string + params []interface{} + response string + backendCalls int + }{ + /* cacheable */ + { + "eth_chainId", + nil, + "{\"jsonrpc\": \"2.0\", \"result\": \"0x420\", \"id\": 999}", + 1, + }, + { + "net_version", + nil, + "{\"jsonrpc\": \"2.0\", \"result\": \"0x1234\", \"id\": 999}", + 1, + }, + { + "eth_getBlockTransactionCountByHash", + []interface{}{"0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238"}, + "{\"jsonrpc\": \"2.0\", \"result\": \"eth_getBlockTransactionCountByHash\", \"id\": 999}", + 1, + }, + { + "eth_getBlockByHash", + []interface{}{"0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b", "false"}, + "{\"jsonrpc\": \"2.0\", \"result\": \"eth_getBlockByHash\", \"id\": 999}", + 1, + }, + { + "eth_getTransactionByBlockHashAndIndex", + []interface{}{"0xe670ec64341771606e55d6b4ca35a1a6b75ee3d5145a99d05921026d1527331", "0x55"}, + "{\"jsonrpc\": \"2.0\", \"result\": \"eth_getTransactionByBlockHashAndIndex\", \"id\": 999}", + 1, + }, + { + "eth_getUncleByBlockHashAndIndex", + 
[]interface{}{"0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238", "0x90"}, + "{\"jsonrpc\": \"2.0\", \"result\": \"eth_getUncleByBlockHashAndIndex\", \"id\": 999}", + 1, + }, + /* not cacheable */ + { + "eth_getBlockByNumber", + []interface{}{ + "0x1", + true, + }, + "{\"jsonrpc\": \"2.0\", \"result\": \"eth_getBlockByNumber\", \"id\": 999}", + 2, + }, + { + "eth_getTransactionReceipt", + []interface{}{"0x85d995eba9763907fdf35cd2034144dd9d53ce32cbec21349d4b12823c6860c5"}, + "{\"jsonrpc\": \"2.0\", \"result\": \"eth_getTransactionReceipt\", \"id\": 999}", + 2, + }, + { + "eth_getTransactionByHash", + []interface{}{"0x88df016429689c079f3b2f6ad39fa052532c56795b733da78a91ebe6a713944b"}, + "{\"jsonrpc\": \"2.0\", \"result\": \"eth_getTransactionByHash\", \"id\": 999}", + 2, + }, + { + "eth_call", + []interface{}{ + struct { + To string `json:"to"` + }{ + "0x1234", + }, + "0x60", + }, + "{\"jsonrpc\": \"2.0\", \"result\": \"eth_call\", \"id\": 999}", + 2, + }, + { + "eth_blockNumber", + nil, + "{\"jsonrpc\": \"2.0\", \"result\": \"eth_blockNumber\", \"id\": 999}", + 2, + }, + { + "eth_call", + []interface{}{ + struct { + To string `json:"to"` + }{ + "0x1234", + }, + "latest", + }, + "{\"jsonrpc\": \"2.0\", \"result\": \"eth_call\", \"id\": 999}", + 2, + }, + { + "eth_call", + []interface{}{ + struct { + To string `json:"to"` + }{ + "0x1234", + }, + "pending", + }, + "{\"jsonrpc\": \"2.0\", \"result\": \"eth_call\", \"id\": 999}", + 2, + }, + } + for _, tt := range tests { + t.Run(tt.method, func(t *testing.T) { + resRaw, _, err := client.SendRPC(tt.method, tt.params) + require.NoError(t, err) + resCache, _, err := client.SendRPC(tt.method, tt.params) + require.NoError(t, err) + RequireEqualJSON(t, []byte(tt.response), resCache) + RequireEqualJSON(t, resRaw, resCache) + require.Equal(t, tt.backendCalls, countRequests(backend, tt.method)) + backend.Reset() + }) + } + + t.Run("nil responses should not be cached", func(t *testing.T) { + hdlr.SetRoute("eth_getBlockByHash", "999", nil) + resRaw, _, err := client.SendRPC("eth_getBlockByHash", []interface{}{"0x123"}) + require.NoError(t, err) + resCache, _, err := client.SendRPC("eth_getBlockByHash", []interface{}{"0x123"}) + require.NoError(t, err) + RequireEqualJSON(t, []byte("{\"id\":999,\"jsonrpc\":\"2.0\",\"result\":null}"), resRaw) + RequireEqualJSON(t, resRaw, resCache) + require.Equal(t, 2, countRequests(backend, "eth_getBlockByHash")) + }) + + t.Run("debug_getRawReceipts with 0 receipts should not be cached", func(t *testing.T) { + backend.Reset() + hdlr.SetRoute("debug_getRawReceipts", "999", []string{}) + resRaw, _, err := client.SendRPC("debug_getRawReceipts", []interface{}{"0x88420081ab9c6d50dc57af36b541c6b8a7b3e9c0d837b0414512c4c5883560ff"}) + require.NoError(t, err) + resCache, _, err := client.SendRPC("debug_getRawReceipts", []interface{}{"0x88420081ab9c6d50dc57af36b541c6b8a7b3e9c0d837b0414512c4c5883560ff"}) + require.NoError(t, err) + RequireEqualJSON(t, []byte("{\"id\":999,\"jsonrpc\":\"2.0\",\"result\":[]}"), resRaw) + RequireEqualJSON(t, resRaw, resCache) + require.Equal(t, 2, countRequests(backend, "debug_getRawReceipts")) + }) + + t.Run("debug_getRawReceipts with more than 0 receipts should be cached", func(t *testing.T) { + backend.Reset() + hdlr.SetRoute("debug_getRawReceipts", "999", []string{"a"}) + resRaw, _, err := client.SendRPC("debug_getRawReceipts", []interface{}{"0x88420081ab9c6d50dc57af36b541c6b8a7b3e9c0d837b0414512c4c5883560bb"}) + require.NoError(t, err) + resCache, _, err := 
client.SendRPC("debug_getRawReceipts", []interface{}{"0x88420081ab9c6d50dc57af36b541c6b8a7b3e9c0d837b0414512c4c5883560bb"}) + require.NoError(t, err) + RequireEqualJSON(t, []byte("{\"id\":999,\"jsonrpc\":\"2.0\",\"result\":[\"a\"]}"), resRaw) + RequireEqualJSON(t, resRaw, resCache) + require.Equal(t, 1, countRequests(backend, "debug_getRawReceipts")) + }) +} + +func TestBatchCaching(t *testing.T) { + redis, err := miniredis.Run() + require.NoError(t, err) + defer redis.Close() + + hdlr := NewBatchRPCResponseRouter() + hdlr.SetRoute("eth_chainId", "1", "0x420") + hdlr.SetRoute("net_version", "1", "0x1234") + hdlr.SetRoute("eth_call", "1", "dummy_call") + hdlr.SetRoute("eth_getBlockByHash", "1", "eth_getBlockByHash") + + backend := NewMockBackend(hdlr) + defer backend.Close() + + require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", backend.URL())) + require.NoError(t, os.Setenv("REDIS_URL", fmt.Sprintf("redis://127.0.0.1:%s", redis.Port()))) + + config := ReadConfig("caching") + client := NewProxydClient("http://127.0.0.1:8545") + _, shutdown, err := proxyd.Start(config) + require.NoError(t, err) + defer shutdown() + + // allow time for the block number fetcher to fire + time.Sleep(1500 * time.Millisecond) + + goodChainIdResponse := "{\"jsonrpc\": \"2.0\", \"result\": \"0x420\", \"id\": 1}" + goodNetVersionResponse := "{\"jsonrpc\": \"2.0\", \"result\": \"0x1234\", \"id\": 1}" + goodEthCallResponse := "{\"jsonrpc\": \"2.0\", \"result\": \"dummy_call\", \"id\": 1}" + goodEthGetBlockByHash := "{\"jsonrpc\": \"2.0\", \"result\": \"eth_getBlockByHash\", \"id\": 1}" + + res, _, err := client.SendBatchRPC( + NewRPCReq("1", "eth_chainId", nil), + NewRPCReq("1", "net_version", nil), + NewRPCReq("1", "eth_getBlockByHash", []interface{}{"0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b", "false"}), + ) + require.NoError(t, err) + RequireEqualJSON(t, []byte(asArray(goodChainIdResponse, goodNetVersionResponse, goodEthGetBlockByHash)), res) + require.Equal(t, 1, countRequests(backend, "eth_chainId")) + require.Equal(t, 1, countRequests(backend, "net_version")) + require.Equal(t, 1, countRequests(backend, "eth_getBlockByHash")) + + backend.Reset() + res, _, err = client.SendBatchRPC( + NewRPCReq("1", "eth_chainId", nil), + NewRPCReq("1", "eth_call", []interface{}{`{"to":"0x1234"}`, "pending"}), + NewRPCReq("1", "net_version", nil), + NewRPCReq("1", "eth_getBlockByHash", []interface{}{"0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b", "false"}), + ) + require.NoError(t, err) + RequireEqualJSON(t, []byte(asArray(goodChainIdResponse, goodEthCallResponse, goodNetVersionResponse, goodEthGetBlockByHash)), res) + require.Equal(t, 0, countRequests(backend, "eth_chainId")) + require.Equal(t, 0, countRequests(backend, "net_version")) + require.Equal(t, 0, countRequests(backend, "eth_getBlockByHash")) + require.Equal(t, 1, countRequests(backend, "eth_call")) +} + +func countRequests(backend *MockBackend, name string) int { + var count int + for _, req := range backend.Requests() { + if bytes.Contains(req.Body, []byte(name)) { + count++ + } + } + return count +} diff --git a/proxyd/integration_tests/consensus_test.go b/proxyd/integration_tests/consensus_test.go new file mode 100644 index 0000000..654b7a5 --- /dev/null +++ b/proxyd/integration_tests/consensus_test.go @@ -0,0 +1,1005 @@ +package integration_tests + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "os" + "path" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common/hexutil" + + 
"github.com/ethereum-optimism/optimism/proxyd" + ms "github.com/ethereum-optimism/optimism/proxyd/tools/mockserver/handler" + "github.com/stretchr/testify/require" +) + +type nodeContext struct { + backend *proxyd.Backend // this is the actual backend impl in proxyd + mockBackend *MockBackend // this is the fake backend that we can use to mock responses + handler *ms.MockedHandler // this is where we control the state of mocked responses +} + +func setup(t *testing.T) (map[string]nodeContext, *proxyd.BackendGroup, *ProxydHTTPClient, func()) { + // setup mock servers + node1 := NewMockBackend(nil) + node2 := NewMockBackend(nil) + + dir, err := os.Getwd() + require.NoError(t, err) + + responses := path.Join(dir, "testdata/consensus_responses.yml") + + h1 := ms.MockedHandler{ + Overrides: []*ms.MethodTemplate{}, + Autoload: true, + AutoloadFile: responses, + } + h2 := ms.MockedHandler{ + Overrides: []*ms.MethodTemplate{}, + Autoload: true, + AutoloadFile: responses, + } + + require.NoError(t, os.Setenv("NODE1_URL", node1.URL())) + require.NoError(t, os.Setenv("NODE2_URL", node2.URL())) + + node1.SetHandler(http.HandlerFunc(h1.Handler)) + node2.SetHandler(http.HandlerFunc(h2.Handler)) + + // setup proxyd + config := ReadConfig("consensus") + svr, shutdown, err := proxyd.Start(config) + require.NoError(t, err) + + // expose the proxyd client + client := NewProxydClient("http://127.0.0.1:8545") + + // expose the backend group + bg := svr.BackendGroups["node"] + require.NotNil(t, bg) + require.NotNil(t, bg.Consensus) + require.Equal(t, 2, len(bg.Backends)) // should match config + + // convenient mapping to access the nodes by name + nodes := map[string]nodeContext{ + "node1": { + mockBackend: node1, + backend: bg.Backends[0], + handler: &h1, + }, + "node2": { + mockBackend: node2, + backend: bg.Backends[1], + handler: &h2, + }, + } + + return nodes, bg, client, shutdown +} + +func TestConsensus(t *testing.T) { + nodes, bg, client, shutdown := setup(t) + defer nodes["node1"].mockBackend.Close() + defer nodes["node2"].mockBackend.Close() + defer shutdown() + + ctx := context.Background() + + // poll for updated consensus + update := func() { + for _, be := range bg.Backends { + bg.Consensus.UpdateBackend(ctx, be) + } + bg.Consensus.UpdateBackendGroupConsensus(ctx) + } + + // convenient methods to manipulate state and mock responses + reset := func() { + for _, node := range nodes { + node.handler.ResetOverrides() + node.mockBackend.Reset() + } + bg.Consensus.ClearListeners() + bg.Consensus.Reset() + } + + override := func(node string, method string, block string, response string) { + if _, ok := nodes[node]; !ok { + t.Fatalf("node %s does not exist in the nodes map", node) + } + nodes[node].handler.AddOverride(&ms.MethodTemplate{ + Method: method, + Block: block, + Response: response, + }) + } + + overrideBlock := func(node string, blockRequest string, blockResponse string) { + override(node, + "eth_getBlockByNumber", + blockRequest, + buildResponse(map[string]string{ + "number": blockResponse, + "hash": "hash_" + blockResponse, + })) + } + + overrideBlockHash := func(node string, blockRequest string, number string, hash string) { + override(node, + "eth_getBlockByNumber", + blockRequest, + buildResponse(map[string]string{ + "number": number, + "hash": hash, + })) + } + + overridePeerCount := func(node string, count int) { + override(node, "net_peerCount", "", buildResponse(hexutil.Uint64(count).String())) + } + + overrideNotInSync := func(node string) { + override(node, "eth_syncing", "", 
buildResponse(map[string]string{ + "startingblock": "0x0", + "currentblock": "0x0", + "highestblock": "0x100", + })) + } + + // force ban node2 and make sure node1 is the only one in consensus + useOnlyNode1 := func() { + overridePeerCount("node2", 0) + update() + + consensusGroup := bg.Consensus.GetConsensusGroup() + require.Equal(t, 1, len(consensusGroup)) + require.Contains(t, consensusGroup, nodes["node1"].backend) + nodes["node1"].mockBackend.Reset() + } + + t.Run("initial consensus", func(t *testing.T) { + reset() + + // unknown consensus at init + require.Equal(t, "0x0", bg.Consensus.GetLatestBlockNumber().String()) + + // first poll + update() + + // as a default we use: + // - latest at 0x101 [257] + // - safe at 0xe1 [225] + // - finalized at 0xc1 [193] + + // consensus at block 0x101 + require.Equal(t, "0x101", bg.Consensus.GetLatestBlockNumber().String()) + require.Equal(t, "0xe1", bg.Consensus.GetSafeBlockNumber().String()) + require.Equal(t, "0xc1", bg.Consensus.GetFinalizedBlockNumber().String()) + }) + + t.Run("prevent using a backend with low peer count", func(t *testing.T) { + reset() + overridePeerCount("node1", 0) + update() + + consensusGroup := bg.Consensus.GetConsensusGroup() + require.NotContains(t, consensusGroup, nodes["node1"].backend) + require.False(t, bg.Consensus.IsBanned(nodes["node1"].backend)) + require.Equal(t, 1, len(consensusGroup)) + }) + + t.Run("prevent using a backend lagging behind", func(t *testing.T) { + reset() + // node2 is 8+1 blocks ahead of node1 (0x101 + 8+1 = 0x10a) + overrideBlock("node2", "latest", "0x10a") + update() + + // since we ignored node1, the consensus should be at 0x10a + require.Equal(t, "0x10a", bg.Consensus.GetLatestBlockNumber().String()) + require.Equal(t, "0xe1", bg.Consensus.GetSafeBlockNumber().String()) + require.Equal(t, "0xc1", bg.Consensus.GetFinalizedBlockNumber().String()) + + consensusGroup := bg.Consensus.GetConsensusGroup() + require.NotContains(t, consensusGroup, nodes["node1"].backend) + require.False(t, bg.Consensus.IsBanned(nodes["node1"].backend)) + require.Equal(t, 1, len(consensusGroup)) + }) + + t.Run("prevent using a backend lagging behind - one before limit", func(t *testing.T) { + reset() + // node2 is 8 blocks ahead of node1 (0x101 + 8 = 0x109) + overrideBlock("node2", "latest", "0x109") + update() + + // both nodes are in consensus with the lowest block + require.Equal(t, "0x101", bg.Consensus.GetLatestBlockNumber().String()) + require.Equal(t, "0xe1", bg.Consensus.GetSafeBlockNumber().String()) + require.Equal(t, "0xc1", bg.Consensus.GetFinalizedBlockNumber().String()) + require.Equal(t, 2, len(bg.Consensus.GetConsensusGroup())) + }) + + t.Run("prevent using a backend not in sync", func(t *testing.T) { + reset() + // make node1 not in sync + overrideNotInSync("node1") + update() + + consensusGroup := bg.Consensus.GetConsensusGroup() + require.NotContains(t, consensusGroup, nodes["node1"].backend) + require.False(t, bg.Consensus.IsBanned(nodes["node1"].backend)) + require.Equal(t, 1, len(consensusGroup)) + }) + + t.Run("advance consensus", func(t *testing.T) { + reset() + + // as a default we use: + // - latest at 0x101 [257] + // - safe at 0xe1 [225] + // - finalized at 0xc1 [193] + + update() + + // all nodes start at block 0x101 + require.Equal(t, "0x101", bg.Consensus.GetLatestBlockNumber().String()) + + // advance latest on node2 to 0x102 + overrideBlock("node2", "latest", "0x102") + + update() + + // consensus should stick to 0x101, since node1 is still lagging there + 
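		// (group consensus only advances once every backend in the group
+		// reports the new block; the next update() below shows it move
+		// once node1 catches up)
+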
bg.Consensus.UpdateBackendGroupConsensus(ctx) + require.Equal(t, "0x101", bg.Consensus.GetLatestBlockNumber().String()) + + // advance latest on node1 to 0x102 + overrideBlock("node1", "latest", "0x102") + + update() + + // all nodes now at 0x102 + require.Equal(t, "0x102", bg.Consensus.GetLatestBlockNumber().String()) + }) + + t.Run("should use lowest safe and finalized", func(t *testing.T) { + reset() + overrideBlock("node2", "finalized", "0xc2") + overrideBlock("node2", "safe", "0xe2") + update() + + require.Equal(t, "0xe1", bg.Consensus.GetSafeBlockNumber().String()) + require.Equal(t, "0xc1", bg.Consensus.GetFinalizedBlockNumber().String()) + }) + + t.Run("advance safe and finalized", func(t *testing.T) { + reset() + overrideBlock("node1", "finalized", "0xc2") + overrideBlock("node1", "safe", "0xe2") + overrideBlock("node2", "finalized", "0xc2") + overrideBlock("node2", "safe", "0xe2") + update() + + require.Equal(t, "0xe2", bg.Consensus.GetSafeBlockNumber().String()) + require.Equal(t, "0xc2", bg.Consensus.GetFinalizedBlockNumber().String()) + }) + + t.Run("ban backend if error rate is too high", func(t *testing.T) { + reset() + useOnlyNode1() + + // replace node1 handler with one that always returns 503 + oldHandler := nodes["node1"].mockBackend.handler + defer func() { nodes["node1"].mockBackend.handler = oldHandler }() + + nodes["node1"].mockBackend.SetHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(503) + })) + + numberReqs := 10 + for numberReqs > 0 { + _, statusCode, err := client.SendRPC("eth_getBlockByNumber", []interface{}{"0x101", false}) + require.NoError(t, err) + require.Equal(t, 503, statusCode) + numberReqs-- + } + + update() + + consensusGroup := bg.Consensus.GetConsensusGroup() + require.NotContains(t, consensusGroup, nodes["node1"].backend) + require.True(t, bg.Consensus.IsBanned(nodes["node1"].backend)) + require.Equal(t, 0, len(consensusGroup)) + }) + + t.Run("ban backend if tags are messed - safe < finalized", func(t *testing.T) { + reset() + overrideBlock("node1", "finalized", "0xb1") + overrideBlock("node1", "safe", "0xa1") + update() + + require.Equal(t, "0xe1", bg.Consensus.GetSafeBlockNumber().String()) + require.Equal(t, "0xc1", bg.Consensus.GetFinalizedBlockNumber().String()) + + consensusGroup := bg.Consensus.GetConsensusGroup() + require.NotContains(t, consensusGroup, nodes["node1"].backend) + require.True(t, bg.Consensus.IsBanned(nodes["node1"].backend)) + require.Equal(t, 1, len(consensusGroup)) + }) + + t.Run("ban backend if tags are messed - latest < safe", func(t *testing.T) { + reset() + overrideBlock("node1", "safe", "0xb1") + overrideBlock("node1", "latest", "0xa1") + update() + + require.Equal(t, "0x101", bg.Consensus.GetLatestBlockNumber().String()) + require.Equal(t, "0xc1", bg.Consensus.GetFinalizedBlockNumber().String()) + require.Equal(t, "0xe1", bg.Consensus.GetSafeBlockNumber().String()) + + consensusGroup := bg.Consensus.GetConsensusGroup() + require.NotContains(t, consensusGroup, nodes["node1"].backend) + require.True(t, bg.Consensus.IsBanned(nodes["node1"].backend)) + require.Equal(t, 1, len(consensusGroup)) + }) + + t.Run("ban backend if tags are messed - safe dropped", func(t *testing.T) { + reset() + update() + overrideBlock("node1", "safe", "0xb1") + update() + + require.Equal(t, "0x101", bg.Consensus.GetLatestBlockNumber().String()) + require.Equal(t, "0xe1", bg.Consensus.GetSafeBlockNumber().String()) + require.Equal(t, "0xc1", bg.Consensus.GetFinalizedBlockNumber().String()) + +
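+ // node1 should be removed from the consensus group and banned for moving its safe tag backwards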
consensusGroup := bg.Consensus.GetConsensusGroup() + require.NotContains(t, consensusGroup, nodes["node1"].backend) + require.True(t, bg.Consensus.IsBanned(nodes["node1"].backend)) + require.Equal(t, 1, len(consensusGroup)) + }) + + t.Run("ban backend if tags are messed - finalized dropped", func(t *testing.T) { + reset() + update() + overrideBlock("node1", "finalized", "0xa1") + update() + + require.Equal(t, "0x101", bg.Consensus.GetLatestBlockNumber().String()) + require.Equal(t, "0xe1", bg.Consensus.GetSafeBlockNumber().String()) + require.Equal(t, "0xc1", bg.Consensus.GetFinalizedBlockNumber().String()) + + consensusGroup := bg.Consensus.GetConsensusGroup() + require.NotContains(t, consensusGroup, nodes["node1"].backend) + require.True(t, bg.Consensus.IsBanned(nodes["node1"].backend)) + require.Equal(t, 1, len(consensusGroup)) + }) + + t.Run("recover after safe and finalized dropped", func(t *testing.T) { + reset() + useOnlyNode1() + overrideBlock("node1", "latest", "0xd1") + overrideBlock("node1", "safe", "0xb1") + overrideBlock("node1", "finalized", "0x91") + update() + + consensusGroup := bg.Consensus.GetConsensusGroup() + require.NotContains(t, consensusGroup, nodes["node1"].backend) + require.True(t, bg.Consensus.IsBanned(nodes["node1"].backend)) + require.Equal(t, 0, len(consensusGroup)) + + // unban and see if it recovers + bg.Consensus.Unban(nodes["node1"].backend) + update() + + consensusGroup = bg.Consensus.GetConsensusGroup() + require.Contains(t, consensusGroup, nodes["node1"].backend) + require.False(t, bg.Consensus.IsBanned(nodes["node1"].backend)) + require.Equal(t, 1, len(consensusGroup)) + + require.Equal(t, "0xd1", bg.Consensus.GetLatestBlockNumber().String()) + require.Equal(t, "0xb1", bg.Consensus.GetSafeBlockNumber().String()) + require.Equal(t, "0x91", bg.Consensus.GetFinalizedBlockNumber().String()) + }) + + t.Run("latest dropped below safe, then recovered", func(t *testing.T) { + reset() + useOnlyNode1() + overrideBlock("node1", "latest", "0xd1") + update() + + consensusGroup := bg.Consensus.GetConsensusGroup() + require.NotContains(t, consensusGroup, nodes["node1"].backend) + require.True(t, bg.Consensus.IsBanned(nodes["node1"].backend)) + require.Equal(t, 0, len(consensusGroup)) + + // unban and see if it recovers + bg.Consensus.Unban(nodes["node1"].backend) + overrideBlock("node1", "safe", "0xb1") + overrideBlock("node1", "finalized", "0x91") + update() + + consensusGroup = bg.Consensus.GetConsensusGroup() + require.Contains(t, consensusGroup, nodes["node1"].backend) + require.False(t, bg.Consensus.IsBanned(nodes["node1"].backend)) + require.Equal(t, 1, len(consensusGroup)) + + require.Equal(t, "0xd1", bg.Consensus.GetLatestBlockNumber().String()) + require.Equal(t, "0xb1", bg.Consensus.GetSafeBlockNumber().String()) + require.Equal(t, "0x91", bg.Consensus.GetFinalizedBlockNumber().String()) + }) + + t.Run("latest dropped below safe, and stayed inconsistent", func(t *testing.T) { + reset() + useOnlyNode1() + overrideBlock("node1", "latest", "0xd1") + update() + + consensusGroup := bg.Consensus.GetConsensusGroup() + require.NotContains(t, consensusGroup, nodes["node1"].backend) + require.True(t, bg.Consensus.IsBanned(nodes["node1"].backend)) + require.Equal(t, 0, len(consensusGroup)) + + // unban and see if it recovers - it should not since the blocks stay the same + bg.Consensus.Unban(nodes["node1"].backend) + update() + + // should be banned again + consensusGroup = bg.Consensus.GetConsensusGroup() + require.NotContains(t, consensusGroup,
nodes["node1"].backend) + require.True(t, bg.Consensus.IsBanned(nodes["node1"].backend)) + require.Equal(t, 0, len(consensusGroup)) + }) + + t.Run("broken consensus", func(t *testing.T) { + reset() + listenerCalled := false + bg.Consensus.AddListener(func() { + listenerCalled = true + }) + update() + + // all nodes start at block 0x101 + require.Equal(t, "0x101", bg.Consensus.GetLatestBlockNumber().String()) + + // advance latest on both nodes to 0x102 + overrideBlock("node1", "latest", "0x102") + overrideBlock("node2", "latest", "0x102") + + update() + + // at 0x102 + require.Equal(t, "0x102", bg.Consensus.GetLatestBlockNumber().String()) + + // make node2 diverge on hash + overrideBlockHash("node2", "0x102", "0x102", "wrong_hash") + + update() + + // should resolve to 0x101, since 0x102 is out of consensus at the moment + require.Equal(t, "0x101", bg.Consensus.GetLatestBlockNumber().String()) + + // everybody serving traffic + consensusGroup := bg.Consensus.GetConsensusGroup() + require.Equal(t, 2, len(consensusGroup)) + require.False(t, bg.Consensus.IsBanned(nodes["node1"].backend)) + require.False(t, bg.Consensus.IsBanned(nodes["node2"].backend)) + + // onConsensusBroken listener was called + require.True(t, listenerCalled) + }) + + t.Run("broken consensus with depth 2", func(t *testing.T) { + reset() + listenerCalled := false + bg.Consensus.AddListener(func() { + listenerCalled = true + }) + update() + + // all nodes start at block 0x101 + require.Equal(t, "0x101", bg.Consensus.GetLatestBlockNumber().String()) + + // advance latest on both nodes to 0x102 + overrideBlock("node1", "latest", "0x102") + overrideBlock("node2", "latest", "0x102") + + update() + + // at 0x102 + require.Equal(t, "0x102", bg.Consensus.GetLatestBlockNumber().String()) + + // advance latest on both nodes to 0x3 + overrideBlock("node1", "latest", "0x103") + overrideBlock("node2", "latest", "0x103") + + update() + + // at 0x103 + require.Equal(t, "0x103", bg.Consensus.GetLatestBlockNumber().String()) + + // make node2 diverge on hash for blocks 0x102 and 0x103 + overrideBlockHash("node2", "0x102", "0x102", "wrong_hash_0x102") + overrideBlockHash("node2", "0x103", "0x103", "wrong_hash_0x103") + + update() + + // should resolve to 0x101 + require.Equal(t, "0x101", bg.Consensus.GetLatestBlockNumber().String()) + + // everybody serving traffic + consensusGroup := bg.Consensus.GetConsensusGroup() + require.Equal(t, 2, len(consensusGroup)) + require.False(t, bg.Consensus.IsBanned(nodes["node1"].backend)) + require.False(t, bg.Consensus.IsBanned(nodes["node2"].backend)) + + // onConsensusBroken listener was called + require.True(t, listenerCalled) + }) + + t.Run("fork in advanced block", func(t *testing.T) { + reset() + listenerCalled := false + bg.Consensus.AddListener(func() { + listenerCalled = true + }) + update() + + // all nodes start at block 0x101 + require.Equal(t, "0x101", bg.Consensus.GetLatestBlockNumber().String()) + + // make nodes 1 and 2 advance in forks, i.e. 
they have the same block number with different hashes + overrideBlockHash("node1", "0x102", "0x102", "node1_0x102") + overrideBlockHash("node2", "0x102", "0x102", "node2_0x102") + overrideBlockHash("node1", "0x103", "0x103", "node1_0x103") + overrideBlockHash("node2", "0x103", "0x103", "node2_0x103") + overrideBlockHash("node1", "latest", "0x103", "node1_0x103") + overrideBlockHash("node2", "latest", "0x103", "node2_0x103") + + update() + + // should resolve to 0x101, the highest common ancestor + require.Equal(t, "0x101", bg.Consensus.GetLatestBlockNumber().String()) + + // everybody serving traffic + consensusGroup := bg.Consensus.GetConsensusGroup() + require.Equal(t, 2, len(consensusGroup)) + require.False(t, bg.Consensus.IsBanned(nodes["node1"].backend)) + require.False(t, bg.Consensus.IsBanned(nodes["node2"].backend)) + + // onConsensusBroken listener should not be called + require.False(t, listenerCalled) + }) + + t.Run("load balancing should hit both backends", func(t *testing.T) { + reset() + update() + + require.Equal(t, 2, len(bg.Consensus.GetConsensusGroup())) + + // reset request counts + nodes["node1"].mockBackend.Reset() + nodes["node2"].mockBackend.Reset() + + require.Equal(t, 0, len(nodes["node1"].mockBackend.Requests())) + require.Equal(t, 0, len(nodes["node2"].mockBackend.Requests())) + + // there is a random component to this test, + // since our round-robin implementation shuffles the ordering + // to achieve uniform distribution + + // so we just make 100 requests per backend and expect the number of requests to be somewhat balanced + // i.e. each backend should receive at least half of its expected 100 requests + consensusGroup := bg.Consensus.GetConsensusGroup() + + numberReqs := len(consensusGroup) * 100 + for numberReqs > 0 { + _, statusCode, err := client.SendRPC("eth_getBlockByNumber", []interface{}{"0x101", false}) + require.NoError(t, err) + require.Equal(t, 200, statusCode) + numberReqs-- + } + + msg := fmt.Sprintf("n1 %d, n2 %d", + len(nodes["node1"].mockBackend.Requests()), len(nodes["node2"].mockBackend.Requests())) + require.GreaterOrEqual(t, len(nodes["node1"].mockBackend.Requests()), 50, msg) + require.GreaterOrEqual(t, len(nodes["node2"].mockBackend.Requests()), 50, msg) + }) + + t.Run("load balancing should not hit if node is not healthy", func(t *testing.T) { + reset() + useOnlyNode1() + + // reset request counts + nodes["node1"].mockBackend.Reset() + nodes["node2"].mockBackend.Reset() + + require.Equal(t, 0, len(nodes["node1"].mockBackend.Requests())) + require.Equal(t, 0, len(nodes["node2"].mockBackend.Requests())) + + numberReqs := 10 + for numberReqs > 0 { + _, statusCode, err := client.SendRPC("eth_getBlockByNumber", []interface{}{"0x101", false}) + require.NoError(t, err) + require.Equal(t, 200, statusCode) + numberReqs-- + } + + msg := fmt.Sprintf("n1 %d, n2 %d", + len(nodes["node1"].mockBackend.Requests()), len(nodes["node2"].mockBackend.Requests())) + require.Equal(t, len(nodes["node1"].mockBackend.Requests()), 10, msg) + require.Equal(t, len(nodes["node2"].mockBackend.Requests()), 0, msg) + }) + + t.Run("load balancing should not hit if node is degraded", func(t *testing.T) { + reset() + useOnlyNode1() + + // replace node1 handler with one that adds a 500ms delay + oldHandler := nodes["node1"].mockBackend.handler + defer func() { nodes["node1"].mockBackend.handler = oldHandler }() + + nodes["node1"].mockBackend.SetHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + time.Sleep(500 * time.Millisecond) + oldHandler.ServeHTTP(w,
r) + })) + + update() + + // send 10 requests to make node1 degraded + numberReqs := 10 + for numberReqs > 0 { + _, statusCode, err := client.SendRPC("eth_getBlockByNumber", []interface{}{"0x101", false}) + require.NoError(t, err) + require.Equal(t, 200, statusCode) + numberReqs-- + } + + // bring back node2 + nodes["node2"].handler.ResetOverrides() + update() + + // reset request counts + nodes["node1"].mockBackend.Reset() + nodes["node2"].mockBackend.Reset() + + require.Equal(t, 0, len(nodes["node1"].mockBackend.Requests())) + require.Equal(t, 0, len(nodes["node2"].mockBackend.Requests())) + + numberReqs = 10 + for numberReqs > 0 { + _, statusCode, err := client.SendRPC("eth_getBlockByNumber", []interface{}{"0x101", false}) + require.NoError(t, err) + require.Equal(t, 200, statusCode) + numberReqs-- + } + + msg := fmt.Sprintf("n1 %d, n2 %d", + len(nodes["node1"].mockBackend.Requests()), len(nodes["node2"].mockBackend.Requests())) + require.Equal(t, 0, len(nodes["node1"].mockBackend.Requests()), msg) + require.Equal(t, 10, len(nodes["node2"].mockBackend.Requests()), msg) + }) + + t.Run("rewrite response of eth_blockNumber", func(t *testing.T) { + reset() + update() + + totalRequests := len(nodes["node1"].mockBackend.Requests()) + len(nodes["node2"].mockBackend.Requests()) + require.Equal(t, 2, len(bg.Consensus.GetConsensusGroup())) + + resRaw, statusCode, err := client.SendRPC("eth_blockNumber", nil) + require.NoError(t, err) + require.Equal(t, 200, statusCode) + + var jsonMap map[string]interface{} + err = json.Unmarshal(resRaw, &jsonMap) + require.NoError(t, err) + require.Equal(t, "0x101", jsonMap["result"]) + + // no extra request hit the backends + require.Equal(t, totalRequests, + len(nodes["node1"].mockBackend.Requests())+len(nodes["node2"].mockBackend.Requests())) + }) + + t.Run("rewrite request of eth_getBlockByNumber for latest", func(t *testing.T) { + reset() + useOnlyNode1() + + _, statusCode, err := client.SendRPC("eth_getBlockByNumber", []interface{}{"latest"}) + require.NoError(t, err) + require.Equal(t, 200, statusCode) + + var jsonMap map[string]interface{} + err = json.Unmarshal(nodes["node1"].mockBackend.Requests()[0].Body, &jsonMap) + require.NoError(t, err) + require.Equal(t, "0x101", jsonMap["params"].([]interface{})[0]) + }) + + t.Run("rewrite request of eth_getBlockByNumber for finalized", func(t *testing.T) { + reset() + useOnlyNode1() + + _, statusCode, err := client.SendRPC("eth_getBlockByNumber", []interface{}{"finalized"}) + require.NoError(t, err) + require.Equal(t, 200, statusCode) + + var jsonMap map[string]interface{} + err = json.Unmarshal(nodes["node1"].mockBackend.Requests()[0].Body, &jsonMap) + require.NoError(t, err) + require.Equal(t, "0xc1", jsonMap["params"].([]interface{})[0]) + }) + + t.Run("rewrite request of eth_getBlockByNumber for safe", func(t *testing.T) { + reset() + useOnlyNode1() + + _, statusCode, err := client.SendRPC("eth_getBlockByNumber", []interface{}{"safe"}) + require.NoError(t, err) + require.Equal(t, 200, statusCode) + + var jsonMap map[string]interface{} + err = json.Unmarshal(nodes["node1"].mockBackend.Requests()[0].Body, &jsonMap) + require.NoError(t, err) + require.Equal(t, "0xe1", jsonMap["params"].([]interface{})[0]) + }) + + t.Run("rewrite request of eth_getBlockByNumber - out of range", func(t *testing.T) { + reset() + useOnlyNode1() + + resRaw, statusCode, err := client.SendRPC("eth_getBlockByNumber", []interface{}{"0x300"}) + require.NoError(t, err) + require.Equal(t, 400, statusCode) + + var jsonMap 
map[string]interface{} + err = json.Unmarshal(resRaw, &jsonMap) + require.NoError(t, err) + require.Equal(t, -32019, int(jsonMap["error"].(map[string]interface{})["code"].(float64))) + require.Equal(t, "block is out of range", jsonMap["error"].(map[string]interface{})["message"]) + }) + + t.Run("batched rewrite", func(t *testing.T) { + reset() + useOnlyNode1() + + resRaw, statusCode, err := client.SendBatchRPC( + NewRPCReq("1", "eth_getBlockByNumber", []interface{}{"latest"}), + NewRPCReq("2", "eth_getBlockByNumber", []interface{}{"0x102"}), + NewRPCReq("3", "eth_getBlockByNumber", []interface{}{"0xe1"})) + require.NoError(t, err) + require.Equal(t, 200, statusCode) + + var jsonMap []map[string]interface{} + err = json.Unmarshal(resRaw, &jsonMap) + require.NoError(t, err) + require.Equal(t, 3, len(jsonMap)) + + // rewrite latest to 0x101 + require.Equal(t, "0x101", jsonMap[0]["result"].(map[string]interface{})["number"]) + + // out of bounds for block 0x102 + require.Equal(t, -32019, int(jsonMap[1]["error"].(map[string]interface{})["code"].(float64))) + require.Equal(t, "block is out of range", jsonMap[1]["error"].(map[string]interface{})["message"]) + + // dont rewrite for 0xe1 + require.Equal(t, "0xe1", jsonMap[2]["result"].(map[string]interface{})["number"]) + }) + + t.Run("translate consensus_getReceipts to debug_getRawReceipts", func(t *testing.T) { + reset() + useOnlyNode1() + update() + + // reset request counts + nodes["node1"].mockBackend.Reset() + + resRaw, statusCode, err := client.SendRPC("consensus_getReceipts", + []interface{}{"0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b"}) + require.NoError(t, err) + require.Equal(t, 200, statusCode) + + var jsonMap map[string]interface{} + err = json.Unmarshal(nodes["node1"].mockBackend.Requests()[0].Body, &jsonMap) + require.NoError(t, err) + require.Equal(t, "debug_getRawReceipts", jsonMap["method"]) + require.Equal(t, "0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b", jsonMap["params"].([]interface{})[0]) + + var resJsonMap map[string]interface{} + err = json.Unmarshal(resRaw, &resJsonMap) + require.NoError(t, err) + + require.Equal(t, "debug_getRawReceipts", resJsonMap["result"].(map[string]interface{})["method"].(string)) + require.Equal(t, "debug_getRawReceipts", resJsonMap["result"].(map[string]interface{})["result"].(map[string]interface{})["_"]) + }) + + t.Run("translate consensus_getReceipts to debug_getRawReceipts with latest block tag", func(t *testing.T) { + reset() + useOnlyNode1() + update() + + // reset request counts + nodes["node1"].mockBackend.Reset() + + resRaw, statusCode, err := client.SendRPC("consensus_getReceipts", + []interface{}{"latest"}) + + require.NoError(t, err) + require.Equal(t, 200, statusCode) + + var jsonMap map[string]interface{} + err = json.Unmarshal(nodes["node1"].mockBackend.Requests()[0].Body, &jsonMap) + require.NoError(t, err) + require.Equal(t, "debug_getRawReceipts", jsonMap["method"]) + require.Equal(t, "0x101", jsonMap["params"].([]interface{})[0]) + + var resJsonMap map[string]interface{} + err = json.Unmarshal(resRaw, &resJsonMap) + require.NoError(t, err) + + require.Equal(t, "debug_getRawReceipts", resJsonMap["result"].(map[string]interface{})["method"].(string)) + require.Equal(t, "debug_getRawReceipts", resJsonMap["result"].(map[string]interface{})["result"].(map[string]interface{})["_"]) + }) + + t.Run("translate consensus_getReceipts to debug_getRawReceipts with block number", func(t *testing.T) { + reset() + useOnlyNode1() + update() + + // 
reset request counts + nodes["node1"].mockBackend.Reset() + + resRaw, statusCode, err := client.SendRPC("consensus_getReceipts", + []interface{}{"0x55"}) + + require.NoError(t, err) + require.Equal(t, 200, statusCode) + + var jsonMap map[string]interface{} + err = json.Unmarshal(nodes["node1"].mockBackend.Requests()[0].Body, &jsonMap) + require.NoError(t, err) + require.Equal(t, "debug_getRawReceipts", jsonMap["method"]) + require.Equal(t, "0x55", jsonMap["params"].([]interface{})[0]) + + var resJsonMap map[string]interface{} + err = json.Unmarshal(resRaw, &resJsonMap) + require.NoError(t, err) + + require.Equal(t, "debug_getRawReceipts", resJsonMap["result"].(map[string]interface{})["method"].(string)) + require.Equal(t, "debug_getRawReceipts", resJsonMap["result"].(map[string]interface{})["result"].(map[string]interface{})["_"]) + }) + + t.Run("translate consensus_getReceipts to alchemy_getTransactionReceipts with block hash", func(t *testing.T) { + reset() + useOnlyNode1() + update() + + // reset request counts + nodes["node1"].mockBackend.Reset() + + nodes["node1"].backend.Override(proxyd.WithConsensusReceiptTarget("alchemy_getTransactionReceipts")) + defer nodes["node1"].backend.Override(proxyd.WithConsensusReceiptTarget("debug_getRawReceipts")) + + resRaw, statusCode, err := client.SendRPC("consensus_getReceipts", + []interface{}{"0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b"}) + + require.NoError(t, err) + require.Equal(t, 200, statusCode) + + var reqJsonMap map[string]interface{} + err = json.Unmarshal(nodes["node1"].mockBackend.Requests()[0].Body, &reqJsonMap) + + require.NoError(t, err) + require.Equal(t, "alchemy_getTransactionReceipts", reqJsonMap["method"]) + require.Equal(t, "0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b", reqJsonMap["params"].([]interface{})[0].(map[string]interface{})["blockHash"]) + + var resJsonMap map[string]interface{} + err = json.Unmarshal(resRaw, &resJsonMap) + require.NoError(t, err) + + require.Equal(t, "alchemy_getTransactionReceipts", resJsonMap["result"].(map[string]interface{})["method"].(string)) + require.Equal(t, "alchemy_getTransactionReceipts", resJsonMap["result"].(map[string]interface{})["result"].(map[string]interface{})["_"]) + }) + + t.Run("translate consensus_getReceipts to alchemy_getTransactionReceipts with block number", func(t *testing.T) { + reset() + useOnlyNode1() + update() + + // reset request counts + nodes["node1"].mockBackend.Reset() + + nodes["node1"].backend.Override(proxyd.WithConsensusReceiptTarget("alchemy_getTransactionReceipts")) + defer nodes["node1"].backend.Override(proxyd.WithConsensusReceiptTarget("debug_getRawReceipts")) + + resRaw, statusCode, err := client.SendRPC("consensus_getReceipts", + []interface{}{"0x55"}) + + require.NoError(t, err) + require.Equal(t, 200, statusCode) + + var reqJsonMap map[string]interface{} + err = json.Unmarshal(nodes["node1"].mockBackend.Requests()[0].Body, &reqJsonMap) + + require.NoError(t, err) + require.Equal(t, "alchemy_getTransactionReceipts", reqJsonMap["method"]) + require.Equal(t, "0x55", reqJsonMap["params"].([]interface{})[0].(map[string]interface{})["blockNumber"]) + + var resJsonMap map[string]interface{} + err = json.Unmarshal(resRaw, &resJsonMap) + require.NoError(t, err) + + require.Equal(t, "alchemy_getTransactionReceipts", resJsonMap["result"].(map[string]interface{})["method"].(string)) + require.Equal(t, "alchemy_getTransactionReceipts", 
resJsonMap["result"].(map[string]interface{})["result"].(map[string]interface{})["_"]) + }) + + t.Run("translate consensus_getReceipts to alchemy_getTransactionReceipts with latest block tag", func(t *testing.T) { + reset() + useOnlyNode1() + update() + + // reset request counts + nodes["node1"].mockBackend.Reset() + + nodes["node1"].backend.Override(proxyd.WithConsensusReceiptTarget("alchemy_getTransactionReceipts")) + defer nodes["node1"].backend.Override(proxyd.WithConsensusReceiptTarget("debug_getRawReceipts")) + + resRaw, statusCode, err := client.SendRPC("consensus_getReceipts", + []interface{}{"latest"}) + + require.NoError(t, err) + require.Equal(t, 200, statusCode) + + var reqJsonMap map[string]interface{} + err = json.Unmarshal(nodes["node1"].mockBackend.Requests()[0].Body, &reqJsonMap) + + require.NoError(t, err) + require.Equal(t, "alchemy_getTransactionReceipts", reqJsonMap["method"]) + require.Equal(t, "0x101", reqJsonMap["params"].([]interface{})[0].(map[string]interface{})["blockNumber"]) + + var resJsonMap map[string]interface{} + err = json.Unmarshal(resRaw, &resJsonMap) + require.NoError(t, err) + + require.Equal(t, "alchemy_getTransactionReceipts", resJsonMap["result"].(map[string]interface{})["method"].(string)) + require.Equal(t, "alchemy_getTransactionReceipts", resJsonMap["result"].(map[string]interface{})["result"].(map[string]interface{})["_"]) + }) + + t.Run("translate consensus_getReceipts to unsupported consensus_receipts_target", func(t *testing.T) { + reset() + useOnlyNode1() + + nodes["node1"].backend.Override(proxyd.WithConsensusReceiptTarget("unsupported_consensus_receipts_target")) + defer nodes["node1"].backend.Override(proxyd.WithConsensusReceiptTarget("debug_getRawReceipts")) + + _, statusCode, err := client.SendRPC("consensus_getReceipts", + []interface{}{"latest"}) + + require.NoError(t, err) + require.Equal(t, 400, statusCode) + }) + + t.Run("consensus_getReceipts should not be used in a batch", func(t *testing.T) { + reset() + useOnlyNode1() + + _, statusCode, err := client.SendBatchRPC( + NewRPCReq("1", "eth_getBlockByNumber", []interface{}{"latest"}), + NewRPCReq("2", "consensus_getReceipts", []interface{}{"0x55"}), + NewRPCReq("3", "eth_getBlockByNumber", []interface{}{"0xe1"})) + require.NoError(t, err) + require.Equal(t, 400, statusCode) + }) +} + +func buildResponse(result interface{}) string { + res, err := json.Marshal(proxyd.RPCRes{ + Result: result, + }) + if err != nil { + panic(err) + } + return string(res) +} diff --git a/proxyd/integration_tests/failover_test.go b/proxyd/integration_tests/failover_test.go new file mode 100644 index 0000000..501542a --- /dev/null +++ b/proxyd/integration_tests/failover_test.go @@ -0,0 +1,288 @@ +package integration_tests + +import ( + "fmt" + "net/http" + "os" + "sync/atomic" + "testing" + "time" + + "github.com/alicebob/miniredis" + "github.com/ethereum-optimism/optimism/proxyd" + "github.com/stretchr/testify/require" +) + +const ( + goodResponse = `{"jsonrpc": "2.0", "result": "hello", "id": 999}` + noBackendsResponse = `{"error":{"code":-32011,"message":"no backends available for method"},"id":999,"jsonrpc":"2.0"}` + unexpectedResponse = `{"error":{"code":-32011,"message":"some error"},"id":999,"jsonrpc":"2.0"}` +) + +func TestFailover(t *testing.T) { + goodBackend := NewMockBackend(BatchedResponseHandler(200, goodResponse)) + defer goodBackend.Close() + badBackend := NewMockBackend(nil) + defer badBackend.Close() + + require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL())) + 
require.NoError(t, os.Setenv("BAD_BACKEND_RPC_URL", badBackend.URL())) + + config := ReadConfig("failover") + client := NewProxydClient("http://127.0.0.1:8545") + _, shutdown, err := proxyd.Start(config) + require.NoError(t, err) + defer shutdown() + + tests := []struct { + name string + handler http.Handler + }{ + { + "backend responds 200 with non-JSON response", + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(200) + _, _ = w.Write([]byte("this data is not JSON!")) + }), + }, + { + "backend responds with no body", + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(200) + }), + }, + } + codes := []int{ + 300, + 301, + 302, + 401, + 403, + 429, + 500, + 503, + } + for _, code := range codes { + tests = append(tests, struct { + name string + handler http.Handler + }{ + fmt.Sprintf("backend %d", code), + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(code) + }), + }) + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + badBackend.SetHandler(tt.handler) + res, statusCode, err := client.SendRPC("eth_chainId", nil) + require.NoError(t, err) + require.Equal(t, 200, statusCode) + RequireEqualJSON(t, []byte(goodResponse), res) + require.Equal(t, 1, len(badBackend.Requests())) + require.Equal(t, 1, len(goodBackend.Requests())) + badBackend.Reset() + goodBackend.Reset() + }) + } + + t.Run("backend times out and falls back to another", func(t *testing.T) { + badBackend.SetHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + time.Sleep(2 * time.Second) + _, _ = w.Write([]byte("[{}]")) + })) + res, statusCode, err := client.SendRPC("eth_chainId", nil) + require.NoError(t, err) + require.Equal(t, 200, statusCode) + RequireEqualJSON(t, []byte(goodResponse), res) + require.Equal(t, 1, len(badBackend.Requests())) + require.Equal(t, 1, len(goodBackend.Requests())) + goodBackend.Reset() + badBackend.Reset() + }) + + t.Run("works with a batch request", func(t *testing.T) { + goodBackend.SetHandler(BatchedResponseHandler(200, goodResponse, goodResponse)) + badBackend.SetHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(500) + })) + res, statusCode, err := client.SendBatchRPC( + NewRPCReq("1", "eth_chainId", nil), + NewRPCReq("2", "eth_chainId", nil), + ) + require.NoError(t, err) + require.Equal(t, 200, statusCode) + RequireEqualJSON(t, []byte(asArray(goodResponse, goodResponse)), res) + require.Equal(t, 1, len(badBackend.Requests())) + require.Equal(t, 1, len(goodBackend.Requests())) + goodBackend.Reset() + badBackend.Reset() + }) +} + +func TestRetries(t *testing.T) { + backend := NewMockBackend(BatchedResponseHandler(200, goodResponse)) + defer backend.Close() + + require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", backend.URL())) + config := ReadConfig("retries") + client := NewProxydClient("http://127.0.0.1:8545") + _, shutdown, err := proxyd.Start(config) + require.NoError(t, err) + defer shutdown() + + attempts := int32(0) + backend.SetHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + incremented := atomic.AddInt32(&attempts, 1) + if incremented != 2 { + w.WriteHeader(500) + return + } + BatchedResponseHandler(200, goodResponse)(w, r) + })) + + // test case where request eventually succeeds + res, statusCode, err := client.SendRPC("eth_chainId", nil) + require.NoError(t, err) + require.Equal(t, 200, statusCode) + RequireEqualJSON(t, []byte(goodResponse), res) + require.Equal(t, 2, len(backend.Requests())) 
+ + // test case where it does not + backend.Reset() + attempts = -10 + res, statusCode, err = client.SendRPC("eth_chainId", nil) + require.NoError(t, err) + require.Equal(t, 503, statusCode) + RequireEqualJSON(t, []byte(noBackendsResponse), res) + require.Equal(t, 4, len(backend.Requests())) +} + +func TestOutOfServiceInterval(t *testing.T) { + okHandler := BatchedResponseHandler(200, goodResponse) + goodBackend := NewMockBackend(okHandler) + defer goodBackend.Close() + badBackend := NewMockBackend(nil) + defer badBackend.Close() + + require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL())) + require.NoError(t, os.Setenv("BAD_BACKEND_RPC_URL", badBackend.URL())) + + config := ReadConfig("out_of_service_interval") + client := NewProxydClient("http://127.0.0.1:8545") + _, shutdown, err := proxyd.Start(config) + require.NoError(t, err) + defer shutdown() + + badBackend.SetHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(503) + })) + + res, statusCode, err := client.SendRPC("eth_chainId", nil) + require.NoError(t, err) + require.Equal(t, 200, statusCode) + RequireEqualJSON(t, []byte(goodResponse), res) + require.Equal(t, 2, len(badBackend.Requests())) + require.Equal(t, 1, len(goodBackend.Requests())) + + res, statusCode, err = client.SendRPC("eth_chainId", nil) + require.NoError(t, err) + require.Equal(t, 200, statusCode) + RequireEqualJSON(t, []byte(goodResponse), res) + require.Equal(t, 4, len(badBackend.Requests())) + require.Equal(t, 2, len(goodBackend.Requests())) + + _, statusCode, err = client.SendBatchRPC( + NewRPCReq("1", "eth_chainId", nil), + NewRPCReq("1", "eth_chainId", nil), + ) + require.NoError(t, err) + require.Equal(t, 200, statusCode) + require.Equal(t, 8, len(badBackend.Requests())) + require.Equal(t, 4, len(goodBackend.Requests())) + + time.Sleep(time.Second) + badBackend.SetHandler(okHandler) + + res, statusCode, err = client.SendRPC("eth_chainId", nil) + require.NoError(t, err) + require.Equal(t, 200, statusCode) + RequireEqualJSON(t, []byte(goodResponse), res) + require.Equal(t, 9, len(badBackend.Requests())) + require.Equal(t, 4, len(goodBackend.Requests())) +} + +func TestBatchWithPartialFailover(t *testing.T) { + config := ReadConfig("failover") + config.Server.MaxUpstreamBatchSize = 2 + + goodBackend := NewMockBackend(BatchedResponseHandler(200, goodResponse, goodResponse)) + defer goodBackend.Close() + badBackend := NewMockBackend(SingleResponseHandler(200, "this data is not JSON!")) + defer badBackend.Close() + + require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL())) + require.NoError(t, os.Setenv("BAD_BACKEND_RPC_URL", badBackend.URL())) + + client := NewProxydClient("http://127.0.0.1:8545") + _, shutdown, err := proxyd.Start(config) + require.NoError(t, err) + defer shutdown() + + res, statusCode, err := client.SendBatchRPC( + NewRPCReq("1", "eth_chainId", nil), + NewRPCReq("2", "eth_chainId", nil), + NewRPCReq("3", "eth_chainId", nil), + NewRPCReq("4", "eth_chainId", nil), + ) + require.NoError(t, err) + require.Equal(t, 200, statusCode) + RequireEqualJSON(t, []byte(asArray(goodResponse, goodResponse, goodResponse, goodResponse)), res) + require.Equal(t, 2, len(badBackend.Requests())) + require.Equal(t, 2, len(goodBackend.Requests())) +} + +func TestInfuraFailoverOnUnexpectedResponse(t *testing.T) { + InitLogger() + // Scenario: + // 1. Send batch to BAD_BACKEND (Infura) + // 2. 
Infura fails completely due to a partially erroneous batch request (one of the N+1 request objects is invalid) + // 3. Assert that the request batch is re-routed to the failover provider + // 4. Assert that BAD_BACKEND is NOT labeled offline + // 5. Assert that BAD_BACKEND is NOT retried + + redis, err := miniredis.Run() + require.NoError(t, err) + defer redis.Close() + + config := ReadConfig("failover") + config.Server.MaxUpstreamBatchSize = 2 + config.BackendOptions.MaxRetries = 2 + // Setup redis to detect offline backends + config.Redis.URL = fmt.Sprintf("redis://127.0.0.1:%s", redis.Port()) + require.NoError(t, err) + + goodBackend := NewMockBackend(BatchedResponseHandler(200, goodResponse, goodResponse)) + defer goodBackend.Close() + badBackend := NewMockBackend(SingleResponseHandler(200, unexpectedResponse)) + defer badBackend.Close() + + require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL())) + require.NoError(t, os.Setenv("BAD_BACKEND_RPC_URL", badBackend.URL())) + + client := NewProxydClient("http://127.0.0.1:8545") + _, shutdown, err := proxyd.Start(config) + require.NoError(t, err) + defer shutdown() + + res, statusCode, err := client.SendBatchRPC( + NewRPCReq("1", "eth_chainId", nil), + NewRPCReq("2", "eth_chainId", nil), + ) + require.NoError(t, err) + require.Equal(t, 200, statusCode) + RequireEqualJSON(t, []byte(asArray(goodResponse, goodResponse)), res) + require.Equal(t, 1, len(badBackend.Requests())) + require.Equal(t, 1, len(goodBackend.Requests())) +} diff --git a/proxyd/integration_tests/fallback_test.go b/proxyd/integration_tests/fallback_test.go new file mode 100644 index 0000000..c5b3e48 --- /dev/null +++ b/proxyd/integration_tests/fallback_test.go @@ -0,0 +1,374 @@ +package integration_tests + +import ( + "context" + "fmt" + "net/http" + "os" + "path" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common/hexutil" + + "github.com/ethereum-optimism/optimism/proxyd" + ms "github.com/ethereum-optimism/optimism/proxyd/tools/mockserver/handler" + "github.com/stretchr/testify/require" +) + +func setup_failover(t *testing.T) (map[string]nodeContext, *proxyd.BackendGroup, *ProxydHTTPClient, func(), []time.Time, []time.Time) { + // setup mock servers + node1 := NewMockBackend(nil) + node2 := NewMockBackend(nil) + + dir, err := os.Getwd() + require.NoError(t, err) + + responses := path.Join(dir, "testdata/consensus_responses.yml") + + h1 := ms.MockedHandler{ + Overrides: []*ms.MethodTemplate{}, + Autoload: true, + AutoloadFile: responses, + } + h2 := ms.MockedHandler{ + Overrides: []*ms.MethodTemplate{}, + Autoload: true, + AutoloadFile: responses, + } + + require.NoError(t, os.Setenv("NODE1_URL", node1.URL())) + require.NoError(t, os.Setenv("NODE2_URL", node2.URL())) + + node1.SetHandler(http.HandlerFunc(h1.Handler)) + node2.SetHandler(http.HandlerFunc(h2.Handler)) + + // setup proxyd + config := ReadConfig("fallback") + svr, shutdown, err := proxyd.Start(config) + require.NoError(t, err) + + // expose the proxyd client + client := NewProxydClient("http://127.0.0.1:8545") + + // expose the backend group + bg := svr.BackendGroups["node"] + require.NotNil(t, bg) + require.NotNil(t, bg.Consensus) + require.Equal(t, 2, len(bg.Backends)) // should match config + + // convenient mapping to access the nodes by name + nodes := map[string]nodeContext{ + "normal": { + mockBackend: node1, + backend: bg.Backends[0], + handler: &h1, + }, + "fallback": { + mockBackend: node2, + backend: bg.Backends[1], + handler: &h2, + }, + } + normalTimestamps := []time.Time{} +
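+ // (the tests append bg.Consensus.GetLastUpdate readings for each backend into these slices via recordLastUpdates)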
fallbackTimestamps := []time.Time{} + + return nodes, bg, client, shutdown, normalTimestamps, fallbackTimestamps +} + +func TestFallback(t *testing.T) { + nodes, bg, client, shutdown, normalTimestamps, fallbackTimestamps := setup_failover(t) + defer nodes["normal"].mockBackend.Close() + defer nodes["fallback"].mockBackend.Close() + defer shutdown() + + ctx := context.Background() + + // Use Update to Advance the Candidate iteration + update := func() { + for _, be := range bg.Primaries() { + bg.Consensus.UpdateBackend(ctx, be) + } + + for _, be := range bg.Fallbacks() { + healthyCandidates := bg.Consensus.FilterCandidates(bg.Primaries()) + if len(healthyCandidates) == 0 { + bg.Consensus.UpdateBackend(ctx, be) + } + } + + bg.Consensus.UpdateBackendGroupConsensus(ctx) + } + + override := func(node string, method string, block string, response string) { + if _, ok := nodes[node]; !ok { + t.Fatalf("node %s does not exist in the nodes map", node) + } + nodes[node].handler.AddOverride(&ms.MethodTemplate{ + Method: method, + Block: block, + Response: response, + }) + } + + overrideBlock := func(node string, blockRequest string, blockResponse string) { + override(node, + "eth_getBlockByNumber", + blockRequest, + buildResponse(map[string]string{ + "number": blockResponse, + "hash": "hash_" + blockResponse, + })) + } + + overrideBlockHash := func(node string, blockRequest string, number string, hash string) { + override(node, + "eth_getBlockByNumber", + blockRequest, + buildResponse(map[string]string{ + "number": number, + "hash": hash, + })) + } + + overridePeerCount := func(node string, count int) { + override(node, "net_peerCount", "", buildResponse(hexutil.Uint64(count).String())) + } + + overrideNotInSync := func(node string) { + override(node, "eth_syncing", "", buildResponse(map[string]string{ + "startingblock": "0x0", + "currentblock": "0x0", + "highestblock": "0x100", + })) + } + + containsNode := func(backends []*proxyd.Backend, name string) bool { + for _, be := range backends { + // Note: Currently checks for name but would like to expose fallback better + if be.Name == name { + return true + } + } + return false + } + + // TODO: Improvement instead of simple array, + // ensure normal and backend are returned in strict order + recordLastUpdates := func(backends []*proxyd.Backend) []time.Time { + lastUpdated := []time.Time{} + for _, be := range backends { + lastUpdated = append(lastUpdated, bg.Consensus.GetLastUpdate(be)) + } + return lastUpdated + } + + // convenient methods to manipulate state and mock responses + reset := func() { + for _, node := range nodes { + node.handler.ResetOverrides() + node.mockBackend.Reset() + } + bg.Consensus.ClearListeners() + bg.Consensus.Reset() + + normalTimestamps = []time.Time{} + fallbackTimestamps = []time.Time{} + } + + /* + triggerFirstNormalFailure: will trigger consensus group into fallback mode + old consensus group should be returned one time, and fallback group should be enabled + Fallback will be returned subsequent update + */ + triggerFirstNormalFailure := func() { + overridePeerCount("normal", 0) + update() + require.True(t, containsNode(bg.Consensus.GetConsensusGroup(), "fallback")) + require.False(t, containsNode(bg.Consensus.GetConsensusGroup(), "normal")) + require.Equal(t, 1, len(bg.Consensus.GetConsensusGroup())) + nodes["fallback"].mockBackend.Reset() + } + + t.Run("Test fallback Mode will not be exited, unless state changes", func(t *testing.T) { + reset() + triggerFirstNormalFailure() + for i := 0; i < 10; i++ { + update() + 
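+ // once in fallback mode, each subsequent poll should keep only the fallback backend in the consensus group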
require.False(t, containsNode(bg.Consensus.GetConsensusGroup(), "normal")) + require.True(t, containsNode(bg.Consensus.GetConsensusGroup(), "fallback")) + require.Equal(t, 1, len(bg.Consensus.GetConsensusGroup())) + } + }) + + t.Run("Test Healthy mode will not be exited unless state changes", func(t *testing.T) { + reset() + for i := 0; i < 10; i++ { + update() + require.Equal(t, 1, len(bg.Consensus.GetConsensusGroup())) + require.False(t, containsNode(bg.Consensus.GetConsensusGroup(), "fallback")) + require.True(t, containsNode(bg.Consensus.GetConsensusGroup(), "normal")) + + _, statusCode, err := client.SendRPC("eth_getBlockByNumber", []interface{}{"0x101", false}) + + require.Equal(t, 200, statusCode) + require.Nil(t, err, "error not nil") + require.Equal(t, "0x101", bg.Consensus.GetLatestBlockNumber().String()) + require.Equal(t, "0xe1", bg.Consensus.GetSafeBlockNumber().String()) + require.Equal(t, "0xc1", bg.Consensus.GetFinalizedBlockNumber().String()) + } + // TODO: Remove these, just here so compiler doesn't complain + overrideNotInSync("normal") + overrideBlock("normal", "safe", "0xb1") + overrideBlockHash("fallback", "0x102", "0x102", "wrong_hash") + }) + + t.Run("trigger normal failure, subsequent update returns failover in consensus group, and fallback mode enabled", func(t *testing.T) { + reset() + triggerFirstNormalFailure() + update() + require.Equal(t, 1, len(bg.Consensus.GetConsensusGroup())) + require.True(t, containsNode(bg.Consensus.GetConsensusGroup(), "fallback")) + require.False(t, containsNode(bg.Consensus.GetConsensusGroup(), "normal")) + }) + + t.Run("trigger healthy -> fallback, update -> healthy", func(t *testing.T) { + reset() + update() + require.Equal(t, 1, len(bg.Consensus.GetConsensusGroup())) + require.True(t, containsNode(bg.Consensus.GetConsensusGroup(), "normal")) + require.False(t, containsNode(bg.Consensus.GetConsensusGroup(), "fallback")) + + triggerFirstNormalFailure() + update() + require.Equal(t, 1, len(bg.Consensus.GetConsensusGroup())) + require.True(t, containsNode(bg.Consensus.GetConsensusGroup(), "fallback")) + require.False(t, containsNode(bg.Consensus.GetConsensusGroup(), "normal")) + + overridePeerCount("normal", 5) + update() + require.Equal(t, 1, len(bg.Consensus.GetConsensusGroup())) + require.True(t, containsNode(bg.Consensus.GetConsensusGroup(), "normal")) + require.False(t, containsNode(bg.Consensus.GetConsensusGroup(), "fallback")) + }) + + t.Run("Ensure fallback is not updated when in normal mode", func(t *testing.T) { + reset() + for i := 0; i < 10; i++ { + update() + ts := recordLastUpdates(bg.Backends) + normalTimestamps = append(normalTimestamps, ts[0]) + fallbackTimestamps = append(fallbackTimestamps, ts[1]) + + require.False(t, normalTimestamps[i].IsZero()) + require.True(t, fallbackTimestamps[i].IsZero()) + + require.True(t, containsNode(bg.Consensus.GetConsensusGroup(), "normal")) + require.False(t, containsNode(bg.Consensus.GetConsensusGroup(), "fallback")) + + // consensus at block 0x101 + require.Equal(t, "0x101", bg.Consensus.GetLatestBlockNumber().String()) + require.Equal(t, "0xe1", bg.Consensus.GetSafeBlockNumber().String()) + require.Equal(t, "0xc1", bg.Consensus.GetFinalizedBlockNumber().String()) + } + }) + + /* + Set Normal backend to Fail -> both backends should be updated + */ + t.Run("Ensure both nodes are queried in fallback mode", func(t *testing.T) { + reset() + triggerFirstNormalFailure() + for i := 0; i < 10; i++ { + update() + ts := recordLastUpdates(bg.Backends) + normalTimestamps =
append(normalTimestamps, ts[0]) + fallbackTimestamps = append(fallbackTimestamps, ts[1]) + + // Both Nodes should be updated again + require.False(t, normalTimestamps[i].IsZero()) + require.False(t, fallbackTimestamps[i].IsZero(), + fmt.Sprintf("Error: Fallback timestamp: %v was not queried on iteration %d", fallbackTimestamps[i], i), + ) + if i > 0 { + require.Greater(t, normalTimestamps[i], normalTimestamps[i-1]) + require.Greater(t, fallbackTimestamps[i], fallbackTimestamps[i-1]) + } + } + }) + + t.Run("Healthy -> Fallback -> Healthy with timestamps", func(t *testing.T) { + reset() + for i := 0; i < 10; i++ { + update() + ts := recordLastUpdates(bg.Backends) + normalTimestamps = append(normalTimestamps, ts[0]) + fallbackTimestamps = append(fallbackTimestamps, ts[1]) + + // Normal is queried, fallback is not + require.False(t, normalTimestamps[i].IsZero()) + require.True(t, fallbackTimestamps[i].IsZero(), + fmt.Sprintf("Error: Fallback timestamp: %v was unexpectedly updated on iteration %d", fallbackTimestamps[i], i), + ) + if i > 0 { + require.Greater(t, normalTimestamps[i], normalTimestamps[i-1]) + // Fallbacks should be zeros + require.Equal(t, fallbackTimestamps[i], fallbackTimestamps[i-1]) + } + } + + offset := 10 + triggerFirstNormalFailure() + for i := 0; i < 10; i++ { + update() + ts := recordLastUpdates(bg.Backends) + normalTimestamps = append(normalTimestamps, ts[0]) + fallbackTimestamps = append(fallbackTimestamps, ts[1]) + + // Both Nodes should be updated again + require.False(t, normalTimestamps[i+offset].IsZero()) + require.False(t, fallbackTimestamps[i+offset].IsZero()) + + require.Greater(t, normalTimestamps[i+offset], normalTimestamps[i+offset-1]) + require.Greater(t, fallbackTimestamps[i+offset], fallbackTimestamps[i+offset-1]) + } + + overridePeerCount("normal", 5) + offset = 20 + for i := 0; i < 10; i++ { + update() + ts := recordLastUpdates(bg.Backends) + normalTimestamps = append(normalTimestamps, ts[0]) + fallbackTimestamps = append(fallbackTimestamps, ts[1]) + + // Normal Node will be updated + require.False(t, normalTimestamps[i+offset].IsZero()) + require.Greater(t, normalTimestamps[i+offset], normalTimestamps[i+offset-1]) + + // fallback should not be updating + if offset+i > 21 { + require.Equal(t, fallbackTimestamps[i+offset], fallbackTimestamps[i+offset-1]) + } + } + }) +} diff --git a/proxyd/integration_tests/max_rpc_conns_test.go b/proxyd/integration_tests/max_rpc_conns_test.go new file mode 100644 index 0000000..5e23364 --- /dev/null +++ b/proxyd/integration_tests/max_rpc_conns_test.go @@ -0,0 +1,79 @@ +package integration_tests + +import ( + "net/http" + "net/http/httptest" + "os" + "sync" + "testing" + "time" + + "github.com/ethereum-optimism/optimism/proxyd" + "github.com/stretchr/testify/require" +) + +func TestMaxConcurrentRPCs(t *testing.T) { + var (
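+ // mu guards the two counters below; maxConcurrentRPCs records the high-water mark of in-flight requests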
mu sync.Mutex + concurrentRPCs int + maxConcurrentRPCs int + ) + handler := func(w http.ResponseWriter, r *http.Request) { + mu.Lock() + concurrentRPCs++ + if maxConcurrentRPCs < concurrentRPCs { + maxConcurrentRPCs = concurrentRPCs + } + mu.Unlock() + + time.Sleep(time.Second * 2) + BatchedResponseHandler(200, goodResponse)(w, r) + + mu.Lock() + concurrentRPCs-- + mu.Unlock() + } + // We don't use the MockBackend because it serializes requests to the handler + slowBackend := httptest.NewServer(http.HandlerFunc(handler)) + defer slowBackend.Close() + + require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", slowBackend.URL)) + + config := ReadConfig("max_rpc_conns") + client := NewProxydClient("http://127.0.0.1:8545") + _, shutdown, err := proxyd.Start(config) + require.NoError(t, err) + defer shutdown() + + type resWithCodeErr struct { + res []byte + code int + err error + } + resCh := make(chan *resWithCodeErr) + for i := 0; i < 3; i++ { + go func() { + res, code, err := client.SendRPC("eth_chainId", nil) + resCh <- &resWithCodeErr{ + res: res, + code: code, + err: err, + } + }() + } + res1 := <-resCh + res2 := <-resCh + res3 := <-resCh + + require.NoError(t, res1.err) + require.NoError(t, res2.err) + require.NoError(t, res3.err) + require.Equal(t, 200, res1.code) + require.Equal(t, 200, res2.code) + require.Equal(t, 200, res3.code) + RequireEqualJSON(t, []byte(goodResponse), res1.res) + RequireEqualJSON(t, []byte(goodResponse), res2.res) + RequireEqualJSON(t, []byte(goodResponse), res3.res) + + require.EqualValues(t, 2, maxConcurrentRPCs) +} diff --git a/proxyd/integration_tests/mock_backend_test.go b/proxyd/integration_tests/mock_backend_test.go new file mode 100644 index 0000000..bf45d03 --- /dev/null +++ b/proxyd/integration_tests/mock_backend_test.go @@ -0,0 +1,327 @@ +package integration_tests + +import ( + "bytes" + "context" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "strings" + "sync" + + "github.com/ethereum-optimism/optimism/proxyd" + "github.com/gorilla/websocket" +) + +type RecordedRequest struct { + Method string + Headers http.Header + Body []byte +} + +type MockBackend struct { + handler http.Handler + server *httptest.Server + mtx sync.RWMutex + requests []*RecordedRequest +} + +func SingleResponseHandler(code int, response string) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(code) + _, _ = w.Write([]byte(response)) + } +} + +func BatchedResponseHandler(code int, responses ...string) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if len(responses) == 1 { + SingleResponseHandler(code, responses[0])(w, r) + return + } + + var body string + body += "[" + for i, response := range responses { + body += response + if i+1 < len(responses) { + body += "," + } + } + body += "]" + SingleResponseHandler(code, body)(w, r) + } +} + +type responseMapping struct { + result interface{} + calls int +} +type BatchRPCResponseRouter struct { + m map[string]map[string]*responseMapping + fallback map[string]interface{} + mtx sync.Mutex +} + +func NewBatchRPCResponseRouter() *BatchRPCResponseRouter { + return &BatchRPCResponseRouter{ + m: make(map[string]map[string]*responseMapping), + fallback: make(map[string]interface{}), + } +} + +func (h *BatchRPCResponseRouter) SetRoute(method string, id string, result interface{}) { + h.mtx.Lock() + defer h.mtx.Unlock() + + switch result.(type) { + case string: + case []string: + case nil: + break + default: + panic("invalid result type") + } + + m := h.m[method] 
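+ // lazily allocate the per-method response map on first route registration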
+ if m == nil { + m = make(map[string]*responseMapping) + } + m[id] = &responseMapping{result: result} + h.m[method] = m +} + +func (h *BatchRPCResponseRouter) SetFallbackRoute(method string, result interface{}) { + h.mtx.Lock() + defer h.mtx.Unlock() + + switch result.(type) { + case string: + case nil: + break + default: + panic("invalid result type") + } + + h.fallback[method] = result +} + +func (h *BatchRPCResponseRouter) GetNumCalls(method string, id string) int { + h.mtx.Lock() + defer h.mtx.Unlock() + + if m := h.m[method]; m != nil { + if rm := m[id]; rm != nil { + return rm.calls + } + } + return 0 +} + +func (h *BatchRPCResponseRouter) ServeHTTP(w http.ResponseWriter, r *http.Request) { + h.mtx.Lock() + defer h.mtx.Unlock() + + body, err := io.ReadAll(r.Body) + if err != nil { + panic(err) + } + + if proxyd.IsBatch(body) { + batch, err := proxyd.ParseBatchRPCReq(body) + if err != nil { + panic(err) + } + out := make([]*proxyd.RPCRes, len(batch)) + for i := range batch { + req, err := proxyd.ParseRPCReq(batch[i]) + if err != nil { + panic(err) + } + + var result interface{} + var resultHasValue bool + + if mappings, exists := h.m[req.Method]; exists { + if rm := mappings[string(req.ID)]; rm != nil { + result = rm.result + resultHasValue = true + rm.calls++ + } + } + if !resultHasValue { + result, resultHasValue = h.fallback[req.Method] + } + if !resultHasValue { + w.WriteHeader(400) + return + } + + out[i] = &proxyd.RPCRes{ + JSONRPC: proxyd.JSONRPCVersion, + Result: result, + ID: req.ID, + } + } + if err := json.NewEncoder(w).Encode(out); err != nil { + panic(err) + } + return + } + + req, err := proxyd.ParseRPCReq(body) + if err != nil { + panic(err) + } + + var result interface{} + var resultHasValue bool + + if mappings, exists := h.m[req.Method]; exists { + if rm := mappings[string(req.ID)]; rm != nil { + result = rm.result + resultHasValue = true + rm.calls++ + } + } + if !resultHasValue { + result, resultHasValue = h.fallback[req.Method] + } + if !resultHasValue { + w.WriteHeader(400) + return + } + + out := &proxyd.RPCRes{ + JSONRPC: proxyd.JSONRPCVersion, + Result: result, + ID: req.ID, + } + enc := json.NewEncoder(w) + if err := enc.Encode(out); err != nil { + panic(err) + } +} + +func NewMockBackend(handler http.Handler) *MockBackend { + mb := &MockBackend{ + handler: handler, + } + mb.server = httptest.NewServer(http.HandlerFunc(mb.wrappedHandler)) + return mb +} + +func (m *MockBackend) URL() string { + return m.server.URL +} + +func (m *MockBackend) Close() { + m.server.Close() +} + +func (m *MockBackend) SetHandler(handler http.Handler) { + m.mtx.Lock() + m.handler = handler + m.mtx.Unlock() +} + +func (m *MockBackend) Reset() { + m.mtx.Lock() + m.requests = nil + m.mtx.Unlock() +} + +func (m *MockBackend) Requests() []*RecordedRequest { + m.mtx.RLock() + defer m.mtx.RUnlock() + out := make([]*RecordedRequest, len(m.requests)) + copy(out, m.requests) + return out +} + +func (m *MockBackend) wrappedHandler(w http.ResponseWriter, r *http.Request) { + m.mtx.Lock() + body, err := io.ReadAll(r.Body) + if err != nil { + panic(err) + } + clone := r.Clone(context.Background()) + clone.Body = io.NopCloser(bytes.NewReader(body)) + m.requests = append(m.requests, &RecordedRequest{ + Method: r.Method, + Headers: r.Header.Clone(), + Body: body, + }) + m.handler.ServeHTTP(w, clone) + m.mtx.Unlock() +} + +type MockWSBackend struct { + connCB MockWSBackendOnConnect + msgCB MockWSBackendOnMessage + closeCB MockWSBackendOnClose + server *httptest.Server + upgrader websocket.Upgrader 
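+ // conns tracks every accepted connection so Close can tear them down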
+ conns []*websocket.Conn + connsMu sync.Mutex +} + +type MockWSBackendOnConnect func(conn *websocket.Conn) +type MockWSBackendOnMessage func(conn *websocket.Conn, msgType int, data []byte) +type MockWSBackendOnClose func(conn *websocket.Conn, err error) + +func NewMockWSBackend( + connCB MockWSBackendOnConnect, + msgCB MockWSBackendOnMessage, + closeCB MockWSBackendOnClose, +) *MockWSBackend { + mb := &MockWSBackend{ + connCB: connCB, + msgCB: msgCB, + closeCB: closeCB, + } + mb.server = httptest.NewServer(mb) + return mb +} + +func (m *MockWSBackend) ServeHTTP(w http.ResponseWriter, r *http.Request) { + conn, err := m.upgrader.Upgrade(w, r, nil) + if err != nil { + panic(err) + } + if m.connCB != nil { + m.connCB(conn) + } + go func() { + for { + mType, msg, err := conn.ReadMessage() + if err != nil { + if m.closeCB != nil { + m.closeCB(conn, err) + } + return + } + if m.msgCB != nil { + m.msgCB(conn, mType, msg) + } + } + }() + m.connsMu.Lock() + m.conns = append(m.conns, conn) + m.connsMu.Unlock() +} + +func (m *MockWSBackend) URL() string { + return strings.Replace(m.server.URL, "http://", "ws://", 1) +} + +func (m *MockWSBackend) Close() { + m.server.Close() + + m.connsMu.Lock() + for _, conn := range m.conns { + conn.Close() + } + m.connsMu.Unlock() +} diff --git a/proxyd/integration_tests/rate_limit_test.go b/proxyd/integration_tests/rate_limit_test.go new file mode 100644 index 0000000..4e17f62 --- /dev/null +++ b/proxyd/integration_tests/rate_limit_test.go @@ -0,0 +1,170 @@ +package integration_tests + +import ( + "encoding/json" + "net/http" + "os" + "testing" + "time" + + "github.com/ethereum-optimism/optimism/proxyd" + "github.com/stretchr/testify/require" +) + +type resWithCode struct { + code int + res []byte +} + +const frontendOverLimitResponse = `{"error":{"code":-32016,"message":"over rate limit with special message"},"id":null,"jsonrpc":"2.0"}` +const frontendOverLimitResponseWithID = `{"error":{"code":-32016,"message":"over rate limit with special message"},"id":999,"jsonrpc":"2.0"}` + +var ethChainID = "eth_chainId" + +func TestFrontendMaxRPSLimit(t *testing.T) { + goodBackend := NewMockBackend(BatchedResponseHandler(200, goodResponse)) + defer goodBackend.Close() + + require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL())) + + config := ReadConfig("frontend_rate_limit") + _, shutdown, err := proxyd.Start(config) + require.NoError(t, err) + defer shutdown() + + t.Run("non-exempt over limit", func(t *testing.T) { + client := NewProxydClient("http://127.0.0.1:8545") + limitedRes, codes := spamReqs(t, client, ethChainID, 429, 3) + require.Equal(t, 1, codes[429]) + require.Equal(t, 2, codes[200]) + RequireEqualJSON(t, []byte(frontendOverLimitResponse), limitedRes) + }) + + t.Run("exempt user agent over limit", func(t *testing.T) { + h := make(http.Header) + h.Set("User-Agent", "exempt_agent") + client := NewProxydClientWithHeaders("http://127.0.0.1:8545", h) + _, codes := spamReqs(t, client, ethChainID, 429, 3) + require.Equal(t, 3, codes[200]) + }) + + t.Run("exempt origin over limit", func(t *testing.T) { + h := make(http.Header) + h.Set("Origin", "exempt_origin") + client := NewProxydClientWithHeaders("http://127.0.0.1:8545", h) + _, codes := spamReqs(t, client, ethChainID, 429, 3) + require.Equal(t, 3, codes[200]) + }) + + t.Run("multiple xff", func(t *testing.T) { + h1 := make(http.Header) + h1.Set("X-Forwarded-For", "0.0.0.0") + h2 := make(http.Header) + h2.Set("X-Forwarded-For", "1.1.1.1") + client1 := 
NewProxydClientWithHeaders("http://127.0.0.1:8545", h1) + client2 := NewProxydClientWithHeaders("http://127.0.0.1:8545", h2) + _, codes := spamReqs(t, client1, ethChainID, 429, 3) + require.Equal(t, 1, codes[429]) + require.Equal(t, 2, codes[200]) + _, code, err := client2.SendRPC(ethChainID, nil) + require.Equal(t, 200, code) + require.NoError(t, err) + time.Sleep(time.Second) + _, code, err = client2.SendRPC(ethChainID, nil) + require.Equal(t, 200, code) + require.NoError(t, err) + }) + + time.Sleep(time.Second) + + t.Run("RPC override", func(t *testing.T) { + client := NewProxydClient("http://127.0.0.1:8545") + limitedRes, codes := spamReqs(t, client, "eth_foobar", 429, 2) + // use 2 and 1 here since the limit for eth_foobar is 1 + require.Equal(t, 1, codes[429]) + require.Equal(t, 1, codes[200]) + RequireEqualJSON(t, []byte(frontendOverLimitResponseWithID), limitedRes) + }) + + time.Sleep(time.Second) + + t.Run("RPC override in batch", func(t *testing.T) { + client := NewProxydClient("http://127.0.0.1:8545") + req := NewRPCReq("123", "eth_foobar", nil) + out, code, err := client.SendBatchRPC(req, req, req) + require.NoError(t, err) + var res []proxyd.RPCRes + require.NoError(t, json.Unmarshal(out, &res)) + + expCode := proxyd.ErrOverRateLimit.Code + require.Equal(t, 200, code) + require.Equal(t, 3, len(res)) + require.Nil(t, res[0].Error) + require.Equal(t, expCode, res[1].Error.Code) + require.Equal(t, expCode, res[2].Error.Code) + }) + + time.Sleep(time.Second) + + t.Run("RPC override in batch exempt", func(t *testing.T) { + h := make(http.Header) + h.Set("User-Agent", "exempt_agent") + client := NewProxydClientWithHeaders("http://127.0.0.1:8545", h) + req := NewRPCReq("123", "eth_foobar", nil) + out, code, err := client.SendBatchRPC(req, req, req) + require.NoError(t, err) + var res []proxyd.RPCRes + require.NoError(t, json.Unmarshal(out, &res)) + + require.Equal(t, 200, code) + require.Equal(t, 3, len(res)) + require.Nil(t, res[0].Error) + require.Nil(t, res[1].Error) + require.Nil(t, res[2].Error) + }) + + time.Sleep(time.Second) + + t.Run("global RPC override", func(t *testing.T) { + h := make(http.Header) + h.Set("User-Agent", "exempt_agent") + client := NewProxydClientWithHeaders("http://127.0.0.1:8545", h) + limitedRes, codes := spamReqs(t, client, "eth_baz", 429, 2) + // use 1 and 1 here since the limit for eth_baz is 1 + require.Equal(t, 1, codes[429]) + require.Equal(t, 1, codes[200]) + RequireEqualJSON(t, []byte(frontendOverLimitResponseWithID), limitedRes) + }) +} + +func spamReqs(t *testing.T, client *ProxydHTTPClient, method string, limCode int, n int) ([]byte, map[int]int) { + resCh := make(chan *resWithCode) + for i := 0; i < n; i++ { + go func() { + res, code, err := client.SendRPC(method, nil) + require.NoError(t, err) + resCh <- &resWithCode{ + code: code, + res: res, + } + }() + } + + codes := make(map[int]int) + var limitedRes []byte + for i := 0; i < n; i++ { + res := <-resCh + code := res.code + if codes[code] == 0 { + codes[code] = 1 + } else { + codes[code] += 1 + } + + if code == limCode { + limitedRes = res.res + } + } + + return limitedRes, codes +} diff --git a/proxyd/integration_tests/sender_rate_limit_test.go b/proxyd/integration_tests/sender_rate_limit_test.go new file mode 100644 index 0000000..20c5f0a --- /dev/null +++ b/proxyd/integration_tests/sender_rate_limit_test.go @@ -0,0 +1,126 @@ +package integration_tests + +import ( + "bufio" + "fmt" + "math" + "os" + "strings" + "testing" + "time" + + "github.com/ethereum-optimism/optimism/proxyd" + 
"github.com/stretchr/testify/require" +) + +const txHex1 = "0x02f8b28201a406849502f931849502f931830147f9948f3ddd0fbf3e78ca1d6c" + + "d17379ed88e261249b5280b84447e7ef2400000000000000000000000089c8b1" + + "b2774201bac50f627403eac1b732459cf7000000000000000000000000000000" + + "0000000000000000056bc75e2d63100000c080a0473c95566026c312c9664cd6" + + "1145d2f3e759d49209fe96011ac012884ec5b017a0763b58f6fa6096e6ba28ee" + + "08bfac58f58fb3b8bcef5af98578bdeaddf40bde42" + +const txHex2 = "0x02f8758201a48217fd84773594008504a817c80082520894be53e587975603" + + "a13d0923d0aa6d37c5233dd750865af3107a400080c080a04aefbd5819c35729" + + "138fe26b6ae1783ebf08d249b356c2f920345db97877f3f7a008d5ae92560a3c" + + "65f723439887205713af7ce7d7f6b24fba198f2afa03435867" + +const dummyRes = `{"id": 123, "jsonrpc": "2.0", "result": "dummy"}` + +const limRes = `{"error":{"code":-32017,"message":"sender is over rate limit"},"id":1,"jsonrpc":"2.0"}` + +func TestSenderRateLimitValidation(t *testing.T) { + goodBackend := NewMockBackend(SingleResponseHandler(200, dummyRes)) + defer goodBackend.Close() + + require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL())) + + config := ReadConfig("sender_rate_limit") + + // Don't perform rate limiting in this test since we're only testing + // validation. + config.SenderRateLimit.Limit = math.MaxInt + client := NewProxydClient("http://127.0.0.1:8545") + _, shutdown, err := proxyd.Start(config) + require.NoError(t, err) + defer shutdown() + + f, err := os.Open("testdata/testdata.txt") + require.NoError(t, err) + defer f.Close() + + scanner := bufio.NewScanner(f) + scanner.Scan() // skip header + for scanner.Scan() { + record := strings.Split(scanner.Text(), "|") + name, body, expResponseBody := record[0], record[1], record[2] + require.NoError(t, err) + t.Run(name, func(t *testing.T) { + res, _, err := client.SendRequest([]byte(body)) + require.NoError(t, err) + RequireEqualJSON(t, []byte(expResponseBody), res) + }) + } +} + +func TestSenderRateLimitLimiting(t *testing.T) { + goodBackend := NewMockBackend(SingleResponseHandler(200, dummyRes)) + defer goodBackend.Close() + + require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL())) + + config := ReadConfig("sender_rate_limit") + client := NewProxydClient("http://127.0.0.1:8545") + _, shutdown, err := proxyd.Start(config) + require.NoError(t, err) + defer shutdown() + + // Two separate requests from the same sender + // should be rate limited. + res1, code1, err := client.SendRequest(makeSendRawTransaction(txHex1)) + require.NoError(t, err) + RequireEqualJSON(t, []byte(dummyRes), res1) + require.Equal(t, 200, code1) + res2, code2, err := client.SendRequest(makeSendRawTransaction(txHex1)) + require.NoError(t, err) + RequireEqualJSON(t, []byte(limRes), res2) + require.Equal(t, 429, code2) + + // Clear the limiter. + time.Sleep(1100 * time.Millisecond) + + // Two separate requests from different senders + // should not be rate limited. + res1, code1, err = client.SendRequest(makeSendRawTransaction(txHex1)) + require.NoError(t, err) + res2, code2, err = client.SendRequest(makeSendRawTransaction(txHex2)) + require.NoError(t, err) + RequireEqualJSON(t, []byte(dummyRes), res1) + require.Equal(t, 200, code1) + RequireEqualJSON(t, []byte(dummyRes), res2) + require.Equal(t, 200, code2) + + // Clear the limiter. + time.Sleep(1100 * time.Millisecond) + + // A batch request should rate limit within the batch itself. 
+ batch := []byte(fmt.Sprintf( + `[%s, %s, %s]`, + makeSendRawTransaction(txHex1), + makeSendRawTransaction(txHex1), + makeSendRawTransaction(txHex2), + )) + res, code, err := client.SendRequest(batch) + require.NoError(t, err) + require.Equal(t, 200, code) + RequireEqualJSON(t, []byte(fmt.Sprintf( + `[%s, %s, %s]`, + dummyRes, + limRes, + dummyRes, + )), res) +} + +func makeSendRawTransaction(dataHex string) []byte { + return []byte(`{"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":["` + dataHex + `"],"id":1}`) +} diff --git a/proxyd/integration_tests/smoke_test.go b/proxyd/integration_tests/smoke_test.go new file mode 100644 index 0000000..5fed757 --- /dev/null +++ b/proxyd/integration_tests/smoke_test.go @@ -0,0 +1,51 @@ +package integration_tests + +import ( + "fmt" + "io" + "os" + "strings" + "testing" + + "github.com/ethereum-optimism/optimism/proxyd" + "github.com/ethereum/go-ethereum/log" + "github.com/stretchr/testify/require" +) + +func TestInitProxyd(t *testing.T) { + goodBackend := NewMockBackend(BatchedResponseHandler(200, goodResponse)) + defer goodBackend.Close() + + require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL())) + + config := ReadConfig("smoke") + + sysStdOut := os.Stdout + r, w, err := os.Pipe() + require.NoError(t, err) + os.Stdout = w + + proxyd.SetLogLevel(log.LevelInfo) + + defer func() { + w.Close() + out, _ := io.ReadAll(r) + require.True(t, strings.Contains(string(out), "started proxyd")) + require.True(t, strings.Contains(string(out), "shutting down proxyd")) + fmt.Println(string(out)) + os.Stdout = sysStdOut + }() + + _, shutdown, err := proxyd.Start(config) + require.NoError(t, err) + defer shutdown() + + t.Run("initialization", func(t *testing.T) { + client := NewProxydClient("http://127.0.0.1:8545") + res, code, err := client.SendRPC(ethChainID, nil) + require.NoError(t, err) + require.Equal(t, 200, code) + require.NotNil(t, res) + }) + +} diff --git a/proxyd/integration_tests/testdata/batch_timeout.toml b/proxyd/integration_tests/testdata/batch_timeout.toml new file mode 100644 index 0000000..4238aaf --- /dev/null +++ b/proxyd/integration_tests/testdata/batch_timeout.toml @@ -0,0 +1,20 @@ +[server] +rpc_port = 8545 +timeout_seconds = 1 +max_upstream_batch_size = 1 + +[backend] +response_timeout_seconds = 1 +max_retries = 3 + +[backends] +[backends.slow] +rpc_url = "$SLOW_BACKEND_RPC_URL" +ws_url = "$SLOW_BACKEND_RPC_URL" + +[backend_groups] +[backend_groups.main] +backends = ["slow"] + +[rpc_method_mappings] +eth_chainId = "main" diff --git a/proxyd/integration_tests/testdata/batching.toml b/proxyd/integration_tests/testdata/batching.toml new file mode 100644 index 0000000..4762835 --- /dev/null +++ b/proxyd/integration_tests/testdata/batching.toml @@ -0,0 +1,23 @@ +[server] +rpc_port = 8545 + +[backend] +response_timeout_seconds = 1 + +[backends] +[backends.good] +rpc_url = "$GOOD_BACKEND_RPC_URL" +ws_url = "$GOOD_BACKEND_RPC_URL" + +[backend_groups] +[backend_groups.main] +backends = ["good"] + +[rpc_method_mappings] +eth_chainId = "main" +net_version = "main" +eth_call = "main" + +[batch] +error_message = "over batch size custom message" +max_size = 5 \ No newline at end of file diff --git a/proxyd/integration_tests/testdata/caching.toml b/proxyd/integration_tests/testdata/caching.toml new file mode 100644 index 0000000..41bc65b --- /dev/null +++ b/proxyd/integration_tests/testdata/caching.toml @@ -0,0 +1,36 @@ +[server] +rpc_port = 8545 + +[backend] +response_timeout_seconds = 1 + +[redis] +url = "$REDIS_URL" +namespace 
= "proxyd" + +[cache] +enabled = true + +[backends] +[backends.good] +rpc_url = "$GOOD_BACKEND_RPC_URL" +ws_url = "$GOOD_BACKEND_RPC_URL" + +[backend_groups] +[backend_groups.main] +backends = ["good"] + +[rpc_method_mappings] +eth_chainId = "main" +net_version = "main" +eth_getBlockByNumber = "main" +eth_blockNumber = "main" +eth_call = "main" +eth_getBlockTransactionCountByHash = "main" +eth_getUncleCountByBlockHash = "main" +eth_getBlockByHash = "main" +eth_getTransactionByHash = "main" +eth_getTransactionByBlockHashAndIndex = "main" +eth_getUncleByBlockHashAndIndex = "main" +eth_getTransactionReceipt = "main" +debug_getRawReceipts = "main" diff --git a/proxyd/integration_tests/testdata/consensus.toml b/proxyd/integration_tests/testdata/consensus.toml new file mode 100644 index 0000000..bb13036 --- /dev/null +++ b/proxyd/integration_tests/testdata/consensus.toml @@ -0,0 +1,30 @@ +[server] +rpc_port = 8545 + +[backend] +response_timeout_seconds = 1 +max_degraded_latency_threshold = "30ms" + +[backends] +[backends.node1] +rpc_url = "$NODE1_URL" + +[backends.node2] +rpc_url = "$NODE2_URL" + +[backend_groups] +[backend_groups.node] +backends = ["node1", "node2"] +consensus_aware = true +consensus_handler = "noop" # allow more control over the consensus poller for tests +consensus_ban_period = "1m" +consensus_max_update_threshold = "2m" +consensus_max_block_lag = 8 +consensus_min_peer_count = 4 + +[rpc_method_mappings] +eth_call = "node" +eth_chainId = "node" +eth_blockNumber = "node" +eth_getBlockByNumber = "node" +consensus_getReceipts = "node" diff --git a/proxyd/integration_tests/testdata/consensus_responses.yml b/proxyd/integration_tests/testdata/consensus_responses.yml new file mode 100644 index 0000000..642c334 --- /dev/null +++ b/proxyd/integration_tests/testdata/consensus_responses.yml @@ -0,0 +1,234 @@ +- method: eth_chainId + response: > + { + "jsonrpc": "2.0", + "id": 67, + "result": "hello", + } +- method: net_peerCount + response: > + { + "jsonrpc": "2.0", + "id": 67, + "result": "0x10" + } +- method: eth_syncing + response: > + { + "jsonrpc": "2.0", + "id": 67, + "result": false + } +- method: eth_getBlockByNumber + block: latest + response: > + { + "jsonrpc": "2.0", + "id": 67, + "result": { + "hash": "hash_0x101", + "number": "0x101" + } + } +- method: eth_getBlockByNumber + block: 0x101 + response: > + { + "jsonrpc": "2.0", + "id": 67, + "result": { + "hash": "hash_0x101", + "number": "0x101" + } + } +- method: eth_getBlockByNumber + block: 0x102 + response: > + { + "jsonrpc": "2.0", + "id": 67, + "result": { + "hash": "hash_0x102", + "number": "0x102" + } + } +- method: eth_getBlockByNumber + block: 0x103 + response: > + { + "jsonrpc": "2.0", + "id": 67, + "result": { + "hash": "hash_0x103", + "number": "0x103" + } + } +- method: eth_getBlockByNumber + block: 0x10a + response: > + { + "jsonrpc": "2.0", + "id": 67, + "result": { + "hash": "hash_0x10a", + "number": "0x10a" + } + } +- method: eth_getBlockByNumber + block: 0x132 + response: > + { + "jsonrpc": "2.0", + "id": 67, + "result": { + "hash": "hash_0x132", + "number": "0x132" + } + } +- method: eth_getBlockByNumber + block: 0x133 + response: > + { + "jsonrpc": "2.0", + "id": 67, + "result": { + "hash": "hash_0x133", + "number": "0x133" + } + } +- method: eth_getBlockByNumber + block: 0x134 + response: > + { + "jsonrpc": "2.0", + "id": 67, + "result": { + "hash": "hash_0x134", + "number": "0x134" + } + } +- method: eth_getBlockByNumber + block: 0x200 + response: > + { + "jsonrpc": "2.0", + "id": 67, + "result": { + 
"hash": "hash_0x200", + "number": "0x200" + } + } +- method: eth_getBlockByNumber + block: 0x91 + response: > + { + "jsonrpc": "2.0", + "id": 67, + "result": { + "hash": "hash_0x91", + "number": "0x91" + } + } +- method: eth_getBlockByNumber + block: safe + response: > + { + "jsonrpc": "2.0", + "id": 67, + "result": { + "hash": "hash_0xe1", + "number": "0xe1" + } + } +- method: eth_getBlockByNumber + block: 0xe1 + response: > + { + "jsonrpc": "2.0", + "id": 67, + "result": { + "hash": "hash_0xe1", + "number": "0xe1" + } + } +- method: eth_getBlockByNumber + block: finalized + response: > + { + "jsonrpc": "2.0", + "id": 67, + "result": { + "hash": "hash_0xc1", + "number": "0xc1" + } + } +- method: eth_getBlockByNumber + block: 0xc1 + response: > + { + "jsonrpc": "2.0", + "id": 67, + "result": { + "hash": "hash_0xc1", + "number": "0xc1" + } + } +- method: eth_getBlockByNumber + block: 0xd1 + response: > + { + "jsonrpc": "2.0", + "id": 67, + "result": { + "hash": "hash_0xd1", + "number": "0xd1" + } + } +- method: debug_getRawReceipts + block: 0x55 + response: > + { + "jsonrpc": "2.0", + "id": 67, + "result": { + "_": "debug_getRawReceipts" + } + } +- method: debug_getRawReceipts + block: 0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b + response: > + { + "jsonrpc": "2.0", + "id": 67, + "result": { + "_": "debug_getRawReceipts" + } + } +- method: debug_getRawReceipts + block: 0x101 + response: > + { + "jsonrpc": "2.0", + "id": 67, + "result": { + "_": "debug_getRawReceipts" + } + } +- method: eth_getTransactionReceipt + response: > + { + "jsonrpc": "2.0", + "id": 67, + "result": { + "_": "eth_getTransactionReceipt" + } + } +- method: alchemy_getTransactionReceipts + response: > + { + "jsonrpc": "2.0", + "id": 67, + "result": { + "_": "alchemy_getTransactionReceipts" + } + } diff --git a/proxyd/integration_tests/testdata/failover.toml b/proxyd/integration_tests/testdata/failover.toml new file mode 100644 index 0000000..80ff990 --- /dev/null +++ b/proxyd/integration_tests/testdata/failover.toml @@ -0,0 +1,20 @@ +[server] +rpc_port = 8545 + +[backend] +response_timeout_seconds = 1 + +[backends] +[backends.good] +rpc_url = "$GOOD_BACKEND_RPC_URL" +ws_url = "$GOOD_BACKEND_RPC_URL" +[backends.bad] +rpc_url = "$BAD_BACKEND_RPC_URL" +ws_url = "$BAD_BACKEND_RPC_URL" + +[backend_groups] +[backend_groups.main] +backends = ["bad", "good"] + +[rpc_method_mappings] +eth_chainId = "main" \ No newline at end of file diff --git a/proxyd/integration_tests/testdata/fallback.toml b/proxyd/integration_tests/testdata/fallback.toml new file mode 100644 index 0000000..c801ca3 --- /dev/null +++ b/proxyd/integration_tests/testdata/fallback.toml @@ -0,0 +1,31 @@ +[server] +rpc_port = 8545 + +[backend] +response_timeout_seconds = 1 +max_degraded_latency_threshold = "30ms" + +[backends] +[backends.normal] +rpc_url = "$NODE1_URL" + +[backends.fallback] +rpc_url = "$NODE2_URL" + +[backend_groups] +[backend_groups.node] +backends = ["normal", "fallback"] +consensus_aware = true +consensus_handler = "noop" # allow more control over the consensus poller for tests +consensus_ban_period = "1m" +consensus_max_update_threshold = "2m" +consensus_max_block_lag = 8 +consensus_min_peer_count = 4 +fallbacks = ["fallback"] + +[rpc_method_mappings] +eth_call = "node" +eth_chainId = "node" +eth_blockNumber = "node" +eth_getBlockByNumber = "node" +consensus_getReceipts = "node" diff --git a/proxyd/integration_tests/testdata/frontend_rate_limit.toml b/proxyd/integration_tests/testdata/frontend_rate_limit.toml new file 
mode 100644 index 0000000..8aa9d19 --- /dev/null +++ b/proxyd/integration_tests/testdata/frontend_rate_limit.toml @@ -0,0 +1,35 @@ +[server] +rpc_port = 8545 + +[backend] +response_timeout_seconds = 1 + +[backends] +[backends.good] +rpc_url = "$GOOD_BACKEND_RPC_URL" +ws_url = "$GOOD_BACKEND_RPC_URL" + +[backend_groups] +[backend_groups.main] +backends = ["good"] + +[rpc_method_mappings] +eth_chainId = "main" +eth_foobar = "main" +eth_baz = "main" + +[rate_limit] +base_rate = 2 +base_interval = "1s" +exempt_origins = ["exempt_origin"] +exempt_user_agents = ["exempt_agent"] +error_message = "over rate limit with special message" + +[rate_limit.method_overrides.eth_foobar] +limit = 1 +interval = "1s" + +[rate_limit.method_overrides.eth_baz] +limit = 1 +interval = "1s" +global = true \ No newline at end of file diff --git a/proxyd/integration_tests/testdata/max_rpc_conns.toml b/proxyd/integration_tests/testdata/max_rpc_conns.toml new file mode 100644 index 0000000..68d7c19 --- /dev/null +++ b/proxyd/integration_tests/testdata/max_rpc_conns.toml @@ -0,0 +1,19 @@ +[server] +rpc_port = 8545 +max_concurrent_rpcs = 2 + +[backend] +# this should cover blocked requests due to max_concurrent_rpcs +response_timeout_seconds = 12 + +[backends] +[backends.good] +rpc_url = "$GOOD_BACKEND_RPC_URL" +ws_url = "$GOOD_BACKEND_RPC_URL" + +[backend_groups] +[backend_groups.main] +backends = ["good"] + +[rpc_method_mappings] +eth_chainId = "main" diff --git a/proxyd/integration_tests/testdata/out_of_service_interval.toml b/proxyd/integration_tests/testdata/out_of_service_interval.toml new file mode 100644 index 0000000..157fa06 --- /dev/null +++ b/proxyd/integration_tests/testdata/out_of_service_interval.toml @@ -0,0 +1,22 @@ +[server] +rpc_port = 8545 + +[backend] +response_timeout_seconds = 1 +max_retries = 1 +out_of_service_seconds = 1 + +[backends] +[backends.good] +rpc_url = "$GOOD_BACKEND_RPC_URL" +ws_url = "$GOOD_BACKEND_RPC_URL" +[backends.bad] +rpc_url = "$BAD_BACKEND_RPC_URL" +ws_url = "$BAD_BACKEND_RPC_URL" + +[backend_groups] +[backend_groups.main] +backends = ["bad", "good"] + +[rpc_method_mappings] +eth_chainId = "main" diff --git a/proxyd/integration_tests/testdata/retries.toml b/proxyd/integration_tests/testdata/retries.toml new file mode 100644 index 0000000..dc9466d --- /dev/null +++ b/proxyd/integration_tests/testdata/retries.toml @@ -0,0 +1,18 @@ +[server] +rpc_port = 8545 + +[backend] +response_timeout_seconds = 1 +max_retries = 3 + +[backends] +[backends.good] +rpc_url = "$GOOD_BACKEND_RPC_URL" +ws_url = "$GOOD_BACKEND_RPC_URL" + +[backend_groups] +[backend_groups.main] +backends = ["good"] + +[rpc_method_mappings] +eth_chainId = "main" \ No newline at end of file diff --git a/proxyd/integration_tests/testdata/sender_rate_limit.toml b/proxyd/integration_tests/testdata/sender_rate_limit.toml new file mode 100644 index 0000000..c99959d --- /dev/null +++ b/proxyd/integration_tests/testdata/sender_rate_limit.toml @@ -0,0 +1,24 @@ +[server] +rpc_port = 8545 + +[backend] +response_timeout_seconds = 1 + +[backends] +[backends.good] +rpc_url = "$GOOD_BACKEND_RPC_URL" +ws_url = "$GOOD_BACKEND_RPC_URL" + +[backend_groups] +[backend_groups.main] +backends = ["good"] + +[rpc_method_mappings] +eth_chainId = "main" +eth_sendRawTransaction = "main" + +[sender_rate_limit] +allowed_chain_ids = [0, 420] # adding 0 allows pre-EIP-155 transactions +enabled = true +interval = "1s" +limit = 1 diff --git a/proxyd/integration_tests/testdata/size_limits.toml b/proxyd/integration_tests/testdata/size_limits.toml new 
file mode 100644 index 0000000..bd4afab --- /dev/null +++ b/proxyd/integration_tests/testdata/size_limits.toml @@ -0,0 +1,21 @@ +whitelist_error_message = "rpc method is not whitelisted custom message" + +[server] +rpc_port = 8545 +max_request_body_size_bytes = 150 + +[backend] +response_timeout_seconds = 1 +max_response_size_bytes = 1 + +[backends] +[backends.good] +rpc_url = "$GOOD_BACKEND_RPC_URL" +ws_url = "$GOOD_BACKEND_RPC_URL" + +[backend_groups] +[backend_groups.main] +backends = ["good"] + +[rpc_method_mappings] +eth_chainId = "main" \ No newline at end of file diff --git a/proxyd/integration_tests/testdata/smoke.toml b/proxyd/integration_tests/testdata/smoke.toml new file mode 100644 index 0000000..a2187a2 --- /dev/null +++ b/proxyd/integration_tests/testdata/smoke.toml @@ -0,0 +1,18 @@ +[server] +rpc_port = 8545 + +[backend] +response_timeout_seconds = 1 + +[backends] +[backends.good] +rpc_url = "$GOOD_BACKEND_RPC_URL" +ws_url = "$GOOD_BACKEND_RPC_URL" + +[backend_groups] +[backend_groups.main] +backends = ["good"] + +[rpc_method_mappings] +eth_chainId = "main" + diff --git a/proxyd/integration_tests/testdata/testdata.txt b/proxyd/integration_tests/testdata/testdata.txt new file mode 100644 index 0000000..4bdd635 --- /dev/null +++ b/proxyd/integration_tests/testdata/testdata.txt @@ -0,0 +1,14 @@ +name|body|responseBody +not json|not json|{"jsonrpc":"2.0","error":{"code":-32700,"message":"parse error"},"id":null} +not json-rpc|{"foo":"bar"}|{"jsonrpc":"2.0","error":{"code":-32600,"message":"invalid JSON-RPC version"},"id":null} +missing fields json-rpc|{"jsonrpc":"2.0"}|{"jsonrpc":"2.0","error":{"code":-32600,"message":"no method specified"},"id":null} +bad method json-rpc|{"jsonrpc":"2.0","method":"eth_notSendRawTransaction","id":1}|{"jsonrpc":"2.0","error":{"code":-32601,"message":"rpc method is not whitelisted"},"id":1} +no transaction data|{"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":[],"id":1}|{"jsonrpc":"2.0","error":{"code":-32602,"message":"missing value for required argument 0"},"id":1} +invalid transaction data|{"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":["0xf6806872fcc650ad4e77e0629206426cd183d751e9ddcc8d5e77"],"id":1}|{"jsonrpc":"2.0","error":{"code":-32602,"message":"rlp: value size exceeds available input length"},"id":1} +invalid transaction data|{"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":["0x1234"],"id":1}|{"jsonrpc":"2.0","error":{"code":-32602,"message":"transaction type not supported"},"id":1} +valid transaction data - simple send|{"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":["0x02f8748201a415843b9aca31843b9aca3182520894f80267194936da1e98db10bce06f3147d580a62e880de0b6b3a764000080c001a0b50ee053102360ff5fedf0933b912b7e140c90fe57fa07a0cebe70dbd72339dda072974cb7bfe5c3dc54dde110e2b049408ccab8a879949c3b4d42a3a7555a618b"],"id":1}|{"id": 123, "jsonrpc": "2.0", "result": "dummy"} +valid transaction data - contract call|{"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":["0x02f8b28201a406849502f931849502f931830147f9948f3ddd0fbf3e78ca1d6cd17379ed88e261249b5280b84447e7ef2400000000000000000000000089c8b1b2774201bac50f627403eac1b732459cf70000000000000000000000000000000000000000000000056bc75e2d63100000c080a0473c95566026c312c9664cd61145d2f3e759d49209fe96011ac012884ec5b017a0763b58f6fa6096e6ba28ee08bfac58f58fb3b8bcef5af98578bdeaddf40bde42"],"id":1}|{"id": 123, "jsonrpc": "2.0", "result": "dummy"} +valid chain id - simple 
send|{"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":["0x02f8748201a415843b9aca31843b9aca3182520894f80267194936da1e98db10bce06f3147d580a62e880de0b6b3a764000080c001a0b50ee053102360ff5fedf0933b912b7e140c90fe57fa07a0cebe70dbd72339dda072974cb7bfe5c3dc54dde110e2b049408ccab8a879949c3b4d42a3a7555a618b"],"id":1}|{"id": 123, "jsonrpc": "2.0", "result": "dummy"} +invalid chain id - simple send|{"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":["0x02f87683ab41308217af84773594008504a817c80082520894be53e587975603a13d0923d0aa6d37c5233dd750865af3107a400080c001a04ae265f17e882b922d39f0f0cb058a6378df1dc89da8b8165ab6bc53851b426aa0682079486be2aa23bc7514477473362cc7d63afa12c99f7d8fb15e68d69d9a48"],"id":1}|{"jsonrpc":"2.0","error":{"code":-32000,"message":"invalid sender"},"id":1} +no chain id (pre eip-155) - simple send|{"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":["0xf865808609184e72a00082271094000000000000000000000000000000000000000001001ba0d937ddb66e7788f917864b8e6974cac376b091154db1c25ff8429a6e61016e74a054ced39349e7658b7efceccfabc461e02418eb510124377949cfae8ccf1831af"],"id":1}|{"id": 123, "jsonrpc": "2.0", "result": "dummy"} +batch with mixed results|[{"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":["0x02f87683ab41308217af84773594008504a817c80082520894be53e587975603a13d0923d0aa6d37c5233dd750865af3107a400080c001a04ae265f17e882b922d39f0f0cb058a6378df1dc89da8b8165ab6bc53851b426aa0682079486be2aa23bc7514477473362cc7d63afa12c99f7d8fb15e68d69d9a48"],"id":1},{"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":["0x02f8748201a415843b9aca31843b9aca3182520894f80267194936da1e98db10bce06f3147d580a62e880de0b6b3a764000080c001a0b50ee053102360ff5fedf0933b912b7e140c90fe57fa07a0cebe70dbd72339dda072974cb7bfe5c3dc54dde110e2b049408ccab8a879949c3b4d42a3a7555a618b"],"id":1},{"bad":"json"},{"jsonrpc":"2.0","method":"eth_fooTheBar","params":[],"id":123}]|[{"jsonrpc":"2.0","error":{"code":-32000,"message":"invalid sender"},"id":1},{"id": 123, "jsonrpc": "2.0", "result": "dummy"},{"jsonrpc":"2.0","error":{"code":-32600,"message":"invalid JSON-RPC version"},"id":null},{"jsonrpc":"2.0","error":{"code":-32601,"message":"rpc method is not whitelisted"},"id":123}] diff --git a/proxyd/integration_tests/testdata/whitelist.toml b/proxyd/integration_tests/testdata/whitelist.toml new file mode 100644 index 0000000..4a65248 --- /dev/null +++ b/proxyd/integration_tests/testdata/whitelist.toml @@ -0,0 +1,19 @@ +whitelist_error_message = "rpc method is not whitelisted custom message" + +[server] +rpc_port = 8545 + +[backend] +response_timeout_seconds = 1 + +[backends] +[backends.good] +rpc_url = "$GOOD_BACKEND_RPC_URL" +ws_url = "$GOOD_BACKEND_RPC_URL" + +[backend_groups] +[backend_groups.main] +backends = ["good"] + +[rpc_method_mappings] +eth_chainId = "main" \ No newline at end of file diff --git a/proxyd/integration_tests/testdata/ws.toml b/proxyd/integration_tests/testdata/ws.toml new file mode 100644 index 0000000..4642e6b --- /dev/null +++ b/proxyd/integration_tests/testdata/ws.toml @@ -0,0 +1,28 @@ +whitelist_error_message = "rpc method is not whitelisted" + +ws_backend_group = "main" + +ws_method_whitelist = [ + "eth_subscribe", + "eth_accounts" +] + +[server] +rpc_port = 8545 +ws_port = 8546 + +[backend] +response_timeout_seconds = 1 + +[backends] +[backends.good] +rpc_url = "$GOOD_BACKEND_RPC_URL" +ws_url = "$GOOD_BACKEND_RPC_URL" +max_ws_conns = 1 + +[backend_groups] +[backend_groups.main] +backends = ["good"] + +[rpc_method_mappings] +eth_chainId = "main" diff --git 
a/proxyd/integration_tests/util_test.go b/proxyd/integration_tests/util_test.go new file mode 100644 index 0000000..36edce1 --- /dev/null +++ b/proxyd/integration_tests/util_test.go @@ -0,0 +1,191 @@ +package integration_tests + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "testing" + "time" + + "github.com/BurntSushi/toml" + "github.com/gorilla/websocket" + "github.com/stretchr/testify/require" + "golang.org/x/exp/slog" + + "github.com/ethereum/go-ethereum/log" + + "github.com/ethereum-optimism/optimism/proxyd" +) + +type ProxydHTTPClient struct { + url string + headers http.Header +} + +func NewProxydClient(url string) *ProxydHTTPClient { + return NewProxydClientWithHeaders(url, make(http.Header)) +} + +func NewProxydClientWithHeaders(url string, headers http.Header) *ProxydHTTPClient { + clonedHeaders := headers.Clone() + clonedHeaders.Set("Content-Type", "application/json") + return &ProxydHTTPClient{ + url: url, + headers: clonedHeaders, + } +} + +func (p *ProxydHTTPClient) SendRPC(method string, params []interface{}) ([]byte, int, error) { + rpcReq := NewRPCReq("999", method, params) + body, err := json.Marshal(rpcReq) + if err != nil { + panic(err) + } + return p.SendRequest(body) +} + +func (p *ProxydHTTPClient) SendBatchRPC(reqs ...*proxyd.RPCReq) ([]byte, int, error) { + body, err := json.Marshal(reqs) + if err != nil { + panic(err) + } + return p.SendRequest(body) +} + +func (p *ProxydHTTPClient) SendRequest(body []byte) ([]byte, int, error) { + req, err := http.NewRequest("POST", p.url, bytes.NewReader(body)) + if err != nil { + panic(err) + } + req.Header = p.headers + + res, err := http.DefaultClient.Do(req) + if err != nil { + return nil, -1, err + } + defer res.Body.Close() + code := res.StatusCode + resBody, err := io.ReadAll(res.Body) + if err != nil { + panic(err) + } + return resBody, code, nil +} + +func RequireEqualJSON(t *testing.T, expected []byte, actual []byte) { + expJSON := canonicalizeJSON(t, expected) + actJSON := canonicalizeJSON(t, actual) + require.Equal(t, string(expJSON), string(actJSON)) +} + +func canonicalizeJSON(t *testing.T, in []byte) []byte { + var any interface{} + if in[0] == '[' { + any = make([]interface{}, 0) + } else { + any = make(map[string]interface{}) + } + + err := json.Unmarshal(in, &any) + require.NoError(t, err) + out, err := json.Marshal(any) + require.NoError(t, err) + return out +} + +func ReadConfig(name string) *proxyd.Config { + config := new(proxyd.Config) + _, err := toml.DecodeFile(fmt.Sprintf("testdata/%s.toml", name), config) + if err != nil { + panic(err) + } + return config +} + +func NewRPCReq(id string, method string, params []interface{}) *proxyd.RPCReq { + jsonParams, err := json.Marshal(params) + if err != nil { + panic(err) + } + + return &proxyd.RPCReq{ + JSONRPC: proxyd.JSONRPCVersion, + Method: method, + Params: jsonParams, + ID: []byte(id), + } +} + +type ProxydWSClient struct { + conn *websocket.Conn + msgCB ProxydWSClientOnMessage + closeCB ProxydWSClientOnClose +} + +type WSMessage struct { + Type int + Body []byte +} + +type ( + ProxydWSClientOnMessage func(msgType int, data []byte) + ProxydWSClientOnClose func(err error) +) + +func NewProxydWSClient( + url string, + msgCB ProxydWSClientOnMessage, + closeCB ProxydWSClientOnClose, +) (*ProxydWSClient, error) { + conn, _, err := websocket.DefaultDialer.Dial(url, nil) // nolint:bodyclose + if err != nil { + return nil, err + } + + c := &ProxydWSClient{ + conn: conn, + msgCB: msgCB, + closeCB: closeCB, + } + go c.readPump() + return 
c, nil +} + +func (h *ProxydWSClient) readPump() { + for { + mType, msg, err := h.conn.ReadMessage() + if err != nil { + if h.closeCB != nil { + h.closeCB(err) + } + return + } + if h.msgCB != nil { + h.msgCB(mType, msg) + } + } +} + +func (h *ProxydWSClient) HardClose() { + h.conn.Close() +} + +func (h *ProxydWSClient) SoftClose() error { + return h.WriteMessage(websocket.CloseMessage, nil) +} + +func (h *ProxydWSClient) WriteMessage(msgType int, msg []byte) error { + return h.conn.WriteMessage(msgType, msg) +} + +func (h *ProxydWSClient) WriteControlMessage(msgType int, msg []byte) error { + return h.conn.WriteControl(msgType, msg, time.Now().Add(time.Minute)) +} + +func InitLogger() { + log.SetDefault(log.NewLogger(slog.NewJSONHandler( + os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug}))) +} diff --git a/proxyd/integration_tests/validation_test.go b/proxyd/integration_tests/validation_test.go new file mode 100644 index 0000000..95cfc29 --- /dev/null +++ b/proxyd/integration_tests/validation_test.go @@ -0,0 +1,258 @@ +package integration_tests + +import ( + "fmt" + "os" + "strings" + "testing" + + "github.com/ethereum-optimism/optimism/proxyd" + "github.com/stretchr/testify/require" +) + +const ( + notWhitelistedResponse = `{"jsonrpc":"2.0","error":{"code":-32601,"message":"rpc method is not whitelisted custom message"},"id":999}` + parseErrResponse = `{"jsonrpc":"2.0","error":{"code":-32700,"message":"parse error"},"id":null}` + invalidJSONRPCVersionResponse = `{"error":{"code":-32600,"message":"invalid JSON-RPC version"},"id":null,"jsonrpc":"2.0"}` + invalidIDResponse = `{"error":{"code":-32600,"message":"invalid ID"},"id":null,"jsonrpc":"2.0"}` + invalidMethodResponse = `{"error":{"code":-32600,"message":"no method specified"},"id":null,"jsonrpc":"2.0"}` + invalidBatchLenResponse = `{"error":{"code":-32600,"message":"must specify at least one batch call"},"id":null,"jsonrpc":"2.0"}` +) + +func TestSingleRPCValidation(t *testing.T) { + goodBackend := NewMockBackend(BatchedResponseHandler(200, goodResponse)) + defer goodBackend.Close() + + require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL())) + + config := ReadConfig("whitelist") + client := NewProxydClient("http://127.0.0.1:8545") + _, shutdown, err := proxyd.Start(config) + require.NoError(t, err) + defer shutdown() + + tests := []struct { + name string + body string + res string + code int + }{ + { + "body not JSON", + "this ain't an RPC call", + parseErrResponse, + 400, + }, + { + "body not RPC", + "{\"not\": \"rpc\"}", + invalidJSONRPCVersionResponse, + 400, + }, + { + "body missing RPC ID", + "{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23]}", + invalidIDResponse, + 400, + }, + { + "body has array ID", + "{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": []}", + invalidIDResponse, + 400, + }, + { + "body has object ID", + "{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": {}}", + invalidIDResponse, + 400, + }, + { + "bad method", + "{\"jsonrpc\": \"2.0\", \"method\": 7, \"params\": [42, 23], \"id\": 1}", + parseErrResponse, + 400, + }, + { + "bad JSON-RPC", + "{\"jsonrpc\": \"1.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": 1}", + invalidJSONRPCVersionResponse, + 400, + }, + { + "omitted method", + "{\"jsonrpc\": \"2.0\", \"params\": [42, 23], \"id\": 1}", + invalidMethodResponse, + 400, + }, + { + "not whitelisted method", + "{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": 999}", + 
notWhitelistedResponse, + 403, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + res, code, err := client.SendRequest([]byte(tt.body)) + require.NoError(t, err) + RequireEqualJSON(t, []byte(tt.res), res) + require.Equal(t, tt.code, code) + require.Equal(t, 0, len(goodBackend.Requests())) + }) + } +} + +func TestBatchRPCValidation(t *testing.T) { + goodBackend := NewMockBackend(BatchedResponseHandler(200, goodResponse)) + defer goodBackend.Close() + + require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL())) + + config := ReadConfig("whitelist") + client := NewProxydClient("http://127.0.0.1:8545") + _, shutdown, err := proxyd.Start(config) + require.NoError(t, err) + defer shutdown() + + tests := []struct { + name string + body string + res string + code int + reqCount int + }{ + { + "empty batch", + "[]", + invalidBatchLenResponse, + 400, + 0, + }, + { + "bad json", + "[{,]", + parseErrResponse, + 400, + 0, + }, + { + "not object in batch", + "[123]", + asArray(parseErrResponse), + 200, + 0, + }, + { + "body not RPC", + "[{\"not\": \"rpc\"}]", + asArray(invalidJSONRPCVersionResponse), + 200, + 0, + }, + { + "body missing RPC ID", + "[{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23]}]", + asArray(invalidIDResponse), + 200, + 0, + }, + { + "body has array ID", + "[{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": []}]", + asArray(invalidIDResponse), + 200, + 0, + }, + { + "body has object ID", + "[{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": {}}]", + asArray(invalidIDResponse), + 200, + 0, + }, + // this happens because we can't deserialize the method into a non + // string value, and it blows up the parsing for the whole request. 
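+ // (proxyd's RPCReq declares Method as a string, so a numeric "method" fails + // during json.Unmarshal and that element surfaces as a parse error.)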
+ { + "bad method", + "[{\"error\":{\"code\":-32600,\"message\":\"invalid request\"},\"id\":null,\"jsonrpc\":\"2.0\"}]", + asArray(invalidMethodResponse), + 200, + 0, + }, + { + "bad JSON-RPC", + "[{\"jsonrpc\": \"1.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": 1}]", + asArray(invalidJSONRPCVersionResponse), + 200, + 0, + }, + { + "omitted method", + "[{\"jsonrpc\": \"2.0\", \"params\": [42, 23], \"id\": 1}]", + asArray(invalidMethodResponse), + 200, + 0, + }, + { + "not whitelisted method", + "[{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": 999}]", + asArray(notWhitelistedResponse), + 200, + 0, + }, + { + "mixed", + asArray( + "{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": 999}", + "{\"jsonrpc\": \"2.0\", \"method\": \"eth_chainId\", \"params\": [], \"id\": 123}", + "123", + ), + asArray( + notWhitelistedResponse, + goodResponse, + parseErrResponse, + ), + 200, + 1, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + res, code, err := client.SendRequest([]byte(tt.body)) + require.NoError(t, err) + RequireEqualJSON(t, []byte(tt.res), res) + require.Equal(t, tt.code, code) + require.Equal(t, tt.reqCount, len(goodBackend.Requests())) + }) + } +} + +func TestSizeLimits(t *testing.T) { + goodBackend := NewMockBackend(BatchedResponseHandler(200, goodResponse)) + defer goodBackend.Close() + + require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL())) + + config := ReadConfig("size_limits") + client := NewProxydClient("http://127.0.0.1:8545") + _, shutdown, err := proxyd.Start(config) + require.NoError(t, err) + defer shutdown() + + payload := strings.Repeat("barf", 1024*1024) + out, code, err := client.SendRequest([]byte(fmt.Sprintf(`{"jsonrpc": "2.0", "method": "eth_chainId", "params": [%s], "id": 1}`, payload))) + require.NoError(t, err) + require.Equal(t, `{"jsonrpc":"2.0","error":{"code":-32021,"message":"request body too large"},"id":null}`, strings.TrimSpace(string(out))) + require.Equal(t, 413, code) + + // The default response is already over the size limit in size_limits.toml. 
+ out, code, err = client.SendRequest([]byte(`{"jsonrpc": "2.0", "method": "eth_chainId", "params": [], "id": 1}`)) + require.NoError(t, err) + require.Equal(t, `{"jsonrpc":"2.0","error":{"code":-32020,"message":"backend response too large"},"id":1}`, strings.TrimSpace(string(out))) + require.Equal(t, 500, code) +} + +func asArray(in ...string) string { + return "[" + strings.Join(in, ",") + "]" +} diff --git a/proxyd/integration_tests/ws_test.go b/proxyd/integration_tests/ws_test.go new file mode 100644 index 0000000..d52cfab --- /dev/null +++ b/proxyd/integration_tests/ws_test.go @@ -0,0 +1,241 @@ +package integration_tests + +import ( + "os" + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/ethereum-optimism/optimism/proxyd" + "github.com/gorilla/websocket" + "github.com/stretchr/testify/require" +) + +type backendHandler struct { + msgCB atomic.Value + closeCB atomic.Value +} + +func (b *backendHandler) MsgCB(conn *websocket.Conn, msgType int, data []byte) { + cb := b.msgCB.Load() + if cb == nil { + return + } + cb.(MockWSBackendOnMessage)(conn, msgType, data) +} + +func (b *backendHandler) SetMsgCB(cb MockWSBackendOnMessage) { + b.msgCB.Store(cb) +} + +func (b *backendHandler) CloseCB(conn *websocket.Conn, err error) { + cb := b.closeCB.Load() + if cb == nil { + return + } + cb.(MockWSBackendOnClose)(conn, err) +} + +func (b *backendHandler) SetCloseCB(cb MockWSBackendOnClose) { + b.closeCB.Store(cb) +} + +type clientHandler struct { + msgCB atomic.Value +} + +func (c *clientHandler) MsgCB(msgType int, data []byte) { + cb := c.msgCB.Load().(ProxydWSClientOnMessage) + if cb == nil { + return + } + cb(msgType, data) +} + +func (c *clientHandler) SetMsgCB(cb ProxydWSClientOnMessage) { + c.msgCB.Store(cb) +} + +func TestWS(t *testing.T) { + backendHdlr := new(backendHandler) + clientHdlr := new(clientHandler) + + backend := NewMockWSBackend(nil, func(conn *websocket.Conn, msgType int, data []byte) { + backendHdlr.MsgCB(conn, msgType, data) + }, func(conn *websocket.Conn, err error) { + backendHdlr.CloseCB(conn, err) + }) + defer backend.Close() + + require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", backend.URL())) + + config := ReadConfig("ws") + _, shutdown, err := proxyd.Start(config) + require.NoError(t, err) + client, err := NewProxydWSClient("ws://127.0.0.1:8546", func(msgType int, data []byte) { + clientHdlr.MsgCB(msgType, data) + }, nil) + defer client.HardClose() + require.NoError(t, err) + defer shutdown() + + tests := []struct { + name string + backendRes string + expRes string + clientReq string + }{ + { + "ok response", + "{\"jsonrpc\":\"2.0\",\"id\":1,\"result\":\"0xcd0c3e8af590364c09d0fa6a1210faf5\"}", + "{\"jsonrpc\":\"2.0\",\"id\":1,\"result\":\"0xcd0c3e8af590364c09d0fa6a1210faf5\"}", + "{\"id\": 1, \"method\": \"eth_subscribe\", \"params\": [\"newHeads\"]}", + }, + { + "garbage backend response", + "gibblegabble", + "{\"jsonrpc\":\"2.0\",\"error\":{\"code\":-32013,\"message\":\"backend returned an invalid response\"},\"id\":null}", + "{\"id\": 1, \"method\": \"eth_subscribe\", \"params\": [\"newHeads\"]}", + }, + { + "blacklisted RPC", + "}", + "{\"jsonrpc\":\"2.0\",\"error\":{\"code\":-32601,\"message\":\"rpc method is not whitelisted\"},\"id\":1}", + "{\"id\": 1, \"method\": \"eth_whatever\", \"params\": []}", + }, + { + "garbage client request", + "{}", + "{\"jsonrpc\":\"2.0\",\"error\":{\"code\":-32700,\"message\":\"parse error\"},\"id\":null}", + "barf", + }, + { + "invalid client request", + "{}", + 
"{\"jsonrpc\":\"2.0\",\"error\":{\"code\":-32700,\"message\":\"parse error\"},\"id\":null}", + "{\"jsonrpc\": \"2.0\", \"method\": true}", + }, + { + "eth_accounts", + "{}", + "{\"jsonrpc\":\"2.0\",\"result\":[],\"id\":1}", + "{\"jsonrpc\": \"2.0\", \"method\": \"eth_accounts\", \"id\": 1}", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + timeout := time.NewTicker(10 * time.Second) + doneCh := make(chan struct{}, 1) + backendHdlr.SetMsgCB(func(conn *websocket.Conn, msgType int, data []byte) { + require.NoError(t, conn.WriteMessage(websocket.TextMessage, []byte(tt.backendRes))) + }) + clientHdlr.SetMsgCB(func(msgType int, data []byte) { + require.Equal(t, tt.expRes, string(data)) + doneCh <- struct{}{} + }) + require.NoError(t, client.WriteMessage( + websocket.TextMessage, + []byte(tt.clientReq), + )) + select { + case <-timeout.C: + t.Fatalf("timed out") + case <-doneCh: + return + } + }) + } +} + +func TestWSClientClosure(t *testing.T) { + backendHdlr := new(backendHandler) + clientHdlr := new(clientHandler) + + backend := NewMockWSBackend(nil, func(conn *websocket.Conn, msgType int, data []byte) { + backendHdlr.MsgCB(conn, msgType, data) + }, func(conn *websocket.Conn, err error) { + backendHdlr.CloseCB(conn, err) + }) + defer backend.Close() + + require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", backend.URL())) + + config := ReadConfig("ws") + _, shutdown, err := proxyd.Start(config) + require.NoError(t, err) + defer shutdown() + + for _, closeType := range []string{"soft", "hard"} { + t.Run(closeType, func(t *testing.T) { + client, err := NewProxydWSClient("ws://127.0.0.1:8546", func(msgType int, data []byte) { + clientHdlr.MsgCB(msgType, data) + }, nil) + require.NoError(t, err) + + timeout := time.NewTicker(30 * time.Second) + doneCh := make(chan struct{}, 1) + backendHdlr.SetCloseCB(func(conn *websocket.Conn, err error) { + doneCh <- struct{}{} + }) + + if closeType == "soft" { + require.NoError(t, client.SoftClose()) + } else { + client.HardClose() + } + + select { + case <-timeout.C: + t.Fatalf("timed out") + case <-doneCh: + return + } + }) + } +} + +func TestWSClientExceedReadLimit(t *testing.T) { + backendHdlr := new(backendHandler) + clientHdlr := new(clientHandler) + + backend := NewMockWSBackend(nil, func(conn *websocket.Conn, msgType int, data []byte) { + backendHdlr.MsgCB(conn, msgType, data) + }, func(conn *websocket.Conn, err error) { + backendHdlr.CloseCB(conn, err) + }) + defer backend.Close() + + require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", backend.URL())) + + config := ReadConfig("ws") + _, shutdown, err := proxyd.Start(config) + require.NoError(t, err) + defer shutdown() + + client, err := NewProxydWSClient("ws://127.0.0.1:8546", func(msgType int, data []byte) { + clientHdlr.MsgCB(msgType, data) + }, nil) + require.NoError(t, err) + + closed := false + originalHandler := client.conn.CloseHandler() + client.conn.SetCloseHandler(func(code int, text string) error { + closed = true + return originalHandler(code, text) + }) + + backendHdlr.SetMsgCB(func(conn *websocket.Conn, msgType int, data []byte) { + t.Fatalf("backend should not get the large message") + }) + + payload := strings.Repeat("barf", 1024*1024) + clientReq := "{\"id\": 1, \"method\": \"eth_subscribe\", \"params\": [\"" + payload + "\"]}" + err = client.WriteMessage( + websocket.TextMessage, + []byte(clientReq), + ) + require.Error(t, err) + require.True(t, closed) + +} diff --git a/proxyd/methods.go b/proxyd/methods.go new file mode 100644 index 0000000..08ea773 --- 
/dev/null +++ b/proxyd/methods.go @@ -0,0 +1,92 @@ +package proxyd + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "strings" + "sync" + + "github.com/ethereum/go-ethereum/log" +) + +type RPCMethodHandler interface { + GetRPCMethod(context.Context, *RPCReq) (*RPCRes, error) + PutRPCMethod(context.Context, *RPCReq, *RPCRes) error +} + +type StaticMethodHandler struct { + cache Cache + m sync.RWMutex + filterGet func(*RPCReq) bool + filterPut func(*RPCReq, *RPCRes) bool +} + +func (e *StaticMethodHandler) key(req *RPCReq) string { + // signature is the hashed json.RawMessage param contents + h := sha256.New() + h.Write(req.Params) + signature := fmt.Sprintf("%x", h.Sum(nil)) + return strings.Join([]string{"cache", req.Method, signature}, ":") +} + +func (e *StaticMethodHandler) GetRPCMethod(ctx context.Context, req *RPCReq) (*RPCRes, error) { + if e.cache == nil { + return nil, nil + } + if e.filterGet != nil && !e.filterGet(req) { + return nil, nil + } + + e.m.RLock() + defer e.m.RUnlock() + + key := e.key(req) + val, err := e.cache.Get(ctx, key) + if err != nil { + log.Error("error reading from cache", "key", key, "method", req.Method, "err", err) + return nil, err + } + if val == "" { + return nil, nil + } + + var result interface{} + if err := json.Unmarshal([]byte(val), &result); err != nil { + log.Error("error unmarshalling value from cache", "key", key, "method", req.Method, "err", err) + return nil, err + } + return &RPCRes{ + JSONRPC: req.JSONRPC, + Result: result, + ID: req.ID, + }, nil +} + +func (e *StaticMethodHandler) PutRPCMethod(ctx context.Context, req *RPCReq, res *RPCRes) error { + if e.cache == nil { + return nil + } + // if there is a filter on get, we don't want to cache it because its irretrievable + if e.filterGet != nil && !e.filterGet(req) { + return nil + } + // response filter + if e.filterPut != nil && !e.filterPut(req, res) { + return nil + } + + e.m.Lock() + defer e.m.Unlock() + + key := e.key(req) + value := mustMarshalJSON(res.Result) + + err := e.cache.Put(ctx, key, string(value)) + if err != nil { + log.Error("error putting into cache", "key", key, "method", req.Method, "err", err) + return err + } + return nil +} diff --git a/proxyd/metrics.go b/proxyd/metrics.go new file mode 100644 index 0000000..4046af0 --- /dev/null +++ b/proxyd/metrics.go @@ -0,0 +1,601 @@ +package proxyd + +import ( + "context" + "fmt" + "regexp" + "strconv" + "strings" + "time" + + "github.com/ethereum/go-ethereum/common/hexutil" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +const ( + MetricsNamespace = "proxyd" + + RPCRequestSourceHTTP = "http" + RPCRequestSourceWS = "ws" + + BackendProxyd = "proxyd" + SourceClient = "client" + SourceBackend = "backend" + MethodUnknown = "unknown" +) + +var PayloadSizeBuckets = []float64{10, 50, 100, 500, 1000, 5000, 10000, 100000, 1000000} +var MillisecondDurationBuckets = []float64{1, 10, 50, 100, 500, 1000, 5000, 10000, 100000} + +var ( + rpcRequestsTotal = promauto.NewCounter(prometheus.CounterOpts{ + Namespace: MetricsNamespace, + Name: "rpc_requests_total", + Help: "Count of total client RPC requests.", + }) + + rpcForwardsTotal = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: MetricsNamespace, + Name: "rpc_forwards_total", + Help: "Count of total RPC requests forwarded to each backend.", + }, []string{ + "auth", + "backend_name", + "method_name", + "source", + }) + + rpcBackendHTTPResponseCodesTotal = 
promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: MetricsNamespace, + Name: "rpc_backend_http_response_codes_total", + Help: "Count of total backend responses by HTTP status code.", + }, []string{ + "auth", + "backend_name", + "method_name", + "status_code", + "batched", + }) + + rpcErrorsTotal = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: MetricsNamespace, + Name: "rpc_errors_total", + Help: "Count of total RPC errors.", + }, []string{ + "auth", + "backend_name", + "method_name", + "error_code", + }) + + rpcSpecialErrorsTotal = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: MetricsNamespace, + Name: "rpc_special_errors_total", + Help: "Count of total special RPC errors.", + }, []string{ + "auth", + "backend_name", + "method_name", + "error_type", + }) + + rpcBackendRequestDurationSumm = promauto.NewSummaryVec(prometheus.SummaryOpts{ + Namespace: MetricsNamespace, + Name: "rpc_backend_request_duration_seconds", + Help: "Summary of backend response times broken down by backend and method name.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.95: 0.005, 0.99: 0.001}, + }, []string{ + "backend_name", + "method_name", + "batched", + }) + + activeClientWsConnsGauge = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: MetricsNamespace, + Name: "active_client_ws_conns", + Help: "Gauge of active client WS connections.", + }, []string{ + "auth", + }) + + activeBackendWsConnsGauge = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: MetricsNamespace, + Name: "active_backend_ws_conns", + Help: "Gauge of active backend WS connections.", + }, []string{ + "backend_name", + }) + + unserviceableRequestsTotal = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: MetricsNamespace, + Name: "unserviceable_requests_total", + Help: "Count of total requests that were rejected due to no backends being available.", + }, []string{ + "auth", + "request_source", + }) + + httpResponseCodesTotal = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: MetricsNamespace, + Name: "http_response_codes_total", + Help: "Count of total HTTP response codes.", + }, []string{ + "status_code", + }) + + httpRequestDurationSumm = promauto.NewSummary(prometheus.SummaryOpts{ + Namespace: MetricsNamespace, + Name: "http_request_duration_seconds", + Help: "Summary of HTTP request durations, in seconds.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.95: 0.005, 0.99: 0.001}, + }) + + wsMessagesTotal = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: MetricsNamespace, + Name: "ws_messages_total", + Help: "Count of total websocket messages including protocol control.", + }, []string{ + "auth", + "backend_name", + "source", + }) + + redisErrorsTotal = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: MetricsNamespace, + Name: "redis_errors_total", + Help: "Count of total Redis errors.", + }, []string{ + "source", + }) + + requestPayloadSizesGauge = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: MetricsNamespace, + Name: "request_payload_sizes", + Help: "Histogram of client request payload sizes.", + Buckets: PayloadSizeBuckets, + }, []string{ + "auth", + }) + + responsePayloadSizesGauge = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: MetricsNamespace, + Name: "response_payload_sizes", + Help: "Histogram of client response payload sizes.", + Buckets: PayloadSizeBuckets, + }, []string{ + "auth", + }) + + cacheHitsTotal = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: MetricsNamespace, + 
Name: "cache_hits_total", + Help: "Number of cache hits.", + }, []string{ + "method", + }) + + cacheMissesTotal = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: MetricsNamespace, + Name: "cache_misses_total", + Help: "Number of cache misses.", + }, []string{ + "method", + }) + + cacheErrorsTotal = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: MetricsNamespace, + Name: "cache_errors_total", + Help: "Number of cache errors.", + }, []string{ + "method", + }) + + batchRPCShortCircuitsTotal = promauto.NewCounter(prometheus.CounterOpts{ + Namespace: MetricsNamespace, + Name: "batch_rpc_short_circuits_total", + Help: "Count of total batch RPC short-circuits.", + }) + + rpcSpecialErrors = []string{ + "nonce too low", + "gas price too high", + "gas price too low", + "invalid parameters", + } + + redisCacheDurationSumm = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: MetricsNamespace, + Name: "redis_cache_duration_milliseconds", + Help: "Histogram of Redis command durations, in milliseconds.", + Buckets: MillisecondDurationBuckets, + }, []string{"command"}) + + tooManyRequestErrorsTotal = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: MetricsNamespace, + Name: "too_many_request_errors_total", + Help: "Count of request timeouts due to too many concurrent RPCs.", + }, []string{ + "backend_name", + }) + + batchSizeHistogram = promauto.NewHistogram(prometheus.HistogramOpts{ + Namespace: MetricsNamespace, + Name: "batch_size_summary", + Help: "Summary of batch sizes", + Buckets: []float64{ + 1, + 5, + 10, + 25, + 50, + 100, + }, + }) + + frontendRateLimitTakeErrors = promauto.NewCounter(prometheus.CounterOpts{ + Namespace: MetricsNamespace, + Name: "rate_limit_take_errors", + Help: "Count of errors taking frontend rate limits", + }) + + consensusLatestBlock = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: MetricsNamespace, + Name: "group_consensus_latest_block", + Help: "Consensus latest block", + }, []string{ + "backend_group_name", + }) + + consensusSafeBlock = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: MetricsNamespace, + Name: "group_consensus_safe_block", + Help: "Consensus safe block", + }, []string{ + "backend_group_name", + }) + + consensusFinalizedBlock = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: MetricsNamespace, + Name: "group_consensus_finalized_block", + Help: "Consensus finalized block", + }, []string{ + "backend_group_name", + }) + + consensusHAError = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: MetricsNamespace, + Name: "group_consensus_ha_error", + Help: "Consensus HA error count", + }, []string{ + "error", + }) + + consensusHALatestBlock = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: MetricsNamespace, + Name: "group_consensus_ha_latest_block", + Help: "Consensus HA latest block", + }, []string{ + "backend_group_name", + "leader", + }) + + consensusHASafeBlock = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: MetricsNamespace, + Name: "group_consensus_ha_safe_block", + Help: "Consensus HA safe block", + }, []string{ + "backend_group_name", + "leader", + }) + + consensusHAFinalizedBlock = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: MetricsNamespace, + Name: "group_consensus_ha_finalized_block", + Help: "Consensus HA finalized block", + }, []string{ + "backend_group_name", + "leader", + }) + + backendLatestBlockBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: MetricsNamespace, + Name: "backend_latest_block", + Help: "Current latest 
block observed per backend", + }, []string{ + "backend_name", + }) + + backendSafeBlockBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: MetricsNamespace, + Name: "backend_safe_block", + Help: "Current safe block observed per backend", + }, []string{ + "backend_name", + }) + + backendFinalizedBlockBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: MetricsNamespace, + Name: "backend_finalized_block", + Help: "Current finalized block observed per backend", + }, []string{ + "backend_name", + }) + + backendUnexpectedBlockTagsBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: MetricsNamespace, + Name: "backend_unexpected_block_tags", + Help: "Bool gauge for unexpected block tags", + }, []string{ + "backend_name", + }) + + consensusGroupCount = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: MetricsNamespace, + Name: "group_consensus_count", + Help: "Consensus group serving traffic count", + }, []string{ + "backend_group_name", + }) + + consensusGroupFilteredCount = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: MetricsNamespace, + Name: "group_consensus_filtered_count", + Help: "Consensus group filtered out from serving traffic count", + }, []string{ + "backend_group_name", + }) + + consensusGroupTotalCount = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: MetricsNamespace, + Name: "group_consensus_total_count", + Help: "Total count of candidates to be part of consensus group", + }, []string{ + "backend_group_name", + }) + + consensusBannedBackends = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: MetricsNamespace, + Name: "consensus_backend_banned", + Help: "Bool gauge for banned backends", + }, []string{ + "backend_name", + }) + + consensusPeerCountBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: MetricsNamespace, + Name: "consensus_backend_peer_count", + Help: "Peer count", + }, []string{ + "backend_name", + }) + + consensusInSyncBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: MetricsNamespace, + Name: "consensus_backend_in_sync", + Help: "Bool gauge for backends in sync", + }, []string{ + "backend_name", + }) + + consensusUpdateDelayBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: MetricsNamespace, + Name: "consensus_backend_update_delay", + Help: "Delay (ms) for backend update", + }, []string{ + "backend_name", + }) + + avgLatencyBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: MetricsNamespace, + Name: "backend_avg_latency", + Help: "Average latency per backend", + }, []string{ + "backend_name", + }) + + degradedBackends = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: MetricsNamespace, + Name: "backend_degraded", + Help: "Bool gauge for degraded backends", + }, []string{ + "backend_name", + }) + + networkErrorRateBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: MetricsNamespace, + Name: "backend_error_rate", + Help: "Request error rate per backend", + }, []string{ + "backend_name", + }) + + healthyPrimaryCandidates = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: MetricsNamespace, + Name: "healthy_candidates", + Help: "Record the number of healthy primary candidates", + }, []string{ + "backend_group_name", + }) + + backendGroupFallbackBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: MetricsNamespace, + Name: "backend_group_fallback_backenend", + Help: "Bool gauge for if a backend is a fallback for a backend group", + }, []string{ + "backend_group", + "backend_name", + "fallback", + }) +) + 
+func RecordRedisError(source string) { + redisErrorsTotal.WithLabelValues(source).Inc() +} + +func RecordRPCError(ctx context.Context, backendName, method string, err error) { + rpcErr, ok := err.(*RPCErr) + var code int + if ok { + MaybeRecordSpecialRPCError(ctx, backendName, method, rpcErr) + code = rpcErr.Code + } else { + code = -1 + } + + rpcErrorsTotal.WithLabelValues(GetAuthCtx(ctx), backendName, method, strconv.Itoa(code)).Inc() +} + +func RecordWSMessage(ctx context.Context, backendName, source string) { + wsMessagesTotal.WithLabelValues(GetAuthCtx(ctx), backendName, source).Inc() +} + +func RecordUnserviceableRequest(ctx context.Context, source string) { + unserviceableRequestsTotal.WithLabelValues(GetAuthCtx(ctx), source).Inc() +} + +func RecordRPCForward(ctx context.Context, backendName, method, source string) { + rpcForwardsTotal.WithLabelValues(GetAuthCtx(ctx), backendName, method, source).Inc() +} + +func MaybeRecordSpecialRPCError(ctx context.Context, backendName, method string, rpcErr *RPCErr) { + errMsg := strings.ToLower(rpcErr.Message) + for _, errStr := range rpcSpecialErrors { + if strings.Contains(errMsg, errStr) { + rpcSpecialErrorsTotal.WithLabelValues(GetAuthCtx(ctx), backendName, method, errStr).Inc() + return + } + } +} + +func RecordRequestPayloadSize(ctx context.Context, payloadSize int) { + requestPayloadSizesGauge.WithLabelValues(GetAuthCtx(ctx)).Observe(float64(payloadSize)) +} + +func RecordResponsePayloadSize(ctx context.Context, payloadSize int) { + responsePayloadSizesGauge.WithLabelValues(GetAuthCtx(ctx)).Observe(float64(payloadSize)) +} + +func RecordCacheHit(method string) { + cacheHitsTotal.WithLabelValues(method).Inc() +} + +func RecordCacheMiss(method string) { + cacheMissesTotal.WithLabelValues(method).Inc() +} + +func RecordCacheError(method string) { + cacheErrorsTotal.WithLabelValues(method).Inc() +} + +func RecordBatchSize(size int) { + batchSizeHistogram.Observe(float64(size)) +} + +var nonAlphanumericRegex = regexp.MustCompile(`[^a-zA-Z ]+`) + +func RecordGroupConsensusError(group *BackendGroup, label string, err error) { + errClean := nonAlphanumericRegex.ReplaceAllString(err.Error(), "") + errClean = strings.ReplaceAll(errClean, " ", "_") + errClean = strings.ReplaceAll(errClean, "__", "_") + label = fmt.Sprintf("%s.%s", label, errClean) + consensusHAError.WithLabelValues(label).Inc() +} + +func RecordGroupConsensusHALatestBlock(group *BackendGroup, leader string, blockNumber hexutil.Uint64) { + consensusHALatestBlock.WithLabelValues(group.Name, leader).Set(float64(blockNumber)) +} + +func RecordGroupConsensusHASafeBlock(group *BackendGroup, leader string, blockNumber hexutil.Uint64) { + consensusHASafeBlock.WithLabelValues(group.Name, leader).Set(float64(blockNumber)) +} + +func RecordGroupConsensusHAFinalizedBlock(group *BackendGroup, leader string, blockNumber hexutil.Uint64) { + consensusHAFinalizedBlock.WithLabelValues(group.Name, leader).Set(float64(blockNumber)) +} + +func RecordGroupConsensusLatestBlock(group *BackendGroup, blockNumber hexutil.Uint64) { + consensusLatestBlock.WithLabelValues(group.Name).Set(float64(blockNumber)) +} + +func RecordGroupConsensusSafeBlock(group *BackendGroup, blockNumber hexutil.Uint64) { + consensusSafeBlock.WithLabelValues(group.Name).Set(float64(blockNumber)) +} + +func RecordGroupConsensusFinalizedBlock(group *BackendGroup, blockNumber hexutil.Uint64) { + consensusFinalizedBlock.WithLabelValues(group.Name).Set(float64(blockNumber)) +} + +func RecordGroupConsensusCount(group *BackendGroup, count 
int) { + consensusGroupCount.WithLabelValues(group.Name).Set(float64(count)) +} + +func RecordGroupConsensusFilteredCount(group *BackendGroup, count int) { + consensusGroupFilteredCount.WithLabelValues(group.Name).Set(float64(count)) +} + +func RecordGroupTotalCount(group *BackendGroup, count int) { + consensusGroupTotalCount.WithLabelValues(group.Name).Set(float64(count)) +} + +func RecordBackendLatestBlock(b *Backend, blockNumber hexutil.Uint64) { + backendLatestBlockBackend.WithLabelValues(b.Name).Set(float64(blockNumber)) +} + +func RecordBackendSafeBlock(b *Backend, blockNumber hexutil.Uint64) { + backendSafeBlockBackend.WithLabelValues(b.Name).Set(float64(blockNumber)) +} + +func RecordBackendFinalizedBlock(b *Backend, blockNumber hexutil.Uint64) { + backendFinalizedBlockBackend.WithLabelValues(b.Name).Set(float64(blockNumber)) +} + +func RecordBackendUnexpectedBlockTags(b *Backend, unexpected bool) { + backendUnexpectedBlockTagsBackend.WithLabelValues(b.Name).Set(boolToFloat64(unexpected)) +} + +func RecordConsensusBackendBanned(b *Backend, banned bool) { + consensusBannedBackends.WithLabelValues(b.Name).Set(boolToFloat64(banned)) +} + +func RecordHealthyCandidates(b *BackendGroup, candidates int) { + healthyPrimaryCandidates.WithLabelValues(b.Name).Set(float64(candidates)) +} + +func RecordConsensusBackendPeerCount(b *Backend, peerCount uint64) { + consensusPeerCountBackend.WithLabelValues(b.Name).Set(float64(peerCount)) +} + +func RecordConsensusBackendInSync(b *Backend, inSync bool) { + consensusInSyncBackend.WithLabelValues(b.Name).Set(boolToFloat64(inSync)) +} + +func RecordConsensusBackendUpdateDelay(b *Backend, lastUpdate time.Time) { + // avoid recording the delay for the first update + if lastUpdate.IsZero() { + return + } + delay := time.Since(lastUpdate) + consensusUpdateDelayBackend.WithLabelValues(b.Name).Set(float64(delay.Milliseconds())) +} + +func RecordBackendNetworkLatencyAverageSlidingWindow(b *Backend, avgLatency time.Duration) { + avgLatencyBackend.WithLabelValues(b.Name).Set(float64(avgLatency.Milliseconds())) + degradedBackends.WithLabelValues(b.Name).Set(boolToFloat64(b.IsDegraded())) +} + +func RecordBackendNetworkErrorRateSlidingWindow(b *Backend, rate float64) { + networkErrorRateBackend.WithLabelValues(b.Name).Set(rate) +} + +func RecordBackendGroupFallbacks(bg *BackendGroup, name string, fallback bool) { + backendGroupFallbackBackend.WithLabelValues(bg.Name, name, strconv.FormatBool(fallback)).Set(boolToFloat64(fallback)) +} + +func boolToFloat64(b bool) float64 { + if b { + return 1 + } + return 0 +} diff --git a/proxyd/pkg/avg-sliding-window/sliding.go b/proxyd/pkg/avg-sliding-window/sliding.go new file mode 100644 index 0000000..70c40be --- /dev/null +++ b/proxyd/pkg/avg-sliding-window/sliding.go @@ -0,0 +1,188 @@ +package avg_sliding_window + +import ( + "sync" + "time" + + lm "github.com/emirpasic/gods/maps/linkedhashmap" +) + +type Clock interface { + Now() time.Time +} + +// DefaultClock provides a clock that gets current time from the system time +type DefaultClock struct{} + +func NewDefaultClock() *DefaultClock { + return &DefaultClock{} +} +func (c DefaultClock) Now() time.Time { + return time.Now() +} + +// AdjustableClock provides a static clock to easily override the system time +type AdjustableClock struct { + now time.Time +} + +func NewAdjustableClock(now time.Time) *AdjustableClock { + return &AdjustableClock{now: now} +} +func (c *AdjustableClock) Now() time.Time { + return c.now +} +func (c *AdjustableClock) Set(now time.Time) { + 
c.now = now +} + +type bucket struct { + sum float64 + qty uint +} + +// AvgSlidingWindow calculates moving averages efficiently. +// Data points are rounded to the nearest bucket of size `bucketSize`, +// and evicted when they are too old based on `windowLength`. +type AvgSlidingWindow struct { + mux sync.Mutex + bucketSize time.Duration + windowLength time.Duration + clock Clock + buckets *lm.Map + qty uint + sum float64 +} + +type SlidingWindowOpts func(sw *AvgSlidingWindow) + +func NewSlidingWindow(opts ...SlidingWindowOpts) *AvgSlidingWindow { + sw := &AvgSlidingWindow{ + buckets: lm.New(), + } + for _, opt := range opts { + opt(sw) + } + if sw.bucketSize == 0 { + sw.bucketSize = time.Second + } + if sw.windowLength == 0 { + sw.windowLength = 5 * time.Minute + } + if sw.clock == nil { + sw.clock = NewDefaultClock() + } + return sw +} + +func WithWindowLength(windowLength time.Duration) SlidingWindowOpts { + return func(sw *AvgSlidingWindow) { + sw.windowLength = windowLength + } +} + +func WithBucketSize(bucketSize time.Duration) SlidingWindowOpts { + return func(sw *AvgSlidingWindow) { + sw.bucketSize = bucketSize + } +} + +func WithClock(clock Clock) SlidingWindowOpts { + return func(sw *AvgSlidingWindow) { + sw.clock = clock + } +} + +func (sw *AvgSlidingWindow) inWindow(t time.Time) bool { + now := sw.clock.Now().Round(sw.bucketSize) + windowStart := now.Add(-sw.windowLength) + return windowStart.Before(t) && !t.After(now) +} + +// Add inserts a new data point into the window, with value `val` and the current time +func (sw *AvgSlidingWindow) Add(val float64) { + t := sw.clock.Now() + sw.AddWithTime(t, val) +} + +// Incr is an alias to insert a data point with value float64(1) and the current time +func (sw *AvgSlidingWindow) Incr() { + sw.Add(1) +} + +// AddWithTime inserts a new data point into the window, with value `val` and time `t` +func (sw *AvgSlidingWindow) AddWithTime(t time.Time, val float64) { + sw.mux.Lock() + defer sw.mux.Unlock() + sw.advance() + + key := t.Round(sw.bucketSize) + if !sw.inWindow(key) { + return + } + + var b *bucket + current, found := sw.buckets.Get(key) + if !found { + b = &bucket{} + } else { + b = current.(*bucket) + } + + // update bucket + bsum := b.sum + b.qty += 1 + b.sum = bsum + val + + // update window + wsum := sw.sum + sw.qty += 1 + sw.sum = wsum - bsum + b.sum + sw.buckets.Put(key, b) +} + +// advance evicts old data points; callers must hold sw.mux +func (sw *AvgSlidingWindow) advance() { + now := sw.clock.Now().Round(sw.bucketSize) + windowStart := now.Add(-sw.windowLength) + keys := sw.buckets.Keys() + for _, k := range keys { + if k.(time.Time).After(windowStart) { + break + } + val, _ := sw.buckets.Get(k) + b := val.(*bucket) + sw.buckets.Remove(k) + if sw.buckets.Size() > 0 { + sw.qty -= b.qty + sw.sum = sw.sum - b.sum + } else { + sw.qty = 0 + sw.sum = 0.0 + } + } +} + +// Avg retrieves the current average for the sliding window +func (sw *AvgSlidingWindow) Avg() float64 { + sw.mux.Lock() + defer sw.mux.Unlock() + sw.advance() + if sw.qty == 0 { + return 0 + } + return sw.sum / float64(sw.qty) +} + +// Sum retrieves the current sum for the sliding window +func (sw *AvgSlidingWindow) Sum() float64 { + sw.mux.Lock() + defer sw.mux.Unlock() + sw.advance() + return sw.sum +} + +// Count retrieves the data point count for the sliding window +func (sw *AvgSlidingWindow) Count() uint { + sw.mux.Lock() + defer sw.mux.Unlock() + sw.advance() + return sw.qty +}
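+ +// Illustrative usage (a sketch; reqLatency is a hypothetical caller-side +// duration): +// +// sw := NewSlidingWindow(WithWindowLength(time.Minute), WithBucketSize(time.Second)) +// sw.Add(reqLatency.Seconds()) // record one observation at the current time +// avg := sw.Avg() // moving average over the last minute diff --git a/proxyd/pkg/avg-sliding-window/sliding_test.go b/proxyd/pkg/avg-sliding-window/sliding_test.go new file mode 100644 index 0000000..37074db --- /dev/null +++ 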
b/proxyd/pkg/avg-sliding-window/sliding_test.go @@ -0,0 +1,277 @@ +package avg_sliding_window + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestSlidingWindow_AddWithTime_Single(t *testing.T) { + now := ts("2023-04-21 15:04:05") + clock := NewAdjustableClock(now) + + sw := NewSlidingWindow( + WithWindowLength(10*time.Second), + WithBucketSize(time.Second), + WithClock(clock)) + sw.AddWithTime(ts("2023-04-21 15:04:05"), 5) + require.Equal(t, 5.0, sw.Avg()) + require.Equal(t, 5.0, sw.Sum()) + require.Equal(t, 1, int(sw.Count())) + require.Equal(t, 1, sw.buckets.Size()) + require.Equal(t, 1, int(sw.buckets.Values()[0].(*bucket).qty)) + require.Equal(t, 5.0, sw.buckets.Values()[0].(*bucket).sum) +} + +func TestSlidingWindow_AddWithTime_TwoValues_SameBucket(t *testing.T) { + now := ts("2023-04-21 15:04:05") + clock := NewAdjustableClock(now) + + sw := NewSlidingWindow( + WithWindowLength(10*time.Second), + WithBucketSize(time.Second), + WithClock(clock)) + sw.AddWithTime(ts("2023-04-21 15:04:05"), 5) + sw.AddWithTime(ts("2023-04-21 15:04:05"), 5) + require.Equal(t, 5.0, sw.Avg()) + require.Equal(t, 10.0, sw.Sum()) + require.Equal(t, 2, int(sw.Count())) + require.Equal(t, 1, sw.buckets.Size()) + require.Equal(t, 2, int(sw.buckets.Values()[0].(*bucket).qty)) + require.Equal(t, 10.0, sw.buckets.Values()[0].(*bucket).sum) +} + +func TestSlidingWindow_AddWithTime_ThreeValues_SameBucket(t *testing.T) { + now := ts("2023-04-21 15:04:05") + clock := NewAdjustableClock(now) + + sw := NewSlidingWindow( + WithWindowLength(10*time.Second), + WithBucketSize(time.Second), + WithClock(clock)) + sw.AddWithTime(ts("2023-04-21 15:04:05"), 4) + sw.AddWithTime(ts("2023-04-21 15:04:05"), 5) + sw.AddWithTime(ts("2023-04-21 15:04:05"), 6) + require.Equal(t, 5.0, sw.Avg()) + require.Equal(t, 15.0, sw.Sum()) + require.Equal(t, 3, int(sw.Count())) + require.Equal(t, 1, sw.buckets.Size()) + require.Equal(t, 15.0, sw.buckets.Values()[0].(*bucket).sum) + require.Equal(t, 3, int(sw.buckets.Values()[0].(*bucket).qty)) +} + +func TestSlidingWindow_AddWithTime_ThreeValues_ThreeBuckets(t *testing.T) { + now := ts("2023-04-21 15:04:05") + clock := NewAdjustableClock(now) + + sw := NewSlidingWindow( + WithWindowLength(10*time.Second), + WithBucketSize(time.Second), + WithClock(clock)) + sw.AddWithTime(ts("2023-04-21 15:04:01"), 4) + sw.AddWithTime(ts("2023-04-21 15:04:02"), 5) + sw.AddWithTime(ts("2023-04-21 15:04:05"), 6) + require.Equal(t, 5.0, sw.Avg()) + require.Equal(t, 15.0, sw.Sum()) + require.Equal(t, 3, int(sw.Count())) + require.Equal(t, 3, sw.buckets.Size()) + require.Equal(t, 1, int(sw.buckets.Values()[0].(*bucket).qty)) + require.Equal(t, 4.0, sw.buckets.Values()[0].(*bucket).sum) + require.Equal(t, 1, int(sw.buckets.Values()[1].(*bucket).qty)) + require.Equal(t, 5.0, sw.buckets.Values()[1].(*bucket).sum) + require.Equal(t, 1, int(sw.buckets.Values()[2].(*bucket).qty)) + require.Equal(t, 6.0, sw.buckets.Values()[2].(*bucket).sum) +} + +func TestSlidingWindow_AddWithTime_OutWindow(t *testing.T) { + now := ts("2023-04-21 15:04:05") + clock := NewAdjustableClock(now) + + sw := NewSlidingWindow( + WithWindowLength(10*time.Second), + WithBucketSize(time.Second), + WithClock(clock)) + sw.AddWithTime(ts("2023-04-21 15:03:55"), 1000) + sw.AddWithTime(ts("2023-04-21 15:04:01"), 4) + sw.AddWithTime(ts("2023-04-21 15:04:02"), 5) + sw.AddWithTime(ts("2023-04-21 15:04:05"), 6) + require.Equal(t, 5.0, sw.Avg()) + require.Equal(t, 15.0, sw.Sum()) + require.Equal(t, 3, int(sw.Count())) + 
require.Equal(t, 3, sw.buckets.Size()) + require.Equal(t, 1, int(sw.buckets.Values()[0].(*bucket).qty)) + require.Equal(t, 4.0, sw.buckets.Values()[0].(*bucket).sum) + require.Equal(t, 1, int(sw.buckets.Values()[1].(*bucket).qty)) + require.Equal(t, 5.0, sw.buckets.Values()[1].(*bucket).sum) + require.Equal(t, 1, int(sw.buckets.Values()[2].(*bucket).qty)) + require.Equal(t, 6.0, sw.buckets.Values()[2].(*bucket).sum) +} + +func TestSlidingWindow_AdvanceClock(t *testing.T) { + now := ts("2023-04-21 15:04:05") + clock := NewAdjustableClock(now) + + sw := NewSlidingWindow( + WithWindowLength(10*time.Second), + WithBucketSize(time.Second), + WithClock(clock)) + sw.AddWithTime(ts("2023-04-21 15:04:01"), 4) + sw.AddWithTime(ts("2023-04-21 15:04:02"), 5) + sw.AddWithTime(ts("2023-04-21 15:04:05"), 6) + require.Equal(t, 5.0, sw.Avg()) + require.Equal(t, 15.0, sw.Sum()) + require.Equal(t, 3, int(sw.Count())) + require.Equal(t, 3, sw.buckets.Size()) + + require.Equal(t, 1, int(sw.buckets.Values()[0].(*bucket).qty)) + require.Equal(t, 4.0, sw.buckets.Values()[0].(*bucket).sum) + require.Equal(t, 1, int(sw.buckets.Values()[1].(*bucket).qty)) + require.Equal(t, 5.0, sw.buckets.Values()[1].(*bucket).sum) + require.Equal(t, 1, int(sw.buckets.Values()[2].(*bucket).qty)) + require.Equal(t, 6.0, sw.buckets.Values()[2].(*bucket).sum) + + // up until 15:04:05 we had 3 buckets + // let's advance the clock to 15:04:11 and the first data point should be evicted + clock.Set(ts("2023-04-21 15:04:11")) + require.Equal(t, 5.5, sw.Avg()) + require.Equal(t, 11.0, sw.Sum()) + require.Equal(t, 2, int(sw.Count())) + require.Equal(t, 2, sw.buckets.Size()) + require.Equal(t, 1, int(sw.buckets.Values()[0].(*bucket).qty)) + require.Equal(t, 5.0, sw.buckets.Values()[0].(*bucket).sum) + require.Equal(t, 1, int(sw.buckets.Values()[1].(*bucket).qty)) + require.Equal(t, 6.0, sw.buckets.Values()[1].(*bucket).sum) + + // let's advance the clock to 15:04:12 and another data point should be evicted + clock.Set(ts("2023-04-21 15:04:12")) + require.Equal(t, 6.0, sw.Avg()) + require.Equal(t, 6.0, sw.Sum()) + require.Equal(t, 1, int(sw.Count())) + require.Equal(t, 1, sw.buckets.Size()) + require.Equal(t, 1, int(sw.buckets.Values()[0].(*bucket).qty)) + require.Equal(t, 6.0, sw.buckets.Values()[0].(*bucket).sum) + + // let's advance the clock to 15:04:25 and all data points should be evicted + clock.Set(ts("2023-04-21 15:04:25")) + require.Equal(t, 0.0, sw.Avg()) + require.Equal(t, 0.0, sw.Sum()) + require.Equal(t, 0, int(sw.Count())) + require.Equal(t, 0, sw.buckets.Size()) +} + +func TestSlidingWindow_MultipleValPerBucket(t *testing.T) { + now := ts("2023-04-21 15:04:05") + clock := NewAdjustableClock(now) + + sw := NewSlidingWindow( + WithWindowLength(10*time.Second), + WithBucketSize(time.Second), + WithClock(clock)) + sw.AddWithTime(ts("2023-04-21 15:04:01"), 4) + sw.AddWithTime(ts("2023-04-21 15:04:01"), 12) + sw.AddWithTime(ts("2023-04-21 15:04:02"), 5) + sw.AddWithTime(ts("2023-04-21 15:04:02"), 15) + sw.AddWithTime(ts("2023-04-21 15:04:05"), 6) + sw.AddWithTime(ts("2023-04-21 15:04:05"), 3) + sw.AddWithTime(ts("2023-04-21 15:04:05"), 1) + sw.AddWithTime(ts("2023-04-21 15:04:05"), 3) + require.Equal(t, 6.125, sw.Avg()) + require.Equal(t, 49.0, sw.Sum()) + require.Equal(t, 8, int(sw.Count())) + require.Equal(t, 3, sw.buckets.Size()) + require.Equal(t, 2, int(sw.buckets.Values()[0].(*bucket).qty)) + require.Equal(t, 16.0, sw.buckets.Values()[0].(*bucket).sum) + require.Equal(t, 2, int(sw.buckets.Values()[1].(*bucket).qty)) + 
require.Equal(t, 20.0, sw.buckets.Values()[1].(*bucket).sum) + require.Equal(t, 4, int(sw.buckets.Values()[2].(*bucket).qty)) + require.Equal(t, 13.0, sw.buckets.Values()[2].(*bucket).sum) + + // up until 15:04:05 we had 3 buckets + // let's advance the clock to 15:04:11 and the first data point should be evicted + clock.Set(ts("2023-04-21 15:04:11")) + require.Equal(t, 5.5, sw.Avg()) + require.Equal(t, 33.0, sw.Sum()) + require.Equal(t, 6, int(sw.Count())) + require.Equal(t, 2, sw.buckets.Size()) + require.Equal(t, 2, int(sw.buckets.Values()[0].(*bucket).qty)) + require.Equal(t, 20.0, sw.buckets.Values()[0].(*bucket).sum) + require.Equal(t, 4, int(sw.buckets.Values()[1].(*bucket).qty)) + require.Equal(t, 13.0, sw.buckets.Values()[1].(*bucket).sum) + + // let's advance the clock to 15:04:12 and another data point should be evicted + clock.Set(ts("2023-04-21 15:04:12")) + require.Equal(t, 3.25, sw.Avg()) + require.Equal(t, 13.0, sw.Sum()) + require.Equal(t, 4, int(sw.Count())) + require.Equal(t, 1, sw.buckets.Size()) + require.Equal(t, 4, int(sw.buckets.Values()[0].(*bucket).qty)) + require.Equal(t, 13.0, sw.buckets.Values()[0].(*bucket).sum) + + // let's advance the clock to 15:04:25 and all data points should be evicted + clock.Set(ts("2023-04-21 15:04:25")) + require.Equal(t, 0.0, sw.Avg()) + require.Equal(t, 0, sw.buckets.Size()) +} + +func TestSlidingWindow_CustomBucket(t *testing.T) { + now := ts("2023-04-21 15:04:05") + clock := NewAdjustableClock(now) + + sw := NewSlidingWindow( + WithWindowLength(30*time.Second), + WithBucketSize(10*time.Second), + WithClock(clock)) + sw.AddWithTime(ts("2023-04-21 15:03:49"), 5) // key: 03:50, sum: 5.0 + sw.AddWithTime(ts("2023-04-21 15:04:02"), 15) // key: 04:00 + sw.AddWithTime(ts("2023-04-21 15:04:03"), 5) // key: 04:00 + sw.AddWithTime(ts("2023-04-21 15:04:04"), 1) // key: 04:00, sum: 21.0 + sw.AddWithTime(ts("2023-04-21 15:04:05"), 3) // key: 04:10, sum: 3.0 + require.Equal(t, 5.8, sw.Avg()) + require.Equal(t, 29.0, sw.Sum()) + require.Equal(t, 5, int(sw.Count())) + require.Equal(t, 3, sw.buckets.Size()) + require.Equal(t, 5.0, sw.buckets.Values()[0].(*bucket).sum) + require.Equal(t, 1, int(sw.buckets.Values()[0].(*bucket).qty)) + require.Equal(t, 21.0, sw.buckets.Values()[1].(*bucket).sum) + require.Equal(t, 3, int(sw.buckets.Values()[1].(*bucket).qty)) + require.Equal(t, 3.0, sw.buckets.Values()[2].(*bucket).sum) + require.Equal(t, 1, int(sw.buckets.Values()[2].(*bucket).qty)) + + // up until 15:04:05 we had 3 buckets + // let's advance the clock to 15:04:21 and the first data point should be evicted + clock.Set(ts("2023-04-21 15:04:21")) + require.Equal(t, 6.0, sw.Avg()) + require.Equal(t, 24.0, sw.Sum()) + require.Equal(t, 4, int(sw.Count())) + require.Equal(t, 2, sw.buckets.Size()) + require.Equal(t, 21.0, sw.buckets.Values()[0].(*bucket).sum) + require.Equal(t, 3, int(sw.buckets.Values()[0].(*bucket).qty)) + require.Equal(t, 3.0, sw.buckets.Values()[1].(*bucket).sum) + require.Equal(t, 1, int(sw.buckets.Values()[1].(*bucket).qty)) + + // let's advance the clock to 15:04:32 and another data point should be evicted + clock.Set(ts("2023-04-21 15:04:32")) + require.Equal(t, 3.0, sw.Avg()) + require.Equal(t, 3.0, sw.Sum()) + require.Equal(t, 1, sw.buckets.Size()) + require.Equal(t, 1, int(sw.Count())) + require.Equal(t, 3.0, sw.buckets.Values()[0].(*bucket).sum) + require.Equal(t, 1, int(sw.buckets.Values()[0].(*bucket).qty)) + + // let's advance the clock to 15:04:46 and all data points should be evicted + clock.Set(ts("2023-04-21 15:04:46")) 
+ require.Equal(t, 0.0, sw.Avg()) + require.Equal(t, 0.0, sw.Sum()) + require.Equal(t, 0, int(sw.Count())) + require.Equal(t, 0, sw.buckets.Size()) +} + +// ts is a test helper that parses a time.Time from a string in the format +// "2006-01-02 15:04:05", panicking on invalid input +func ts(s string) time.Time { + t, err := time.Parse(time.DateTime, s) + if err != nil { + panic(err) + } + return t +} diff --git a/proxyd/proxyd.go b/proxyd/proxyd.go new file mode 100644 index 0000000..402909b --- /dev/null +++ b/proxyd/proxyd.go @@ -0,0 +1,472 @@ +package proxyd + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "net/http" + "os" + "time" + + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/log" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/redis/go-redis/v9" + "golang.org/x/exp/slog" + "golang.org/x/sync/semaphore" +) + +func SetLogLevel(logLevel slog.Leveler) { + log.SetDefault(log.NewLogger(slog.NewJSONHandler( + os.Stdout, &slog.HandlerOptions{Level: logLevel}))) +} + +func Start(config *Config) (*Server, func(), error) { + if len(config.Backends) == 0 { + return nil, nil, errors.New("must define at least one backend") + } + if len(config.BackendGroups) == 0 { + return nil, nil, errors.New("must define at least one backend group") + } + if len(config.RPCMethodMappings) == 0 { + return nil, nil, errors.New("must define at least one RPC method mapping") + } + + for authKey := range config.Authentication { + if authKey == "none" { + return nil, nil, errors.New("cannot use none as an auth key") + } + } + + var redisClient *redis.Client + if config.Redis.URL != "" { + rURL, err := ReadFromEnvOrConfig(config.Redis.URL) + if err != nil { + return nil, nil, err + } + redisClient, err = NewRedisClient(rURL) + if err != nil { + return nil, nil, err + } + } + + if redisClient == nil && config.RateLimit.UseRedis { + return nil, nil, errors.New("must specify a Redis URL if UseRedis is true in rate limit config") + } + + // While modifying shared globals is a bad practice, the alternative + // is to clone these errors on every invocation. This is inefficient. + // We'd also have to make sure that errors.Is and errors.As continue + // to function properly on the cloned errors. 
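+ // The overrides below therefore mutate these package-level errors once, + // at startup, before any requests are served.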
+ if config.RateLimit.ErrorMessage != "" { + ErrOverRateLimit.Message = config.RateLimit.ErrorMessage + } + if config.WhitelistErrorMessage != "" { + ErrMethodNotWhitelisted.Message = config.WhitelistErrorMessage + } + if config.BatchConfig.ErrorMessage != "" { + ErrTooManyBatchRequests.Message = config.BatchConfig.ErrorMessage + } + + if config.SenderRateLimit.Enabled { + if config.SenderRateLimit.Limit <= 0 { + return nil, nil, errors.New("limit in sender_rate_limit must be > 0") + } + if time.Duration(config.SenderRateLimit.Interval) < time.Second { + return nil, nil, errors.New("interval in sender_rate_limit must be >= 1s") + } + } + + maxConcurrentRPCs := config.Server.MaxConcurrentRPCs + if maxConcurrentRPCs == 0 { + maxConcurrentRPCs = math.MaxInt64 + } + rpcRequestSemaphore := semaphore.NewWeighted(maxConcurrentRPCs) + + backendNames := make([]string, 0) + backendsByName := make(map[string]*Backend) + for name, cfg := range config.Backends { + opts := make([]BackendOpt, 0) + + rpcURL, err := ReadFromEnvOrConfig(cfg.RPCURL) + if err != nil { + return nil, nil, err + } + wsURL, err := ReadFromEnvOrConfig(cfg.WSURL) + if err != nil { + return nil, nil, err + } + if rpcURL == "" { + return nil, nil, fmt.Errorf("must define an RPC URL for backend %s", name) + } + + if config.BackendOptions.ResponseTimeoutSeconds != 0 { + timeout := secondsToDuration(config.BackendOptions.ResponseTimeoutSeconds) + opts = append(opts, WithTimeout(timeout)) + } + if config.BackendOptions.MaxRetries != 0 { + opts = append(opts, WithMaxRetries(config.BackendOptions.MaxRetries)) + } + if config.BackendOptions.MaxResponseSizeBytes != 0 { + opts = append(opts, WithMaxResponseSize(config.BackendOptions.MaxResponseSizeBytes)) + } + if config.BackendOptions.OutOfServiceSeconds != 0 { + opts = append(opts, WithOutOfServiceDuration(secondsToDuration(config.BackendOptions.OutOfServiceSeconds))) + } + if config.BackendOptions.MaxDegradedLatencyThreshold > 0 { + opts = append(opts, WithMaxDegradedLatencyThreshold(time.Duration(config.BackendOptions.MaxDegradedLatencyThreshold))) + } + if config.BackendOptions.MaxLatencyThreshold > 0 { + opts = append(opts, WithMaxLatencyThreshold(time.Duration(config.BackendOptions.MaxLatencyThreshold))) + } + if config.BackendOptions.MaxErrorRateThreshold > 0 { + opts = append(opts, WithMaxErrorRateThreshold(config.BackendOptions.MaxErrorRateThreshold)) + } + if cfg.MaxRPS != 0 { + opts = append(opts, WithMaxRPS(cfg.MaxRPS)) + } + if cfg.MaxWSConns != 0 { + opts = append(opts, WithMaxWSConns(cfg.MaxWSConns)) + } + if cfg.Password != "" { + passwordVal, err := ReadFromEnvOrConfig(cfg.Password) + if err != nil { + return nil, nil, err + } + opts = append(opts, WithBasicAuth(cfg.Username, passwordVal)) + } + + headers := map[string]string{} + for headerName, headerValue := range cfg.Headers { + headerValue, err := ReadFromEnvOrConfig(headerValue) + if err != nil { + return nil, nil, err + } + + headers[headerName] = headerValue + } + opts = append(opts, WithHeaders(headers)) + + tlsConfig, err := configureBackendTLS(cfg) + if err != nil { + return nil, nil, err + } + if tlsConfig != nil { + log.Info("using custom TLS config for backend", "name", name) + opts = append(opts, WithTLSConfig(tlsConfig)) + } + if cfg.StripTrailingXFF { + opts = append(opts, WithStrippedTrailingXFF()) + } + opts = append(opts, WithProxydIP(os.Getenv("PROXYD_IP"))) + opts = append(opts, WithConsensusSkipPeerCountCheck(cfg.ConsensusSkipPeerCountCheck)) + opts = append(opts, 
WithConsensusForcedCandidate(cfg.ConsensusForcedCandidate)) + opts = append(opts, WithWeight(cfg.Weight)) + + receiptsTarget, err := ReadFromEnvOrConfig(cfg.ConsensusReceiptsTarget) + if err != nil { + return nil, nil, err + } + receiptsTarget, err = validateReceiptsTarget(receiptsTarget) + if err != nil { + return nil, nil, err + } + opts = append(opts, WithConsensusReceiptTarget(receiptsTarget)) + + back := NewBackend(name, rpcURL, wsURL, rpcRequestSemaphore, opts...) + backendNames = append(backendNames, name) + backendsByName[name] = back + log.Info("configured backend", + "name", name, + "backend_names", backendNames, + "rpc_url", rpcURL, + "ws_url", wsURL) + } + + backendGroups := make(map[string]*BackendGroup) + for bgName, bg := range config.BackendGroups { + backends := make([]*Backend, 0) + fallbackBackends := make(map[string]bool) + fallbackCount := 0 + for _, bName := range bg.Backends { + if backendsByName[bName] == nil { + return nil, nil, fmt.Errorf("backend %s is not defined", bName) + } + backends = append(backends, backendsByName[bName]) + + for _, fb := range bg.Fallbacks { + if bName == fb { + fallbackBackends[bName] = true + log.Info("configured backend as fallback", + "backend_name", bName, + "backend_group", bgName, + ) + fallbackCount++ + } + } + + if _, ok := fallbackBackends[bName]; !ok { + fallbackBackends[bName] = false + log.Info("configured backend as primary", + "backend_name", bName, + "backend_group", bgName, + ) + } + } + + if fallbackCount != len(bg.Fallbacks) { + return nil, nil, + fmt.Errorf( + "error: number of fallbacks instantiated (%d) did not match configured (%d) for backend group %s", + fallbackCount, len(bg.Fallbacks), bgName, + ) + } + + backendGroups[bgName] = &BackendGroup{ + Name: bgName, + Backends: backends, + WeightedRouting: bg.WeightedRouting, + FallbackBackends: fallbackBackends, + } + } + + var wsBackendGroup *BackendGroup + if config.WSBackendGroup != "" { + wsBackendGroup = backendGroups[config.WSBackendGroup] + if wsBackendGroup == nil { + return nil, nil, fmt.Errorf("ws backend group %s does not exist", config.WSBackendGroup) + } + } + + if wsBackendGroup == nil && config.Server.WSPort != 0 { + return nil, nil, fmt.Errorf("a ws port was defined, but no ws group was defined") + } + + for _, bg := range config.RPCMethodMappings { + if backendGroups[bg] == nil { + return nil, nil, fmt.Errorf("undefined backend group %s", bg) + } + } + + var resolvedAuth map[string]string + + if config.Authentication != nil { + resolvedAuth = make(map[string]string) + for secret, alias := range config.Authentication { + resolvedSecret, err := ReadFromEnvOrConfig(secret) + if err != nil { + return nil, nil, err + } + resolvedAuth[resolvedSecret] = alias + } + } + + var ( + cache Cache + rpcCache RPCCache + ) + if config.Cache.Enabled { + if redisClient == nil { + log.Warn("redis is not configured, using in-memory cache") + cache = newMemoryCache() + } else { + ttl := defaultCacheTtl + if config.Cache.TTL != 0 { + ttl = time.Duration(config.Cache.TTL) + } + cache = newRedisCache(redisClient, config.Redis.Namespace, ttl) + } + rpcCache = newRPCCache(newCacheWithCompression(cache)) + } + + srv, err := NewServer( + backendGroups, + wsBackendGroup, + NewStringSetFromStrings(config.WSMethodWhitelist), + config.RPCMethodMappings, + config.Server.MaxBodySizeBytes, + resolvedAuth, + secondsToDuration(config.Server.TimeoutSeconds), + config.Server.MaxUpstreamBatchSize, + config.Server.EnableXServedByHeader, + rpcCache, + config.RateLimit, + 
config.SenderRateLimit, + config.Server.EnableRequestLog, + config.Server.MaxRequestBodyLogLen, + config.BatchConfig.MaxSize, + redisClient, + ) + if err != nil { + return nil, nil, fmt.Errorf("error creating server: %w", err) + } + + // Enable to support browser websocket connections. + // See https://pkg.go.dev/github.com/gorilla/websocket#hdr-Origin_Considerations + if config.Server.AllowAllOrigins { + srv.upgrader.CheckOrigin = func(r *http.Request) bool { + return true + } + } + + if config.Metrics.Enabled { + addr := fmt.Sprintf("%s:%d", config.Metrics.Host, config.Metrics.Port) + log.Info("starting metrics server", "addr", addr) + go func() { + if err := http.ListenAndServe(addr, promhttp.Handler()); err != nil { + log.Error("error starting metrics server", "err", err) + } + }() + } + + // To allow integration tests to cleanly come up, wait + // 10ms to give the below goroutines enough time to + // encounter an error creating their servers + errTimer := time.NewTimer(10 * time.Millisecond) + + if config.Server.RPCPort != 0 { + go func() { + if err := srv.RPCListenAndServe(config.Server.RPCHost, config.Server.RPCPort); err != nil { + if errors.Is(err, http.ErrServerClosed) { + log.Info("RPC server shut down") + return + } + log.Crit("error starting RPC server", "err", err) + } + }() + } + + if config.Server.WSPort != 0 { + go func() { + if err := srv.WSListenAndServe(config.Server.WSHost, config.Server.WSPort); err != nil { + if errors.Is(err, http.ErrServerClosed) { + log.Info("WS server shut down") + return + } + log.Crit("error starting WS server", "err", err) + } + }() + } else { + log.Info("WS server not enabled (ws_port is set to 0)") + } + + for bgName, bg := range backendGroups { + bgcfg := config.BackendGroups[bgName] + if bgcfg.ConsensusAware { + log.Info("creating poller for consensus aware backend_group", "name", bgName) + + copts := make([]ConsensusOpt, 0) + + if bgcfg.ConsensusAsyncHandler == "noop" { + copts = append(copts, WithAsyncHandler(NewNoopAsyncHandler())) + } + if bgcfg.ConsensusBanPeriod > 0 { + copts = append(copts, WithBanPeriod(time.Duration(bgcfg.ConsensusBanPeriod))) + } + if bgcfg.ConsensusMaxUpdateThreshold > 0 { + copts = append(copts, WithMaxUpdateThreshold(time.Duration(bgcfg.ConsensusMaxUpdateThreshold))) + } + if bgcfg.ConsensusMaxBlockLag > 0 { + copts = append(copts, WithMaxBlockLag(bgcfg.ConsensusMaxBlockLag)) + } + if bgcfg.ConsensusMinPeerCount > 0 { + copts = append(copts, WithMinPeerCount(uint64(bgcfg.ConsensusMinPeerCount))) + } + if bgcfg.ConsensusMaxBlockRange > 0 { + copts = append(copts, WithMaxBlockRange(bgcfg.ConsensusMaxBlockRange)) + } + if bgcfg.ConsensusPollerInterval > 0 { + copts = append(copts, WithPollerInterval(time.Duration(bgcfg.ConsensusPollerInterval))) + } + + for _, be := range bgcfg.Backends { + if fallback, ok := bg.FallbackBackends[be]; !ok { + log.Crit("backend not found in backend group fallback configuration", "backend_name", be) + } else { + log.Debug("configuring new backend for group", "backend_group", bgName, "backend_name", be, "fallback", fallback) + RecordBackendGroupFallbacks(bg, be, fallback) + } + } + + var tracker ConsensusTracker + if bgcfg.ConsensusHA { + if bgcfg.ConsensusHARedis.URL == "" { + log.Crit("must specify a consensus_ha_redis config when consensus_ha is true") + } + topts := make([]RedisConsensusTrackerOpt, 0) + if bgcfg.ConsensusHALockPeriod > 0 { + topts = append(topts, WithLockPeriod(time.Duration(bgcfg.ConsensusHALockPeriod))) + } + if bgcfg.ConsensusHAHeartbeatInterval > 0 { + topts 
= append(topts, WithHeartbeatInterval(time.Duration(bgcfg.ConsensusHAHeartbeatInterval))) + } + consensusHARedisClient, err := NewRedisClient(bgcfg.ConsensusHARedis.URL) + if err != nil { + return nil, nil, err + } + ns := fmt.Sprintf("%s:%s", bgcfg.ConsensusHARedis.Namespace, bg.Name) + tracker = NewRedisConsensusTracker(context.Background(), consensusHARedisClient, bg, ns, topts...) + copts = append(copts, WithTracker(tracker)) + } + + cp := NewConsensusPoller(bg, copts...) + bg.Consensus = cp + + if bgcfg.ConsensusHA { + tracker.(*RedisConsensusTracker).Init() + } + } + } + + <-errTimer.C + log.Info("started proxyd") + + shutdownFunc := func() { + log.Info("shutting down proxyd") + srv.Shutdown() + log.Info("goodbye") + } + + return srv, shutdownFunc, nil +} + +func validateReceiptsTarget(val string) (string, error) { + if val == "" { + val = ReceiptsTargetDebugGetRawReceipts + } + switch val { + case ReceiptsTargetDebugGetRawReceipts, + ReceiptsTargetAlchemyGetTransactionReceipts, + ReceiptsTargetEthGetTransactionReceipts, + ReceiptsTargetParityGetTransactionReceipts: + return val, nil + default: + return "", fmt.Errorf("invalid receipts target: %s", val) + } +} + +func secondsToDuration(seconds int) time.Duration { + return time.Duration(seconds) * time.Second +} + +func configureBackendTLS(cfg *BackendConfig) (*tls.Config, error) { + if cfg.CAFile == "" { + return nil, nil + } + + tlsConfig, err := CreateTLSClient(cfg.CAFile) + if err != nil { + return nil, err + } + + if cfg.ClientCertFile != "" && cfg.ClientKeyFile != "" { + cert, err := ParseKeyPair(cfg.ClientCertFile, cfg.ClientKeyFile) + if err != nil { + return nil, err + } + tlsConfig.Certificates = []tls.Certificate{cert} + } + + return tlsConfig, nil +} diff --git a/proxyd/reader.go b/proxyd/reader.go new file mode 100644 index 0000000..b16301f --- /dev/null +++ b/proxyd/reader.go @@ -0,0 +1,32 @@ +package proxyd + +import ( + "errors" + "io" +) + +var ErrLimitReaderOverLimit = errors.New("over read limit") + +func LimitReader(r io.Reader, n int64) io.Reader { return &LimitedReader{r, n} } + +// A LimitedReader reads from R but limits the amount of +// data returned to just N bytes. Each call to Read +// updates N to reflect the new amount remaining. +// Unlike the standard library version, Read returns +// ErrLimitReaderOverLimit when N <= 0. 
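+// +// A short usage sketch (maxBytes is a hypothetical limit): +// +// r := LimitReader(resp.Body, maxBytes) +// body, err := io.ReadAll(r) +// if err == ErrLimitReaderOverLimit { +// // the payload exceeded maxBytes +// }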
+type LimitedReader struct { + R io.Reader // underlying reader + N int64 // max bytes remaining +} + +func (l *LimitedReader) Read(p []byte) (int, error) { + if l.N <= 0 { + return 0, ErrLimitReaderOverLimit + } + if int64(len(p)) > l.N { + p = p[0:l.N] + } + n, err := l.R.Read(p) + l.N -= int64(n) + return n, err +} diff --git a/proxyd/reader_test.go b/proxyd/reader_test.go new file mode 100644 index 0000000..396b9bf --- /dev/null +++ b/proxyd/reader_test.go @@ -0,0 +1,44 @@ +package proxyd + +import ( + "io" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestLimitReader(t *testing.T) { + data := "hellohellohellohello" + r := LimitReader(strings.NewReader(data), 3) + buf := make([]byte, 3) + + // Buffer reads OK + n, err := r.Read(buf) + require.NoError(t, err) + require.Equal(t, 3, n) + + // Buffer is over limit + n, err = r.Read(buf) + require.Equal(t, ErrLimitReaderOverLimit, err) + require.Equal(t, 0, n) + + // Buffer on initial read is over size + buf = make([]byte, 16) + r = LimitReader(strings.NewReader(data), 3) + n, err = r.Read(buf) + require.NoError(t, err) + require.Equal(t, 3, n) + + // test with read all where the limit is less than the data + r = LimitReader(strings.NewReader(data), 3) + out, err := io.ReadAll(r) + require.Equal(t, ErrLimitReaderOverLimit, err) + require.Equal(t, "hel", string(out)) + + // test with read all where the limit is more than the data + r = LimitReader(strings.NewReader(data), 21) + out, err = io.ReadAll(r) + require.NoError(t, err) + require.Equal(t, data, string(out)) +} diff --git a/proxyd/redis.go b/proxyd/redis.go new file mode 100644 index 0000000..bd15f52 --- /dev/null +++ b/proxyd/redis.go @@ -0,0 +1,22 @@ +package proxyd + +import ( + "context" + "time" + + "github.com/redis/go-redis/v9" +) + +func NewRedisClient(url string) (*redis.Client, error) { + opts, err := redis.ParseURL(url) + if err != nil { + return nil, err + } + client := redis.NewClient(opts) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + if err := client.Ping(ctx).Err(); err != nil { + return nil, wrapErr(err, "error connecting to redis") + } + return client, nil +} diff --git a/proxyd/rewriter.go b/proxyd/rewriter.go new file mode 100644 index 0000000..605787e --- /dev/null +++ b/proxyd/rewriter.go @@ -0,0 +1,310 @@ +package proxyd + +import ( + "encoding/json" + "errors" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/rpc" +) + +type RewriteContext struct { + latest hexutil.Uint64 + safe hexutil.Uint64 + finalized hexutil.Uint64 + maxBlockRange uint64 +} + +type RewriteResult uint8 + +const ( + // RewriteNone means request should be forwarded as-is + RewriteNone RewriteResult = iota + + // RewriteOverrideError means there was an error attempting to rewrite + RewriteOverrideError + + // RewriteOverrideRequest means the modified request should be forwarded to the backend + RewriteOverrideRequest + + // RewriteOverrideResponse means to skip calling the backend and serve the overridden response + RewriteOverrideResponse +) + +var ( + ErrRewriteBlockOutOfRange = errors.New("block is out of range") + ErrRewriteRangeTooLarge = errors.New("block range is too large") +) + +// RewriteTags modifies the request and the response based on block tags +func RewriteTags(rctx RewriteContext, req *RPCReq, res *RPCRes) (RewriteResult, error) { + rw, err := RewriteResponse(rctx, req, res) + if rw == RewriteOverrideResponse { + return rw, err + } + return RewriteRequest(rctx, 
req, res) +} + +// RewriteResponse modifies the response object to comply with the rewrite context +// after the method has been called at the backend. +// RewriteResult informs the decision of the rewrite. +func RewriteResponse(rctx RewriteContext, req *RPCReq, res *RPCRes) (RewriteResult, error) { + switch req.Method { + case "eth_blockNumber": + res.Result = rctx.latest + return RewriteOverrideResponse, nil + } + return RewriteNone, nil +} + +// RewriteRequest modifies the request object to comply with the rewrite context +// before the method has been called at the backend. +// It returns RewriteNone if nothing was changed. +func RewriteRequest(rctx RewriteContext, req *RPCReq, res *RPCRes) (RewriteResult, error) { + switch req.Method { + case "eth_getLogs", + "eth_newFilter": + return rewriteRange(rctx, req, res, 0) + case "debug_getRawReceipts", "consensus_getReceipts": + return rewriteParam(rctx, req, res, 0, true, false) + case "eth_getBalance", + "eth_getCode", + "eth_getTransactionCount", + "eth_call": + return rewriteParam(rctx, req, res, 1, false, true) + case "eth_getStorageAt", + "eth_getProof": + return rewriteParam(rctx, req, res, 2, false, true) + case "eth_getBlockTransactionCountByNumber", + "eth_getUncleCountByBlockNumber", + "eth_getBlockByNumber", + "eth_getTransactionByBlockNumberAndIndex", + "eth_getUncleByBlockNumberAndIndex": + return rewriteParam(rctx, req, res, 0, false, false) + } + return RewriteNone, nil +} + +func rewriteParam(rctx RewriteContext, req *RPCReq, res *RPCRes, pos int, required bool, blockNrOrHash bool) (RewriteResult, error) { + var p []interface{} + err := json.Unmarshal(req.Params, &p) + if err != nil { + return RewriteOverrideError, err + } + + // we assume latest if the param is missing, + // and we don't rewrite if there are not enough params + if len(p) == pos && !required { + p = append(p, "latest") + } else if len(p) <= pos { + return RewriteNone, nil + } + + // support for https://eips.ethereum.org/EIPS/eip-1898 + var val interface{} + var rw bool + if blockNrOrHash { + bnh, err := remarshalBlockNumberOrHash(p[pos]) + if err != nil { + // fallback to string + s, ok := p[pos].(string) + if ok { + val, rw, err = rewriteTag(rctx, s) + if err != nil { + return RewriteOverrideError, err + } + } else { + return RewriteOverrideError, errors.New("expected BlockNumberOrHash or string") + } + } else { + val, rw, err = rewriteTagBlockNumberOrHash(rctx, bnh) + if err != nil { + return RewriteOverrideError, err + } + } + } else { + s, ok := p[pos].(string) + if !ok { + return RewriteOverrideError, errors.New("expected string") + } + + val, rw, err = rewriteTag(rctx, s) + if err != nil { + return RewriteOverrideError, err + } + } + + if rw { + p[pos] = val + paramRaw, err := json.Marshal(p) + if err != nil { + return RewriteOverrideError, err + } + req.Params = paramRaw + return RewriteOverrideRequest, nil + } + return RewriteNone, nil +}
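+ +// Illustrative behavior (a sketch with hypothetical values): with +// rctx.latest = 0x64, a request {"method": "eth_getBlockByNumber", +// "params": ["latest", false]} is rewritten in place so that params +// become ["0x64", false], and rewriteParam reports RewriteOverrideRequest +// so the modified request is forwarded to the backend. + 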
+func rewriteRange(rctx RewriteContext, req *RPCReq, res *RPCRes, pos int) (RewriteResult, error) { + var p []map[string]interface{} + err := json.Unmarshal(req.Params, &p) + if err != nil { + return RewriteOverrideError, err + } + + // if either fromBlock or toBlock is defined, default the other to "latest" if unset + _, hasFrom := p[pos]["fromBlock"] + _, hasTo := p[pos]["toBlock"] + if hasFrom && !hasTo { + p[pos]["toBlock"] = "latest" + } else if hasTo && !hasFrom { + p[pos]["fromBlock"] = "latest" + } + + modifiedFrom, err := rewriteTagMap(rctx, p[pos], "fromBlock") + if err != nil { + return RewriteOverrideError, err + } + + modifiedTo, err := rewriteTagMap(rctx, p[pos], "toBlock") + if err != nil { + return RewriteOverrideError, err + } + + if rctx.maxBlockRange > 0 && (hasFrom || hasTo) { + from, err := blockNumber(p[pos], "fromBlock", uint64(rctx.latest)) + if err != nil { + return RewriteOverrideError, err + } + to, err := blockNumber(p[pos], "toBlock", uint64(rctx.latest)) + if err != nil { + return RewriteOverrideError, err + } + if to-from > rctx.maxBlockRange { + return RewriteOverrideError, ErrRewriteRangeTooLarge + } + } + + // if any of the fields in the request have been changed, re-marshal the params + if modifiedFrom || modifiedTo { + paramsRaw, err := json.Marshal(p) + if err != nil { + return RewriteOverrideError, err + } + req.Params = paramsRaw + return RewriteOverrideRequest, nil + } + + return RewriteNone, nil +} + +func blockNumber(m map[string]interface{}, key string, latest uint64) (uint64, error) { + current, ok := m[key].(string) + if !ok { + return 0, errors.New("expected string") + } + // the latest/safe/finalized tags are already replaced by rewriteTag + if current == "earliest" { + return 0, nil + } + if current == "pending" { + return latest + 1, nil + } + return hexutil.DecodeUint64(current) +} + +func rewriteTagMap(rctx RewriteContext, m map[string]interface{}, key string) (bool, error) { + if m[key] == nil || m[key] == "" { + return false, nil + } + + current, ok := m[key].(string) + if !ok { + return false, errors.New("expected string") + } + + val, rw, err := rewriteTag(rctx, current) + if err != nil { + return false, err + } + if rw { + m[key] = val + return true, nil + } + + return false, nil +} + +func remarshalBlockNumberOrHash(current interface{}) (*rpc.BlockNumberOrHash, error) { + jv, err := json.Marshal(current) + if err != nil { + return nil, err + } + + var bnh rpc.BlockNumberOrHash + err = bnh.UnmarshalJSON(jv) + if err != nil { + return nil, err + } + + return &bnh, nil +} + +func rewriteTag(rctx RewriteContext, current string) (string, bool, error) { + bnh, err := remarshalBlockNumberOrHash(current) + if err != nil { + return "", false, err + } + + // this is a hash, not a block number + if bnh.BlockNumber == nil { + return current, false, nil + } + + switch *bnh.BlockNumber { + case rpc.PendingBlockNumber, + rpc.EarliestBlockNumber: + return current, false, nil + case rpc.FinalizedBlockNumber: + return rctx.finalized.String(), true, nil + case rpc.SafeBlockNumber: + return rctx.safe.String(), true, nil + case rpc.LatestBlockNumber: + return rctx.latest.String(), true, nil + default: + if bnh.BlockNumber.Int64() > int64(rctx.latest) { + return "", false, ErrRewriteBlockOutOfRange + } + } + + return current, false, nil +} + +func rewriteTagBlockNumberOrHash(rctx RewriteContext, current *rpc.BlockNumberOrHash) (*rpc.BlockNumberOrHash, bool, error) { + // this is a hash, not a block number + if current.BlockNumber == nil { + return current, false, nil + } + + switch *current.BlockNumber { + case rpc.PendingBlockNumber, + rpc.EarliestBlockNumber: + return current, false, nil + case rpc.FinalizedBlockNumber: + bn := rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(rctx.finalized)) + return &bn, true, nil + case rpc.SafeBlockNumber: + bn := rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(rctx.safe)) + return &bn, true, nil + case rpc.LatestBlockNumber: + bn := rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(rctx.latest)) + return &bn, true, nil + default: + if current.BlockNumber.Int64() > int64(rctx.latest) { + return nil, false, ErrRewriteBlockOutOfRange + } + } + + 
return current, false, nil +} diff --git a/proxyd/rewriter_test.go b/proxyd/rewriter_test.go new file mode 100644 index 0000000..1f0d80b --- /dev/null +++ b/proxyd/rewriter_test.go @@ -0,0 +1,717 @@ +package proxyd + +import ( + "encoding/json" + "strings" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/rpc" + "github.com/stretchr/testify/require" +) + +type args struct { + rctx RewriteContext + req *RPCReq + res *RPCRes +} + +type rewriteTest struct { + name string + args args + expected RewriteResult + expectedErr error + check func(*testing.T, args) +} + +func TestRewriteRequest(t *testing.T) { + tests := []rewriteTest{ + /* range scoped */ + { + name: "eth_getLogs fromBlock latest", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100)}, + req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"fromBlock": "latest"}})}, + res: nil, + }, + expected: RewriteOverrideRequest, + check: func(t *testing.T, args args) { + var p []map[string]interface{} + err := json.Unmarshal(args.req.Params, &p) + require.Nil(t, err) + require.Equal(t, hexutil.Uint64(100).String(), p[0]["fromBlock"]) + }, + }, + { + name: "eth_getLogs fromBlock within range", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100)}, + req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"fromBlock": hexutil.Uint64(55).String()}})}, + res: nil, + }, + expected: RewriteOverrideRequest, + check: func(t *testing.T, args args) { + var p []map[string]interface{} + err := json.Unmarshal(args.req.Params, &p) + require.Nil(t, err) + require.Equal(t, hexutil.Uint64(55).String(), p[0]["fromBlock"]) + }, + }, + { + name: "eth_getLogs fromBlock out of range", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100)}, + req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"fromBlock": hexutil.Uint64(111).String()}})}, + res: nil, + }, + expected: RewriteOverrideError, + expectedErr: ErrRewriteBlockOutOfRange, + }, + { + name: "eth_getLogs toBlock latest", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100)}, + req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"toBlock": "latest"}})}, + res: nil, + }, + expected: RewriteOverrideRequest, + check: func(t *testing.T, args args) { + var p []map[string]interface{} + err := json.Unmarshal(args.req.Params, &p) + require.Nil(t, err) + require.Equal(t, hexutil.Uint64(100).String(), p[0]["toBlock"]) + }, + }, + { + name: "eth_getLogs toBlock within range", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100)}, + req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"toBlock": hexutil.Uint64(55).String()}})}, + res: nil, + }, + expected: RewriteOverrideRequest, + check: func(t *testing.T, args args) { + var p []map[string]interface{} + err := json.Unmarshal(args.req.Params, &p) + require.Nil(t, err) + require.Equal(t, hexutil.Uint64(55).String(), p[0]["toBlock"]) + }, + }, + { + name: "eth_getLogs toBlock out of range", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100)}, + req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"toBlock": hexutil.Uint64(111).String()}})}, + res: nil, + }, + expected: RewriteOverrideError, + expectedErr: ErrRewriteBlockOutOfRange, + }, + { + name: "eth_getLogs fromBlock, toBlock latest", + args: args{ + rctx: 
RewriteContext{latest: hexutil.Uint64(100)}, + req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"fromBlock": "latest", "toBlock": "latest"}})}, + res: nil, + }, + expected: RewriteOverrideRequest, + check: func(t *testing.T, args args) { + var p []map[string]interface{} + err := json.Unmarshal(args.req.Params, &p) + require.Nil(t, err) + require.Equal(t, hexutil.Uint64(100).String(), p[0]["fromBlock"]) + require.Equal(t, hexutil.Uint64(100).String(), p[0]["toBlock"]) + }, + }, + { + name: "eth_getLogs fromBlock, toBlock within range", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100)}, + req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"fromBlock": hexutil.Uint64(55).String(), "toBlock": hexutil.Uint64(77).String()}})}, + res: nil, + }, + expected: RewriteNone, + check: func(t *testing.T, args args) { + var p []map[string]interface{} + err := json.Unmarshal(args.req.Params, &p) + require.Nil(t, err) + require.Equal(t, hexutil.Uint64(55).String(), p[0]["fromBlock"]) + require.Equal(t, hexutil.Uint64(77).String(), p[0]["toBlock"]) + }, + }, + { + name: "eth_getLogs fromBlock, toBlock out of range", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100)}, + req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"fromBlock": hexutil.Uint64(111).String(), "toBlock": hexutil.Uint64(222).String()}})}, + res: nil, + }, + expected: RewriteOverrideError, + expectedErr: ErrRewriteBlockOutOfRange, + }, + { + name: "eth_getLogs fromBlock -> toBlock above max range", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100), maxBlockRange: 30}, + req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"fromBlock": hexutil.Uint64(20).String(), "toBlock": hexutil.Uint64(80).String()}})}, + res: nil, + }, + expected: RewriteOverrideError, + expectedErr: ErrRewriteRangeTooLarge, + }, + { + name: "eth_getLogs earliest -> latest above max range", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100), maxBlockRange: 30}, + req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"fromBlock": "earliest", "toBlock": "latest"}})}, + res: nil, + }, + expected: RewriteOverrideError, + expectedErr: ErrRewriteRangeTooLarge, + }, + { + name: "eth_getLogs earliest -> pending above max range", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100), maxBlockRange: 30}, + req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"fromBlock": "earliest", "toBlock": "pending"}})}, + res: nil, + }, + expected: RewriteOverrideError, + expectedErr: ErrRewriteRangeTooLarge, + }, + { + name: "eth_getLogs earliest -> default above max range", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100), maxBlockRange: 30}, + req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"fromBlock": "earliest"}})}, + res: nil, + }, + expected: RewriteOverrideError, + expectedErr: ErrRewriteRangeTooLarge, + }, + { + name: "eth_getLogs default -> latest within range", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100), maxBlockRange: 30}, + req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"toBlock": "latest"}})}, + res: nil, + }, + expected: RewriteOverrideRequest, + check: func(t *testing.T, args args) { + var p []map[string]interface{} + err := json.Unmarshal(args.req.Params, &p) + require.Nil(t, err) + 
require.Equal(t, hexutil.Uint64(100).String(), p[0]["fromBlock"]) + require.Equal(t, hexutil.Uint64(100).String(), p[0]["toBlock"]) + }, + }, + /* required parameter at pos 0 */ + { + name: "debug_getRawReceipts latest", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100)}, + req: &RPCReq{Method: "debug_getRawReceipts", Params: mustMarshalJSON([]string{"latest"})}, + res: nil, + }, + expected: RewriteOverrideRequest, + check: func(t *testing.T, args args) { + var p []string + err := json.Unmarshal(args.req.Params, &p) + require.Nil(t, err) + require.Equal(t, 1, len(p)) + require.Equal(t, hexutil.Uint64(100).String(), p[0]) + }, + }, + { + name: "debug_getRawReceipts within range", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100)}, + req: &RPCReq{Method: "debug_getRawReceipts", Params: mustMarshalJSON([]string{hexutil.Uint64(55).String()})}, + res: nil, + }, + expected: RewriteNone, + check: func(t *testing.T, args args) { + var p []string + err := json.Unmarshal(args.req.Params, &p) + require.Nil(t, err) + require.Equal(t, 1, len(p)) + require.Equal(t, hexutil.Uint64(55).String(), p[0]) + }, + }, + { + name: "debug_getRawReceipts out of range", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100)}, + req: &RPCReq{Method: "debug_getRawReceipts", Params: mustMarshalJSON([]string{hexutil.Uint64(111).String()})}, + res: nil, + }, + expected: RewriteOverrideError, + expectedErr: ErrRewriteBlockOutOfRange, + }, + { + name: "debug_getRawReceipts missing parameter", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100)}, + req: &RPCReq{Method: "debug_getRawReceipts", Params: mustMarshalJSON([]string{})}, + res: nil, + }, + expected: RewriteNone, + }, + { + name: "debug_getRawReceipts with block hash", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100)}, + req: &RPCReq{Method: "debug_getRawReceipts", Params: mustMarshalJSON([]string{"0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b"})}, + res: nil, + }, + expected: RewriteNone, + check: func(t *testing.T, args args) { + var p []string + err := json.Unmarshal(args.req.Params, &p) + require.Nil(t, err) + require.Equal(t, 1, len(p)) + require.Equal(t, "0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b", p[0]) + }, + }, + /* default block parameter */ + { + name: "eth_getCode omit block, should add", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100)}, + req: &RPCReq{Method: "eth_getCode", Params: mustMarshalJSON([]string{"0x123"})}, + res: nil, + }, + expected: RewriteOverrideRequest, + check: func(t *testing.T, args args) { + var p []interface{} + err := json.Unmarshal(args.req.Params, &p) + require.Nil(t, err) + require.Equal(t, 2, len(p)) + require.Equal(t, "0x123", p[0]) + bnh, err := remarshalBlockNumberOrHash(p[1]) + require.Nil(t, err) + require.Equal(t, rpc.BlockNumberOrHashWithNumber(100), *bnh) + }, + }, + { + name: "eth_getCode not enough params, should do nothing", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100)}, + req: &RPCReq{Method: "eth_getCode", Params: mustMarshalJSON([]string{})}, + res: nil, + }, + expected: RewriteNone, + check: func(t *testing.T, args args) { + var p []string + err := json.Unmarshal(args.req.Params, &p) + require.Nil(t, err) + require.Equal(t, 0, len(p)) + }, + }, + { + name: "eth_getCode latest", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100)}, + req: &RPCReq{Method: "eth_getCode", Params: mustMarshalJSON([]string{"0x123", "latest"})}, + res: nil, + }, + expected: 
RewriteOverrideRequest, + check: func(t *testing.T, args args) { + var p []interface{} + err := json.Unmarshal(args.req.Params, &p) + require.Nil(t, err) + require.Equal(t, 2, len(p)) + require.Equal(t, "0x123", p[0]) + bnh, err := remarshalBlockNumberOrHash(p[1]) + require.Nil(t, err) + require.Equal(t, rpc.BlockNumberOrHashWithNumber(100), *bnh) + }, + }, + { + name: "eth_getCode within range", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100)}, + req: &RPCReq{Method: "eth_getCode", Params: mustMarshalJSON([]string{"0x123", hexutil.Uint64(55).String()})}, + res: nil, + }, + expected: RewriteNone, + check: func(t *testing.T, args args) { + var p []string + err := json.Unmarshal(args.req.Params, &p) + require.Nil(t, err) + require.Equal(t, 2, len(p)) + require.Equal(t, "0x123", p[0]) + require.Equal(t, hexutil.Uint64(55).String(), p[1]) + }, + }, + { + name: "eth_getCode out of range", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100)}, + req: &RPCReq{Method: "eth_getCode", Params: mustMarshalJSON([]string{"0x123", hexutil.Uint64(111).String()})}, + res: nil, + }, + expected: RewriteOverrideError, + expectedErr: ErrRewriteBlockOutOfRange, + }, + /* default block parameter, at position 2 */ + { + name: "eth_getStorageAt omit block, should add", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100)}, + req: &RPCReq{Method: "eth_getStorageAt", Params: mustMarshalJSON([]string{"0x123", "5"})}, + res: nil, + }, + expected: RewriteOverrideRequest, + check: func(t *testing.T, args args) { + var p []interface{} + err := json.Unmarshal(args.req.Params, &p) + require.Nil(t, err) + require.Equal(t, 3, len(p)) + require.Equal(t, "0x123", p[0]) + require.Equal(t, "5", p[1]) + bnh, err := remarshalBlockNumberOrHash(p[2]) + require.Nil(t, err) + require.Equal(t, rpc.BlockNumberOrHashWithNumber(100), *bnh) + }, + }, + { + name: "eth_getStorageAt latest", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100)}, + req: &RPCReq{Method: "eth_getStorageAt", Params: mustMarshalJSON([]string{"0x123", "5", "latest"})}, + res: nil, + }, + expected: RewriteOverrideRequest, + check: func(t *testing.T, args args) { + var p []interface{} + err := json.Unmarshal(args.req.Params, &p) + require.Nil(t, err) + require.Equal(t, 3, len(p)) + require.Equal(t, "0x123", p[0]) + require.Equal(t, "5", p[1]) + bnh, err := remarshalBlockNumberOrHash(p[2]) + require.Nil(t, err) + require.Equal(t, rpc.BlockNumberOrHashWithNumber(100), *bnh) + }, + }, + { + name: "eth_getStorageAt within range", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100)}, + req: &RPCReq{Method: "eth_getStorageAt", Params: mustMarshalJSON([]string{"0x123", "5", hexutil.Uint64(55).String()})}, + res: nil, + }, + expected: RewriteNone, + check: func(t *testing.T, args args) { + var p []string + err := json.Unmarshal(args.req.Params, &p) + require.Nil(t, err) + require.Equal(t, 3, len(p)) + require.Equal(t, "0x123", p[0]) + require.Equal(t, "5", p[1]) + require.Equal(t, hexutil.Uint64(55).String(), p[2]) + }, + }, + { + name: "eth_getStorageAt out of range", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100)}, + req: &RPCReq{Method: "eth_getStorageAt", Params: mustMarshalJSON([]string{"0x123", "5", hexutil.Uint64(111).String()})}, + res: nil, + }, + expected: RewriteOverrideError, + expectedErr: ErrRewriteBlockOutOfRange, + }, + /* default block parameter, at position 0 */ + { + name: "eth_getBlockByNumber omit block, should add", + args: args{ + rctx: RewriteContext{latest: 
hexutil.Uint64(100)}, + req: &RPCReq{Method: "eth_getBlockByNumber", Params: mustMarshalJSON([]string{})}, + res: nil, + }, + expected: RewriteOverrideRequest, + check: func(t *testing.T, args args) { + var p []string + err := json.Unmarshal(args.req.Params, &p) + require.Nil(t, err) + require.Equal(t, 1, len(p)) + require.Equal(t, hexutil.Uint64(100).String(), p[0]) + }, + }, + { + name: "eth_getBlockByNumber latest", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100)}, + req: &RPCReq{Method: "eth_getBlockByNumber", Params: mustMarshalJSON([]string{"latest"})}, + res: nil, + }, + expected: RewriteOverrideRequest, + check: func(t *testing.T, args args) { + var p []string + err := json.Unmarshal(args.req.Params, &p) + require.Nil(t, err) + require.Equal(t, 1, len(p)) + require.Equal(t, hexutil.Uint64(100).String(), p[0]) + }, + }, + { + name: "eth_getBlockByNumber finalized", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100), finalized: hexutil.Uint64(55)}, + req: &RPCReq{Method: "eth_getBlockByNumber", Params: mustMarshalJSON([]string{"finalized"})}, + res: nil, + }, + expected: RewriteOverrideRequest, + check: func(t *testing.T, args args) { + var p []string + err := json.Unmarshal(args.req.Params, &p) + require.Nil(t, err) + require.Equal(t, 1, len(p)) + require.Equal(t, hexutil.Uint64(55).String(), p[0]) + }, + }, + { + name: "eth_getBlockByNumber safe", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100), safe: hexutil.Uint64(50)}, + req: &RPCReq{Method: "eth_getBlockByNumber", Params: mustMarshalJSON([]string{"safe"})}, + res: nil, + }, + expected: RewriteOverrideRequest, + check: func(t *testing.T, args args) { + var p []string + err := json.Unmarshal(args.req.Params, &p) + require.Nil(t, err) + require.Equal(t, 1, len(p)) + require.Equal(t, hexutil.Uint64(50).String(), p[0]) + }, + }, + { + name: "eth_getBlockByNumber within range", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100)}, + req: &RPCReq{Method: "eth_getBlockByNumber", Params: mustMarshalJSON([]string{hexutil.Uint64(55).String()})}, + res: nil, + }, + expected: RewriteNone, + check: func(t *testing.T, args args) { + var p []string + err := json.Unmarshal(args.req.Params, &p) + require.Nil(t, err) + require.Equal(t, 1, len(p)) + require.Equal(t, hexutil.Uint64(55).String(), p[0]) + }, + }, + { + name: "eth_getBlockByNumber out of range", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100)}, + req: &RPCReq{Method: "eth_getBlockByNumber", Params: mustMarshalJSON([]string{hexutil.Uint64(111).String()})}, + res: nil, + }, + expected: RewriteOverrideError, + expectedErr: ErrRewriteBlockOutOfRange, + }, + { + name: "eth_getStorageAt using rpc.BlockNumberOrHash", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100)}, + req: &RPCReq{Method: "eth_getStorageAt", Params: mustMarshalJSON([]string{ + "0xae851f927ee40de99aabb7461c00f9622ab91d60", + "0x65a7ed542fb37fe237fdfbdd70b31598523fe5b32879e307bae27a0bd9581c08", + "0x1c4840bcb3de3ac403c0075b46c2c47d4396c5b624b6e1b2874ec04e8879b483"})}, + res: nil, + }, + expected: RewriteNone, + }, + // eip1898 + { + name: "eth_getStorageAt using rpc.BlockNumberOrHash at genesis (blockNumber)", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100)}, + req: &RPCReq{Method: "eth_getStorageAt", Params: mustMarshalJSON([]interface{}{ + "0xae851f927ee40de99aabb7461c00f9622ab91d60", + "10", + map[string]interface{}{ + "blockNumber": "0x0", + }})}, + res: nil, + }, + expected: RewriteNone, + }, + { + name: 
"eth_getStorageAt using rpc.BlockNumberOrHash at genesis (hash)", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100)}, + req: &RPCReq{Method: "eth_getStorageAt", Params: mustMarshalJSON([]interface{}{ + "0xae851f927ee40de99aabb7461c00f9622ab91d60", + "10", + map[string]interface{}{ + "blockHash": "0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3", + "requireCanonical": true, + }})}, + res: nil, + }, + expected: RewriteNone, + check: func(t *testing.T, args args) { + var p []interface{} + err := json.Unmarshal(args.req.Params, &p) + require.Nil(t, err) + require.Equal(t, 3, len(p)) + require.Equal(t, "0xae851f927ee40de99aabb7461c00f9622ab91d60", p[0]) + require.Equal(t, "10", p[1]) + bnh, err := remarshalBlockNumberOrHash(p[2]) + require.Nil(t, err) + require.Equal(t, rpc.BlockNumberOrHashWithHash(common.HexToHash("0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"), true), *bnh) + require.True(t, bnh.RequireCanonical) + }, + }, + { + name: "eth_getStorageAt using rpc.BlockNumberOrHash at latest (blockNumber)", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100)}, + req: &RPCReq{Method: "eth_getStorageAt", Params: mustMarshalJSON([]interface{}{ + "0xae851f927ee40de99aabb7461c00f9622ab91d60", + "10", + map[string]interface{}{ + "blockNumber": "latest", + }})}, + res: nil, + }, + expected: RewriteOverrideRequest, + check: func(t *testing.T, args args) { + var p []interface{} + err := json.Unmarshal(args.req.Params, &p) + require.Nil(t, err) + require.Equal(t, 3, len(p)) + require.Equal(t, "0xae851f927ee40de99aabb7461c00f9622ab91d60", p[0]) + require.Equal(t, "10", p[1]) + bnh, err := remarshalBlockNumberOrHash(p[2]) + require.Nil(t, err) + require.Equal(t, rpc.BlockNumberOrHashWithNumber(100), *bnh) + }, + }, + { + name: "eth_getStorageAt using rpc.BlockNumberOrHash out of range", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100)}, + req: &RPCReq{Method: "eth_getStorageAt", Params: mustMarshalJSON([]interface{}{ + "0xae851f927ee40de99aabb7461c00f9622ab91d60", + "10", + map[string]interface{}{ + "blockNumber": "0x111", + }})}, + res: nil, + }, + expected: RewriteOverrideError, + expectedErr: ErrRewriteBlockOutOfRange, + }, + } + + // generalize tests for other methods with same interface and behavior + tests = generalize(tests, "eth_getLogs", "eth_newFilter") + tests = generalize(tests, "eth_getCode", "eth_getBalance") + tests = generalize(tests, "eth_getCode", "eth_getTransactionCount") + tests = generalize(tests, "eth_getCode", "eth_call") + tests = generalize(tests, "eth_getBlockByNumber", "eth_getBlockTransactionCountByNumber") + tests = generalize(tests, "eth_getBlockByNumber", "eth_getUncleCountByBlockNumber") + tests = generalize(tests, "eth_getBlockByNumber", "eth_getTransactionByBlockNumberAndIndex") + tests = generalize(tests, "eth_getBlockByNumber", "eth_getUncleByBlockNumberAndIndex") + tests = generalize(tests, "eth_getStorageSlotAt", "eth_getProof") + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := RewriteRequest(tt.args.rctx, tt.args.req, tt.args.res) + if result != RewriteOverrideError { + require.Nil(t, err) + require.Equal(t, tt.expected, result) + } else { + require.Equal(t, tt.expectedErr, err) + } + if tt.check != nil { + tt.check(t, tt.args) + } + }) + } +} + +func generalize(tests []rewriteTest, baseMethod string, generalizedMethod string) []rewriteTest { + newCases := make([]rewriteTest, 0) + for _, t := range tests { + if t.args.req.Method == baseMethod { + 
newName := strings.Replace(t.name, baseMethod, generalizedMethod, -1) + var req *RPCReq + var res *RPCRes + + if t.args.req != nil { + req = &RPCReq{ + JSONRPC: t.args.req.JSONRPC, + Method: generalizedMethod, + Params: t.args.req.Params, + ID: t.args.req.ID, + } + } + + if t.args.res != nil { + res = &RPCRes{ + JSONRPC: t.args.res.JSONRPC, + Result: t.args.res.Result, + Error: t.args.res.Error, + ID: t.args.res.ID, + } + } + newCases = append(newCases, rewriteTest{ + name: newName, + args: args{ + rctx: t.args.rctx, + req: req, + res: res, + }, + expected: t.expected, + expectedErr: t.expectedErr, + check: t.check, + }) + } + } + return append(tests, newCases...) +} + +func TestRewriteResponse(t *testing.T) { + type args struct { + rctx RewriteContext + req *RPCReq + res *RPCRes + } + tests := []struct { + name string + args args + expected RewriteResult + check func(*testing.T, args) + }{ + { + name: "eth_blockNumber latest", + args: args{ + rctx: RewriteContext{latest: hexutil.Uint64(100)}, + req: &RPCReq{Method: "eth_blockNumber"}, + res: &RPCRes{Result: hexutil.Uint64(200)}, + }, + expected: RewriteOverrideResponse, + check: func(t *testing.T, args args) { + require.Equal(t, args.res.Result, hexutil.Uint64(100)) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := RewriteResponse(tt.args.rctx, tt.args.req, tt.args.res) + require.Nil(t, err) + require.Equal(t, tt.expected, result) + if tt.check != nil { + tt.check(t, tt.args) + } + }) + } +} diff --git a/proxyd/rpc.go b/proxyd/rpc.go new file mode 100644 index 0000000..902e266 --- /dev/null +++ b/proxyd/rpc.go @@ -0,0 +1,170 @@ +package proxyd + +import ( + "encoding/json" + "io" + "strings" +) + +type RPCReq struct { + JSONRPC string `json:"jsonrpc"` + Method string `json:"method"` + Params json.RawMessage `json:"params"` + ID json.RawMessage `json:"id"` +} + +type RPCRes struct { + JSONRPC string + Result interface{} + Error *RPCErr + ID json.RawMessage +} + +type rpcResJSON struct { + JSONRPC string `json:"jsonrpc"` + Result interface{} `json:"result,omitempty"` + Error *RPCErr `json:"error,omitempty"` + ID json.RawMessage `json:"id"` +} + +type nullResultRPCRes struct { + JSONRPC string `json:"jsonrpc"` + Result interface{} `json:"result"` + ID json.RawMessage `json:"id"` +} + +func (r *RPCRes) IsError() bool { + return r.Error != nil +} + +func (r *RPCRes) MarshalJSON() ([]byte, error) { + if r.Result == nil && r.Error == nil { + return json.Marshal(&nullResultRPCRes{ + JSONRPC: r.JSONRPC, + Result: nil, + ID: r.ID, + }) + } + + return json.Marshal(&rpcResJSON{ + JSONRPC: r.JSONRPC, + Result: r.Result, + Error: r.Error, + ID: r.ID, + }) +} + +type RPCErr struct { + Code int `json:"code"` + Message string `json:"message"` + Data string `json:"data,omitempty"` + HTTPErrorCode int `json:"-"` +} + +func (r *RPCErr) Error() string { + return r.Message +} + +func (r *RPCErr) Clone() *RPCErr { + return &RPCErr{ + Code: r.Code, + Message: r.Message, + HTTPErrorCode: r.HTTPErrorCode, + } +} + +func IsValidID(id json.RawMessage) bool { + // handle the case where the ID is a string + if strings.HasPrefix(string(id), "\"") && strings.HasSuffix(string(id), "\"") { + return len(id) > 2 + } + + // technically allows a boolean/null ID, but so does Geth + // https://github.com/ethereum/go-ethereum/blob/master/rpc/json.go#L72 + return len(id) > 0 && id[0] != '{' && id[0] != '[' +} + +func ParseRPCReq(body []byte) (*RPCReq, error) { + req := new(RPCReq) + if err := json.Unmarshal(body, req); err != nil { 
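+ // A body that fails to unmarshal is reported as a standard JSON-RPC parse error.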
+ return nil, ErrParseErr + } + + return req, nil +} + +func ParseBatchRPCReq(body []byte) ([]json.RawMessage, error) { + batch := make([]json.RawMessage, 0) + if err := json.Unmarshal(body, &batch); err != nil { + return nil, err + } + + return batch, nil +} + +func ParseRPCRes(r io.Reader) (*RPCRes, error) { + body, err := io.ReadAll(r) + if err != nil { + return nil, wrapErr(err, "error reading RPC response") + } + + res := new(RPCRes) + if err := json.Unmarshal(body, res); err != nil { + return nil, wrapErr(err, "error unmarshalling RPC response") + } + + return res, nil +} + +func ValidateRPCReq(req *RPCReq) error { + if req.JSONRPC != JSONRPCVersion { + return ErrInvalidRequest("invalid JSON-RPC version") + } + + if req.Method == "" { + return ErrInvalidRequest("no method specified") + } + + if !IsValidID(req.ID) { + return ErrInvalidRequest("invalid ID") + } + + return nil +} + +func NewRPCErrorRes(id json.RawMessage, err error) *RPCRes { + var rpcErr *RPCErr + if rr, ok := err.(*RPCErr); ok { + rpcErr = rr + } else { + rpcErr = &RPCErr{ + Code: JSONRPCErrorInternal, + Message: err.Error(), + } + } + + return &RPCRes{ + JSONRPC: JSONRPCVersion, + Error: rpcErr, + ID: id, + } +} + +func NewRPCRes(id json.RawMessage, result interface{}) *RPCRes { + return &RPCRes{ + JSONRPC: JSONRPCVersion, + Result: result, + ID: id, + } +} + +func IsBatch(raw []byte) bool { + for _, c := range raw { + // skip insignificant whitespace (http://www.ietf.org/rfc/rfc4627.txt) + if c == 0x20 || c == 0x09 || c == 0x0a || c == 0x0d { + continue + } + return c == '[' + } + return false +} diff --git a/proxyd/rpc_test.go b/proxyd/rpc_test.go new file mode 100644 index 0000000..e30fe93 --- /dev/null +++ b/proxyd/rpc_test.go @@ -0,0 +1,89 @@ +package proxyd + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestRPCResJSON(t *testing.T) { + tests := []struct { + name string + in *RPCRes + out string + }{ + { + "string result", + &RPCRes{ + JSONRPC: JSONRPCVersion, + Result: "foobar", + ID: []byte("123"), + }, + `{"jsonrpc":"2.0","result":"foobar","id":123}`, + }, + { + "object result", + &RPCRes{ + JSONRPC: JSONRPCVersion, + Result: struct { + Str string `json:"str"` + }{ + "test", + }, + ID: []byte("123"), + }, + `{"jsonrpc":"2.0","result":{"str":"test"},"id":123}`, + }, + { + "nil result", + &RPCRes{ + JSONRPC: JSONRPCVersion, + Result: nil, + ID: []byte("123"), + }, + `{"jsonrpc":"2.0","result":null,"id":123}`, + }, + { + "error result without data", + &RPCRes{ + JSONRPC: JSONRPCVersion, + Error: &RPCErr{ + Code: 1234, + Message: "test err", + }, + ID: []byte("123"), + }, + `{"jsonrpc":"2.0","error":{"code":1234,"message":"test err"},"id":123}`, + }, + { + "error result with data", + &RPCRes{ + JSONRPC: JSONRPCVersion, + Error: &RPCErr{ + Code: 1234, + Message: "test err", + Data: "revert", + }, + ID: []byte("123"), + }, + `{"jsonrpc":"2.0","error":{"code":1234,"message":"test err","data":"revert"},"id":123}`, + }, + { + "string ID", + &RPCRes{ + JSONRPC: JSONRPCVersion, + Result: "foobar", + ID: []byte("\"123\""), + }, + `{"jsonrpc":"2.0","result":"foobar","id":"123"}`, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + out, err := json.Marshal(tt.in) + require.NoError(t, err) + require.Equal(t, tt.out, string(out)) + }) + } +} diff --git a/proxyd/server.go b/proxyd/server.go new file mode 100644 index 0000000..c663f42 --- /dev/null +++ b/proxyd/server.go @@ -0,0 +1,877 @@ +package proxyd + +import ( + "context" + "crypto/rand" + 
"encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "math/big" + "net/http" + "regexp" + "strconv" + "strings" + "sync" + "time" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/txpool" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" + "github.com/gorilla/mux" + "github.com/gorilla/websocket" + "github.com/prometheus/client_golang/prometheus" + "github.com/redis/go-redis/v9" + "github.com/rs/cors" + "github.com/syndtr/goleveldb/leveldb/opt" +) + +const ( + ContextKeyAuth = "authorization" + ContextKeyReqID = "req_id" + ContextKeyXForwardedFor = "x_forwarded_for" + DefaultMaxBatchRPCCallsLimit = 100 + MaxBatchRPCCallsHardLimit = 1000 + cacheStatusHdr = "X-Proxyd-Cache-Status" + defaultRPCTimeout = 10 * time.Second + defaultBodySizeLimit = 256 * opt.KiB + defaultWSHandshakeTimeout = 10 * time.Second + defaultWSReadTimeout = 2 * time.Minute + defaultWSWriteTimeout = 10 * time.Second + defaultCacheTtl = 1 * time.Hour + maxRequestBodyLogLen = 2000 + defaultMaxUpstreamBatchSize = 10 + defaultRateLimitHeader = "X-Forwarded-For" +) + +var emptyArrayResponse = json.RawMessage("[]") + +type Server struct { + BackendGroups map[string]*BackendGroup + wsBackendGroup *BackendGroup + wsMethodWhitelist *StringSet + rpcMethodMappings map[string]string + maxBodySize int64 + enableRequestLog bool + maxRequestBodyLogLen int + authenticatedPaths map[string]string + timeout time.Duration + maxUpstreamBatchSize int + maxBatchSize int + enableServedByHeader bool + upgrader *websocket.Upgrader + mainLim FrontendRateLimiter + overrideLims map[string]FrontendRateLimiter + senderLim FrontendRateLimiter + allowedChainIds []*big.Int + limExemptOrigins []*regexp.Regexp + limExemptUserAgents []*regexp.Regexp + globallyLimitedMethods map[string]bool + rpcServer *http.Server + wsServer *http.Server + cache RPCCache + srvMu sync.Mutex + rateLimitHeader string +} + +type limiterFunc func(method string) bool + +func NewServer( + backendGroups map[string]*BackendGroup, + wsBackendGroup *BackendGroup, + wsMethodWhitelist *StringSet, + rpcMethodMappings map[string]string, + maxBodySize int64, + authenticatedPaths map[string]string, + timeout time.Duration, + maxUpstreamBatchSize int, + enableServedByHeader bool, + cache RPCCache, + rateLimitConfig RateLimitConfig, + senderRateLimitConfig SenderRateLimitConfig, + enableRequestLog bool, + maxRequestBodyLogLen int, + maxBatchSize int, + redisClient *redis.Client, +) (*Server, error) { + if cache == nil { + cache = &NoopRPCCache{} + } + + if maxBodySize == 0 { + maxBodySize = defaultBodySizeLimit + } + + if timeout == 0 { + timeout = defaultRPCTimeout + } + + if maxUpstreamBatchSize == 0 { + maxUpstreamBatchSize = defaultMaxUpstreamBatchSize + } + + if maxBatchSize == 0 { + maxBatchSize = DefaultMaxBatchRPCCallsLimit + } + + if maxBatchSize > MaxBatchRPCCallsHardLimit { + maxBatchSize = MaxBatchRPCCallsHardLimit + } + + limiterFactory := func(dur time.Duration, max int, prefix string) FrontendRateLimiter { + if rateLimitConfig.UseRedis { + return NewRedisFrontendRateLimiter(redisClient, dur, max, prefix) + } + + return NewMemoryFrontendRateLimit(dur, max) + } + + var mainLim FrontendRateLimiter + limExemptOrigins := make([]*regexp.Regexp, 0) + limExemptUserAgents := make([]*regexp.Regexp, 0) + if rateLimitConfig.BaseRate > 0 { + mainLim = limiterFactory(time.Duration(rateLimitConfig.BaseInterval), rateLimitConfig.BaseRate, "main") + for _, 
origin := range rateLimitConfig.ExemptOrigins { + pattern, err := regexp.Compile(origin) + if err != nil { + return nil, err + } + limExemptOrigins = append(limExemptOrigins, pattern) + } + for _, agent := range rateLimitConfig.ExemptUserAgents { + pattern, err := regexp.Compile(agent) + if err != nil { + return nil, err + } + limExemptUserAgents = append(limExemptUserAgents, pattern) + } + } else { + mainLim = NoopFrontendRateLimiter + } + + overrideLims := make(map[string]FrontendRateLimiter) + globalMethodLims := make(map[string]bool) + for method, override := range rateLimitConfig.MethodOverrides { + overrideLims[method] = limiterFactory(time.Duration(override.Interval), override.Limit, method) + + if override.Global { + globalMethodLims[method] = true + } + } + var senderLim FrontendRateLimiter + if senderRateLimitConfig.Enabled { + senderLim = limiterFactory(time.Duration(senderRateLimitConfig.Interval), senderRateLimitConfig.Limit, "senders") + } + + rateLimitHeader := defaultRateLimitHeader + if rateLimitConfig.IPHeaderOverride != "" { + rateLimitHeader = rateLimitConfig.IPHeaderOverride + } + + return &Server{ + BackendGroups: backendGroups, + wsBackendGroup: wsBackendGroup, + wsMethodWhitelist: wsMethodWhitelist, + rpcMethodMappings: rpcMethodMappings, + maxBodySize: maxBodySize, + authenticatedPaths: authenticatedPaths, + timeout: timeout, + maxUpstreamBatchSize: maxUpstreamBatchSize, + enableServedByHeader: enableServedByHeader, + cache: cache, + enableRequestLog: enableRequestLog, + maxRequestBodyLogLen: maxRequestBodyLogLen, + maxBatchSize: maxBatchSize, + upgrader: &websocket.Upgrader{ + HandshakeTimeout: defaultWSHandshakeTimeout, + }, + mainLim: mainLim, + overrideLims: overrideLims, + globallyLimitedMethods: globalMethodLims, + senderLim: senderLim, + allowedChainIds: senderRateLimitConfig.AllowedChainIds, + limExemptOrigins: limExemptOrigins, + limExemptUserAgents: limExemptUserAgents, + rateLimitHeader: rateLimitHeader, + }, nil +} + +func (s *Server) RPCListenAndServe(host string, port int) error { + s.srvMu.Lock() + hdlr := mux.NewRouter() + hdlr.HandleFunc("/healthz", s.HandleHealthz).Methods("GET") + hdlr.HandleFunc("/", s.HandleRPC).Methods("POST") + hdlr.HandleFunc("/{authorization}", s.HandleRPC).Methods("POST") + c := cors.New(cors.Options{ + AllowedOrigins: []string{"*"}, + }) + addr := fmt.Sprintf("%s:%d", host, port) + s.rpcServer = &http.Server{ + Handler: instrumentedHdlr(c.Handler(hdlr)), + Addr: addr, + } + log.Info("starting HTTP server", "addr", addr) + s.srvMu.Unlock() + return s.rpcServer.ListenAndServe() +} + +func (s *Server) WSListenAndServe(host string, port int) error { + s.srvMu.Lock() + hdlr := mux.NewRouter() + hdlr.HandleFunc("/", s.HandleWS) + hdlr.HandleFunc("/{authorization}", s.HandleWS) + c := cors.New(cors.Options{ + AllowedOrigins: []string{"*"}, + }) + addr := fmt.Sprintf("%s:%d", host, port) + s.wsServer = &http.Server{ + Handler: instrumentedHdlr(c.Handler(hdlr)), + Addr: addr, + } + log.Info("starting WS server", "addr", addr) + s.srvMu.Unlock() + return s.wsServer.ListenAndServe() +} + +func (s *Server) Shutdown() { + s.srvMu.Lock() + defer s.srvMu.Unlock() + if s.rpcServer != nil { + _ = s.rpcServer.Shutdown(context.Background()) + } + if s.wsServer != nil { + _ = s.wsServer.Shutdown(context.Background()) + } + for _, bg := range s.BackendGroups { + bg.Shutdown() + } +} + +func (s *Server) HandleHealthz(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("OK")) +} + +func (s *Server) HandleRPC(w http.ResponseWriter, 
r *http.Request) { + ctx := s.populateContext(w, r) + if ctx == nil { + return + } + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, s.timeout) + defer cancel() + + origin := r.Header.Get("Origin") + userAgent := r.Header.Get("User-Agent") + // Use XFF in context since it will automatically be replaced by the remote IP + xff := stripXFF(GetXForwardedFor(ctx)) + isUnlimitedOrigin := s.isUnlimitedOrigin(origin) + isUnlimitedUserAgent := s.isUnlimitedUserAgent(userAgent) + + if xff == "" { + writeRPCError(ctx, w, nil, ErrInvalidRequest("request does not include a remote IP")) + return + } + + isLimited := func(method string) bool { + isGloballyLimitedMethod := s.isGlobalLimit(method) + if !isGloballyLimitedMethod && (isUnlimitedOrigin || isUnlimitedUserAgent) { + return false + } + + var lim FrontendRateLimiter + if method == "" { + lim = s.mainLim + } else { + lim = s.overrideLims[method] + } + + if lim == nil { + return false + } + + ok, err := lim.Take(ctx, xff) + if err != nil { + log.Warn("error taking rate limit", "err", err) + return true + } + return !ok + } + + if isLimited("") { + RecordRPCError(ctx, BackendProxyd, "unknown", ErrOverRateLimit) + log.Warn( + "rate limited request", + "req_id", GetReqID(ctx), + "auth", GetAuthCtx(ctx), + "user_agent", userAgent, + "origin", origin, + "remote_ip", xff, + ) + writeRPCError(ctx, w, nil, ErrOverRateLimit) + return + } + + log.Info( + "received RPC request", + "req_id", GetReqID(ctx), + "auth", GetAuthCtx(ctx), + "user_agent", userAgent, + "origin", origin, + "remote_ip", xff, + ) + + body, err := io.ReadAll(LimitReader(r.Body, s.maxBodySize)) + if errors.Is(err, ErrLimitReaderOverLimit) { + log.Error("request body too large", "req_id", GetReqID(ctx)) + RecordRPCError(ctx, BackendProxyd, MethodUnknown, ErrRequestBodyTooLarge) + writeRPCError(ctx, w, nil, ErrRequestBodyTooLarge) + return + } + if err != nil { + log.Error("error reading request body", "err", err) + writeRPCError(ctx, w, nil, ErrInternal) + return + } + RecordRequestPayloadSize(ctx, len(body)) + + if s.enableRequestLog { + log.Info("Raw RPC request", + "body", truncate(string(body), s.maxRequestBodyLogLen), + "req_id", GetReqID(ctx), + "auth", GetAuthCtx(ctx), + ) + } + + if IsBatch(body) { + reqs, err := ParseBatchRPCReq(body) + if err != nil { + log.Error("error parsing batch RPC request", "err", err) + RecordRPCError(ctx, BackendProxyd, MethodUnknown, err) + writeRPCError(ctx, w, nil, ErrParseErr) + return + } + + RecordBatchSize(len(reqs)) + + if len(reqs) > s.maxBatchSize { + RecordRPCError(ctx, BackendProxyd, MethodUnknown, ErrTooManyBatchRequests) + writeRPCError(ctx, w, nil, ErrTooManyBatchRequests) + return + } + + if len(reqs) == 0 { + writeRPCError(ctx, w, nil, ErrInvalidRequest("must specify at least one batch call")) + return + } + + batchRes, batchContainsCached, servedBy, err := s.handleBatchRPC(ctx, reqs, isLimited, true) + if err == context.DeadlineExceeded { + writeRPCError(ctx, w, nil, ErrGatewayTimeout) + return + } + if errors.Is(err, ErrConsensusGetReceiptsCantBeBatched) || + errors.Is(err, ErrConsensusGetReceiptsInvalidTarget) { + writeRPCError(ctx, w, nil, ErrInvalidRequest(err.Error())) + return + } + if err != nil { + writeRPCError(ctx, w, nil, ErrInternal) + return + } + if s.enableServedByHeader { + w.Header().Set("x-served-by", servedBy) + } + setCacheHeader(w, batchContainsCached) + writeBatchRPCRes(ctx, w, batchRes) + return + } + + rawBody := json.RawMessage(body) + backendRes, cached, servedBy, err := 
s.handleBatchRPC(ctx, []json.RawMessage{rawBody}, isLimited, false) + if err != nil { + if errors.Is(err, ErrConsensusGetReceiptsCantBeBatched) || + errors.Is(err, ErrConsensusGetReceiptsInvalidTarget) { + writeRPCError(ctx, w, nil, ErrInvalidRequest(err.Error())) + return + } + writeRPCError(ctx, w, nil, ErrInternal) + return + } + if s.enableServedByHeader { + w.Header().Set("x-served-by", servedBy) + } + setCacheHeader(w, cached) + writeRPCRes(ctx, w, backendRes[0]) +} + +func (s *Server) handleBatchRPC(ctx context.Context, reqs []json.RawMessage, isLimited limiterFunc, isBatch bool) ([]*RPCRes, bool, string, error) { + // A request set is transformed into groups of batches. + // Each batch group maps to a forwarded JSON-RPC batch request (subject to maxUpstreamBatchSize constraints) + // A groupID is used to decouple Requests that have duplicate ID so they're not part of the same batch that's + // forwarded to the backend. This is done to ensure that the order of JSON-RPC Responses match the Request order + // as the backend MAY return Responses out of order. + // NOTE: Duplicate request ids induces 1-sized JSON-RPC batches + type batchGroup struct { + groupID int + backendGroup string + } + + responses := make([]*RPCRes, len(reqs)) + batches := make(map[batchGroup][]batchElem) + ids := make(map[string]int, len(reqs)) + + for i := range reqs { + parsedReq, err := ParseRPCReq(reqs[i]) + if err != nil { + log.Info("error parsing RPC call", "source", "rpc", "err", err) + responses[i] = NewRPCErrorRes(nil, err) + continue + } + + // Simple health check + if len(reqs) == 1 && parsedReq.Method == proxydHealthzMethod { + res := &RPCRes{ + ID: parsedReq.ID, + JSONRPC: JSONRPCVersion, + Result: "OK", + } + return []*RPCRes{res}, false, "", nil + } + + if err := ValidateRPCReq(parsedReq); err != nil { + RecordRPCError(ctx, BackendProxyd, MethodUnknown, err) + responses[i] = NewRPCErrorRes(nil, err) + continue + } + + if parsedReq.Method == "eth_accounts" { + RecordRPCForward(ctx, BackendProxyd, "eth_accounts", RPCRequestSourceHTTP) + responses[i] = NewRPCRes(parsedReq.ID, emptyArrayResponse) + continue + } + + group := s.rpcMethodMappings[parsedReq.Method] + if group == "" { + // use unknown below to prevent DOS vector that fills up memory + // with arbitrary method names. + log.Info( + "blocked request for non-whitelisted method", + "source", "rpc", + "req_id", GetReqID(ctx), + "method", parsedReq.Method, + ) + RecordRPCError(ctx, BackendProxyd, MethodUnknown, ErrMethodNotWhitelisted) + responses[i] = NewRPCErrorRes(parsedReq.ID, ErrMethodNotWhitelisted) + continue + } + + // Take rate limit for specific methods. + // NOTE: eventually, this should apply to all batch requests. However, + // since we don't have data right now on the size of each batch, we + // only apply this to the methods that have an additional rate limit. + if _, ok := s.overrideLims[parsedReq.Method]; ok && isLimited(parsedReq.Method) { + log.Info( + "rate limited specific RPC", + "source", "rpc", + "req_id", GetReqID(ctx), + "method", parsedReq.Method, + ) + RecordRPCError(ctx, BackendProxyd, parsedReq.Method, ErrOverRateLimit) + responses[i] = NewRPCErrorRes(parsedReq.ID, ErrOverRateLimit) + continue + } + + // Apply a sender-based rate limit if it is enabled. Note that sender-based rate + // limits apply regardless of origin or user-agent. As such, they don't use the + // isLimited method. 
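+ // Only eth_sendRawTransaction is subject to the sender limit; the limiter key is derived from the transaction itself rather than the client IP.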
+ if parsedReq.Method == "eth_sendRawTransaction" && s.senderLim != nil { + if err := s.rateLimitSender(ctx, parsedReq); err != nil { + RecordRPCError(ctx, BackendProxyd, parsedReq.Method, err) + responses[i] = NewRPCErrorRes(parsedReq.ID, err) + continue + } + } + + id := string(parsedReq.ID) + // If this is a duplicate Request ID, move the Request to a new batchGroup + ids[id]++ + batchGroupID := ids[id] + batchGroup := batchGroup{groupID: batchGroupID, backendGroup: group} + batches[batchGroup] = append(batches[batchGroup], batchElem{parsedReq, i}) + } + + servedBy := make(map[string]bool, 0) + var cached bool + for group, batch := range batches { + var cacheMisses []batchElem + + for _, req := range batch { + backendRes, _ := s.cache.GetRPC(ctx, req.Req) + if backendRes != nil { + responses[req.Index] = backendRes + cached = true + } else { + cacheMisses = append(cacheMisses, req) + } + } + + // Create minibatches - each minibatch must be no larger than the maxUpstreamBatchSize + numBatches := int(math.Ceil(float64(len(cacheMisses)) / float64(s.maxUpstreamBatchSize))) + for i := 0; i < numBatches; i++ { + if ctx.Err() == context.DeadlineExceeded { + log.Info("short-circuiting batch RPC", + "req_id", GetReqID(ctx), + "auth", GetAuthCtx(ctx), + "batch_index", i, + ) + batchRPCShortCircuitsTotal.Inc() + return nil, false, "", context.DeadlineExceeded + } + + start := i * s.maxUpstreamBatchSize + end := int(math.Min(float64(start+s.maxUpstreamBatchSize), float64(len(cacheMisses)))) + elems := cacheMisses[start:end] + res, sb, err := s.BackendGroups[group.backendGroup].Forward(ctx, createBatchRequest(elems), isBatch) + servedBy[sb] = true + if err != nil { + if errors.Is(err, ErrConsensusGetReceiptsCantBeBatched) || + errors.Is(err, ErrConsensusGetReceiptsInvalidTarget) { + return nil, false, "", err + } + log.Error( + "error forwarding RPC batch", + "batch_size", len(elems), + "backend_group", group, + "req_id", GetReqID(ctx), + "err", err, + ) + res = nil + for _, elem := range elems { + res = append(res, NewRPCErrorRes(elem.Req.ID, err)) + } + } + + for i := range elems { + responses[elems[i].Index] = res[i] + + // TODO(inphi): batch put these + if res[i].Error == nil && res[i].Result != nil { + if err := s.cache.PutRPC(ctx, elems[i].Req, res[i]); err != nil { + log.Warn( + "cache put error", + "req_id", GetReqID(ctx), + "err", err, + ) + } + } + } + } + } + + servedByString := "" + for sb := range servedBy { + if servedByString != "" { + servedByString += ", " + } + servedByString += sb + } + + return responses, cached, servedByString, nil +} + +func (s *Server) HandleWS(w http.ResponseWriter, r *http.Request) { + ctx := s.populateContext(w, r) + if ctx == nil { + return + } + + log.Info("received WS connection", "req_id", GetReqID(ctx)) + + clientConn, err := s.upgrader.Upgrade(w, r, nil) + if err != nil { + log.Error("error upgrading client conn", "auth", GetAuthCtx(ctx), "req_id", GetReqID(ctx), "err", err) + return + } + clientConn.SetReadLimit(s.maxBodySize) + + proxier, err := s.wsBackendGroup.ProxyWS(ctx, clientConn, s.wsMethodWhitelist) + if err != nil { + if errors.Is(err, ErrNoBackends) { + RecordUnserviceableRequest(ctx, RPCRequestSourceWS) + } + log.Error("error dialing ws backend", "auth", GetAuthCtx(ctx), "req_id", GetReqID(ctx), "err", err) + clientConn.Close() + return + } + + activeClientWsConnsGauge.WithLabelValues(GetAuthCtx(ctx)).Inc() + go func() { + // Below call blocks so run it in a goroutine. 
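+ // Once Proxy returns the session is finished, so the active-connection gauge is decremented.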
+ if err := proxier.Proxy(ctx); err != nil { + log.Error("error proxying websocket", "auth", GetAuthCtx(ctx), "req_id", GetReqID(ctx), "err", err) + } + activeClientWsConnsGauge.WithLabelValues(GetAuthCtx(ctx)).Dec() + }() + + log.Info("accepted WS connection", "auth", GetAuthCtx(ctx), "req_id", GetReqID(ctx)) +} + +func (s *Server) populateContext(w http.ResponseWriter, r *http.Request) context.Context { + vars := mux.Vars(r) + authorization := vars["authorization"] + xff := r.Header.Get(s.rateLimitHeader) + if xff == "" { + ipPort := strings.Split(r.RemoteAddr, ":") + if len(ipPort) == 2 { + xff = ipPort[0] + } + } + ctx := context.WithValue(r.Context(), ContextKeyXForwardedFor, xff) // nolint:staticcheck + + if len(s.authenticatedPaths) > 0 { + if authorization == "" || s.authenticatedPaths[authorization] == "" { + log.Info("blocked unauthorized request", "authorization", authorization) + httpResponseCodesTotal.WithLabelValues("401").Inc() + w.WriteHeader(401) + return nil + } + + ctx = context.WithValue(ctx, ContextKeyAuth, s.authenticatedPaths[authorization]) // nolint:staticcheck + } + + return context.WithValue( + ctx, + ContextKeyReqID, // nolint:staticcheck + randStr(10), + ) +} + +func randStr(l int) string { + b := make([]byte, l) + if _, err := rand.Read(b); err != nil { + panic(err) + } + return hex.EncodeToString(b) +} + +func (s *Server) isUnlimitedOrigin(origin string) bool { + for _, pat := range s.limExemptOrigins { + if pat.MatchString(origin) { + return true + } + } + + return false +} + +func (s *Server) isUnlimitedUserAgent(origin string) bool { + for _, pat := range s.limExemptUserAgents { + if pat.MatchString(origin) { + return true + } + } + return false +} + +func (s *Server) isGlobalLimit(method string) bool { + return s.globallyLimitedMethods[method] +} + +func (s *Server) rateLimitSender(ctx context.Context, req *RPCReq) error { + var params []string + if err := json.Unmarshal(req.Params, ¶ms); err != nil { + log.Debug("error unmarshalling raw transaction params", "err", err, "req_Id", GetReqID(ctx)) + return ErrParseErr + } + + if len(params) != 1 { + log.Debug("raw transaction request has invalid number of params", "req_id", GetReqID(ctx)) + // The error below is identical to the one Geth responds with. + return ErrInvalidParams("missing value for required argument 0") + } + + var data hexutil.Bytes + if err := data.UnmarshalText([]byte(params[0])); err != nil { + log.Debug("error decoding raw tx data", "err", err, "req_id", GetReqID(ctx)) + // Geth returns the raw error from UnmarshalText. + return ErrInvalidParams(err.Error()) + } + + // Inflates a types.Transaction object from the transaction's raw bytes. + tx := new(types.Transaction) + if err := tx.UnmarshalBinary(data); err != nil { + log.Debug("could not unmarshal transaction", "err", err, "req_id", GetReqID(ctx)) + return ErrInvalidParams(err.Error()) + } + + // Check if the transaction is for the expected chain, + // otherwise reject before rate limiting to avoid replay attacks. + if !s.isAllowedChainId(tx.ChainId()) { + log.Debug("chain id is not allowed", "req_id", GetReqID(ctx)) + return txpool.ErrInvalidSender + } + + // Convert the transaction into a Message object so that we can get the + // sender. This method performs an ecrecover, which can be expensive. 
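+ // The recovered sender address is combined with the transaction nonce to form the limiter key below.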
+ msg, err := core.TransactionToMessage(tx, types.LatestSignerForChainID(tx.ChainId()), nil) + if err != nil { + log.Debug("could not get message from transaction", "err", err, "req_id", GetReqID(ctx)) + return ErrInvalidParams(err.Error()) + } + ok, err := s.senderLim.Take(ctx, fmt.Sprintf("%s:%d", msg.From.Hex(), tx.Nonce())) + if err != nil { + log.Error("error taking from sender limiter", "err", err, "req_id", GetReqID(ctx)) + return ErrInternal + } + if !ok { + log.Debug("sender rate limit exceeded", "sender", msg.From.Hex(), "req_id", GetReqID(ctx)) + return ErrOverSenderRateLimit + } + + return nil +} + +func (s *Server) isAllowedChainId(chainId *big.Int) bool { + if s.allowedChainIds == nil || len(s.allowedChainIds) == 0 { + return true + } + for _, id := range s.allowedChainIds { + if chainId.Cmp(id) == 0 { + return true + } + } + return false +} + +func setCacheHeader(w http.ResponseWriter, cached bool) { + if cached { + w.Header().Set(cacheStatusHdr, "HIT") + } else { + w.Header().Set(cacheStatusHdr, "MISS") + } +} + +func writeRPCError(ctx context.Context, w http.ResponseWriter, id json.RawMessage, err error) { + var res *RPCRes + if r, ok := err.(*RPCErr); ok { + res = NewRPCErrorRes(id, r) + } else { + res = NewRPCErrorRes(id, ErrInternal) + } + writeRPCRes(ctx, w, res) +} + +func writeRPCRes(ctx context.Context, w http.ResponseWriter, res *RPCRes) { + statusCode := 200 + if res.IsError() && res.Error.HTTPErrorCode != 0 { + statusCode = res.Error.HTTPErrorCode + } + + w.Header().Set("content-type", "application/json") + w.WriteHeader(statusCode) + ww := &recordLenWriter{Writer: w} + enc := json.NewEncoder(ww) + if err := enc.Encode(res); err != nil { + log.Error("error writing rpc response", "err", err) + RecordRPCError(ctx, BackendProxyd, MethodUnknown, err) + return + } + httpResponseCodesTotal.WithLabelValues(strconv.Itoa(statusCode)).Inc() + RecordResponsePayloadSize(ctx, ww.Len) +} + +func writeBatchRPCRes(ctx context.Context, w http.ResponseWriter, res []*RPCRes) { + w.Header().Set("content-type", "application/json") + w.WriteHeader(200) + ww := &recordLenWriter{Writer: w} + enc := json.NewEncoder(ww) + if err := enc.Encode(res); err != nil { + log.Error("error writing batch rpc response", "err", err) + RecordRPCError(ctx, BackendProxyd, MethodUnknown, err) + return + } + RecordResponsePayloadSize(ctx, ww.Len) +} + +func instrumentedHdlr(h http.Handler) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + respTimer := prometheus.NewTimer(httpRequestDurationSumm) + h.ServeHTTP(w, r) + respTimer.ObserveDuration() + } +} + +func GetAuthCtx(ctx context.Context) string { + authUser, ok := ctx.Value(ContextKeyAuth).(string) + if !ok { + return "none" + } + + return authUser +} + +func GetReqID(ctx context.Context) string { + reqId, ok := ctx.Value(ContextKeyReqID).(string) + if !ok { + return "" + } + return reqId +} + +func GetXForwardedFor(ctx context.Context) string { + xff, ok := ctx.Value(ContextKeyXForwardedFor).(string) + if !ok { + return "" + } + return xff +} + +type recordLenWriter struct { + io.Writer + Len int +} + +func (w *recordLenWriter) Write(p []byte) (n int, err error) { + n, err = w.Writer.Write(p) + w.Len += n + return +} + +type NoopRPCCache struct{} + +func (n *NoopRPCCache) GetRPC(context.Context, *RPCReq) (*RPCRes, error) { + return nil, nil +} + +func (n *NoopRPCCache) PutRPC(context.Context, *RPCReq, *RPCRes) error { + return nil +} + +func truncate(str string, maxLen int) string { + if maxLen == 0 { + maxLen = 
maxRequestBodyLogLen + } + + if len(str) > maxLen { + return str[:maxLen] + "..." + } else { + return str + } +} + +type batchElem struct { + Req *RPCReq + Index int +} + +func createBatchRequest(elems []batchElem) []*RPCReq { + batch := make([]*RPCReq, len(elems)) + for i := range elems { + batch[i] = elems[i].Req + } + return batch +} diff --git a/proxyd/string_set.go b/proxyd/string_set.go new file mode 100644 index 0000000..4582349 --- /dev/null +++ b/proxyd/string_set.go @@ -0,0 +1,56 @@ +package proxyd + +import "sync" + +type StringSet struct { + underlying map[string]bool + mtx sync.RWMutex +} + +func NewStringSet() *StringSet { + return &StringSet{ + underlying: make(map[string]bool), + } +} + +func NewStringSetFromStrings(in []string) *StringSet { + underlying := make(map[string]bool) + for _, str := range in { + underlying[str] = true + } + return &StringSet{ + underlying: underlying, + } +} + +func (s *StringSet) Has(test string) bool { + s.mtx.RLock() + defer s.mtx.RUnlock() + return s.underlying[test] +} + +func (s *StringSet) Add(str string) { + s.mtx.Lock() + defer s.mtx.Unlock() + s.underlying[str] = true +} + +func (s *StringSet) Entries() []string { + s.mtx.RLock() + defer s.mtx.RUnlock() + out := make([]string, len(s.underlying)) + var i int + for entry := range s.underlying { + out[i] = entry + i++ + } + return out +} + +func (s *StringSet) Extend(in []string) *StringSet { + out := NewStringSetFromStrings(in) + for k := range s.underlying { + out.Add(k) + } + return out +} diff --git a/proxyd/tls.go b/proxyd/tls.go new file mode 100644 index 0000000..ed2bdaf --- /dev/null +++ b/proxyd/tls.go @@ -0,0 +1,33 @@ +package proxyd + +import ( + "crypto/tls" + "crypto/x509" + "errors" + "os" +) + +func CreateTLSClient(ca string) (*tls.Config, error) { + pem, err := os.ReadFile(ca) + if err != nil { + return nil, wrapErr(err, "error reading CA") + } + + roots := x509.NewCertPool() + ok := roots.AppendCertsFromPEM(pem) + if !ok { + return nil, errors.New("error parsing TLS client cert") + } + + return &tls.Config{ + RootCAs: roots, + }, nil +} + +func ParseKeyPair(crt, key string) (tls.Certificate, error) { + cert, err := tls.LoadX509KeyPair(crt, key) + if err != nil { + return tls.Certificate{}, wrapErr(err, "error loading x509 key pair") + } + return cert, nil +} diff --git a/proxyd/tools/mockserver/handler/handler.go b/proxyd/tools/mockserver/handler/handler.go new file mode 100644 index 0000000..0f9bfca --- /dev/null +++ b/proxyd/tools/mockserver/handler/handler.go @@ -0,0 +1,135 @@ +package handler + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "strings" + + "github.com/ethereum-optimism/optimism/proxyd" + + "github.com/gorilla/mux" + "github.com/pkg/errors" + "gopkg.in/yaml.v3" +) + +type MethodTemplate struct { + Method string `yaml:"method"` + Block string `yaml:"block"` + Response string `yaml:"response"` +} + +type MockedHandler struct { + Overrides []*MethodTemplate + Autoload bool + AutoloadFile string +} + +func (mh *MockedHandler) Serve(port int) error { + r := mux.NewRouter() + r.HandleFunc("/", mh.Handler) + http.Handle("/", r) + fmt.Printf("starting server up on :%d serving MockedResponsesFile %s\n", port, mh.AutoloadFile) + err := http.ListenAndServe(fmt.Sprintf(":%d", port), nil) + + if errors.Is(err, http.ErrServerClosed) { + fmt.Printf("server closed\n") + } else if err != nil { + fmt.Printf("error starting server: %s\n", err) + return err + } + return nil +} + +func (mh *MockedHandler) Handler(w http.ResponseWriter, req *http.Request) { 
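+ // Incoming calls are matched against the loaded templates by method and, where relevant, block parameter; calls without a matching template yield no response entry.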
+ body, err := io.ReadAll(req.Body) + if err != nil { + fmt.Printf("error reading request: %v\n", err) + } + + var template []*MethodTemplate + if mh.Autoload { + template = append(template, mh.LoadFromFile(mh.AutoloadFile)...) + } + if mh.Overrides != nil { + template = append(template, mh.Overrides...) + } + + batched := proxyd.IsBatch(body) + var requests []map[string]interface{} + if batched { + err = json.Unmarshal(body, &requests) + if err != nil { + fmt.Printf("error reading request: %v\n", err) + } + } else { + var j map[string]interface{} + err = json.Unmarshal(body, &j) + if err != nil { + fmt.Printf("error reading request: %v\n", err) + } + requests = append(requests, j) + } + + var responses []string + for _, r := range requests { + method := r["method"] + block := "" + if method == "eth_getBlockByNumber" || method == "debug_getRawReceipts" { + block = (r["params"].([]interface{})[0]).(string) + } + + var selectedResponse string + for _, r := range template { + if r.Method == method && r.Block == block { + selectedResponse = r.Response + } + } + if selectedResponse != "" { + var rpcRes proxyd.RPCRes + err = json.Unmarshal([]byte(selectedResponse), &rpcRes) + if err != nil { + panic(err) + } + idJson, _ := json.Marshal(r["id"]) + rpcRes.ID = idJson + res, _ := json.Marshal(rpcRes) + responses = append(responses, string(res)) + } + } + + resBody := "" + if batched { + resBody = "[" + strings.Join(responses, ",") + "]" + } else if len(responses) > 0 { + resBody = responses[0] + } + + _, err = fmt.Fprint(w, resBody) + if err != nil { + fmt.Printf("error writing response: %v\n", err) + } +} + +func (mh *MockedHandler) LoadFromFile(file string) []*MethodTemplate { + contents, err := os.ReadFile(file) + if err != nil { + fmt.Printf("error reading MockedResponsesFile: %v\n", err) + } + var template []*MethodTemplate + err = yaml.Unmarshal(contents, &template) + if err != nil { + fmt.Printf("error reading MockedResponsesFile: %v\n", err) + } + return template +} + +func (mh *MockedHandler) AddOverride(template *MethodTemplate) { + mh.Overrides = append(mh.Overrides, template) +} + +func (mh *MockedHandler) ResetOverrides() { + mh.Overrides = make([]*MethodTemplate, 0) +} diff --git a/proxyd/tools/mockserver/main.go b/proxyd/tools/mockserver/main.go new file mode 100644 index 0000000..a58fc06 --- /dev/null +++ b/proxyd/tools/mockserver/main.go @@ -0,0 +1,30 @@ +package main + +import ( + "fmt" + "os" + "path" + "strconv" + + "github.com/ethereum-optimism/optimism/proxyd/tools/mockserver/handler" +) + +func main() { + if len(os.Args) < 3 { + fmt.Printf("simply mock a response based on an external text MockedResponsesFile\n") + fmt.Printf("usage: mockserver <port> <MockedResponsesFile>\n") + os.Exit(1) + } + port, _ := strconv.ParseInt(os.Args[1], 10, 32) + dir, _ := os.Getwd() + + h := handler.MockedHandler{ + Autoload: true, + AutoloadFile: path.Join(dir, os.Args[2]), + } + + err := h.Serve(int(port)) + if err != nil { + fmt.Printf("error starting mockserver: %v\n", err) + } +} diff --git a/proxyd/tools/mockserver/node1.yml b/proxyd/tools/mockserver/node1.yml new file mode 100644 index 0000000..313c653 --- /dev/null +++ b/proxyd/tools/mockserver/node1.yml @@ -0,0 +1,52 @@ +- method: eth_getBlockByNumber + block: latest + response: > + { + "jsonrpc": "2.0", + "id": 67, + "result": { + "hash": "hash2", + "number": "0x2" + } + } +- method: eth_getBlockByNumber + block: 0x1 + response: > + { + "jsonrpc": "2.0", + "id": 67, + "result": { + "hash": "hash1", + "number": "0x1" + } + } +- method: eth_getBlockByNumber + 
block: 0x2 + response: > + { + "jsonrpc": "2.0", + "id": 67, + "result": { + "hash": "hash2", + "number": "0x2" + } + } +- method: eth_getBlockByNumber + block: 0x3 + response: > + { + "jsonrpc": "2.0", + "id": 67, + "result": { + "hash": "hash34", + "number": "0x3" + } + } +- method: debug_getRawReceipts + block: 0x88420081ab9c6d50dc57af36b541c6b8a7b3e9c0d837b0414512c4c5883560ff + response: > + {"jsonrpc":"2.0","id":1,"result":[]} +- method: debug_getRawReceipts + block: 0x88420081ab9c6d50dc57af36b541c6b8a7b3e9c0d837b0414512c4c5883560bb + response: > + {"jsonrpc":"2.0","id":1,"result":["0x02f902c10183037ec5b9010000000000000000000000000200000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000400000000000000000008000000000000000000000000000000000000020000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000200000000000000000000000000000000f901b6f8d994297a60578fd0e13076bf06cfeb2bbcd74f2680d2e1a0e5c486bee358a5fff5e4d70dc5fdaaf14806df125ffde843a8c40db608264812b8a00000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000282bde1ac0c0000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000016573937382e393939393939393939393939395334393600000000000000000000f8d994297a60578fd0e13076bf06cfeb2bbcd74f2680d2e1a01309ab74031e37b46ee8ce9ff667a17a5c69a500a05d167e4c89ad8b0bc40bf9b8a0000000000000000000000000cd28ab95ae80b31255b2258a116cd2c1a371e0f3000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000652d2a700000000000000000000000000000000000000000000000000000000000000016573937382e393939393939393939393939395334393600000000000000000000","0x02f9039f01830645cdb9010000000020000000000000001000000000000000008004000000000000004000000000000000000002000000000000000000000000000000000000000000000000000000a00000000000000000000000200000000000000000000000000000000000000000000000400000000000000000000000000000000000000008000000000000000000004000400800000020000000000000000000000002000000000000000000000000000000200000000000040000000000000000000000001000000100100200000002000000000100000000010000020000000000000000000000000010000000000000000000000000000000000000000000000000000008040000f90294f8dc944638ac6b5727a8b9586d3eba5b44be4b74ed41fcf863a02ac69ee804d9a7a0984249f508dfab7cb2534b465b6ce1580f99a38ba9c5e631a0000000000000000000000000f08f78880122a9ee99f4f68dcab02177aeb08160a0000000000000000000000000f08f78880122a9ee99f4f68dcab02177aeb08160b8600000000000000000000000000000000000000000000000000032cdc63449c00000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000f8dc944638ac6b5727a8b9586d3eba5b44be4b74ed41fcf863a031b2166ff604fc5672ea5df08a78081d2bc6d746cadce880747f3643d819e83da0000000000000000000000000f08f78880122a9ee99f4f68dcab02177aeb08160a0000000000000000000000000f08f78880122a9ee99f4f68dcab02177aeb08160b8600000000000000000000000000000000000000000000000000032cdc63449c00000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000f85a947ad11bb9216bc9dc4cbd488d7618cbfd433d1e75f842a04641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b
[raw data blob: array of hex-encoded transaction receipts (EIP-1559 typed RLP payloads with bloom filters and event logs); truncated at both ends, original diff context lost]
f63ae90196a1e490d362e6f6a547a59bc809ee2291782b8a0000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000024cba3965500000000000000000000000000000000000000000000000000000000652d2a550000000000000000000000000000000000000000000000000000000000000008574554482f555344000000000000000000000000000000000000000000000000","0x02f901e50183e82377b9010000000000000000000000000000000000000000000000000000000000000000000000080000000000001000000020000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f8dbf8d994b52a8b962ff3d8a6a0937896ff3da3879eac64e3e1a0a7fc99ed7617309ee23f63ae90196a1e490d362e6f6a547a59bc809ee2291782b8a000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000005f6321e00000000000000000000000000000000000000000000000000000000652d2a560000000000000000000000000000000000000000000000000000000000000008555344542f555344000000000000000000000000000000000000000000000000","0xf901090183e8ef8fb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0","0x02f901e50183e9883cb9010000000002000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000f8dbf8d994117a5ab00f93469bea455f0864ef9ad8d9630cc9e1a0a7fc99ed7617309ee23f63ae90196a1e490d362e6f6a547a59bc809ee2291782b8a0000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000007e060100000000000000000000000000000000000000000000000000000000652d2a5800000000000000000000000000000000000000000000000000000000000000074752542f55534400000000000000000000000000000000000000000000000000","0x02f901e50183ea2101b9010000000000000000000000000000000000000000000000000000004000000000000000000000000000001000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000800000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f8dbf8d994bbbf9614de2b788a66d970b552a79fae6419abdce1a0a7fc99ed7617309ee23f63ae90196a1e490d362e6f6a547a59bc809ee2291782b8a00000000000000000
0000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000005eebd8700000000000000000000000000000000000000000000000000000000652d2a580000000000000000000000000000000000000000000000000000000000000008465241582f555344000000000000000000000000000000000000000000000000","0x02f901e50183eab9d2b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200f8dbf8d99439f46d72bb20c7bcb8a2cdf52630fac1496e859ae1a0a7fc99ed7617309ee23f63ae90196a1e490d362e6f6a547a59bc809ee2291782b8a0000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000024f8bc612d00000000000000000000000000000000000000000000000000000000652d2a5a0000000000000000000000000000000000000000000000000000000000000008574554482f555344000000000000000000000000000000000000000000000000","0x02f901e50183eb528bb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000020000000010000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f8dbf8d9945ae58e9dec27619572a42dad916e413afa89e46de1a0a7fc99ed7617309ee23f63ae90196a1e490d362e6f6a547a59bc809ee2291782b8a000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000019d1f00000000000000000000000000000000000000000000000000000000652d2a5c000000000000000000000000000000000000000000000000000000000000000842414e4b2f555344000000000000000000000000000000000000000000000000","0x02f9043a0183ed1237b9010000200000000000000000002080000000000000000000000000010000000000000000000000000000000000000000200000000000000000000000020000000000000000000400000000002008000000300000000000000000000000008000000000000000008000100000000000000000000000000000000000020010000000000000000080000000004000000000000000000001000000080000104000000000000000000000000000000000000000000000000000000000000000000000000000204002000000001000000000000000000000000000001000000000000020000000000000000000000000000000000000000000000001400000000000000000f9032ff87a94b4fbf271143f4fbf7b91a5ded31805e42b2208d6f842a0e1fffcc4923d04b559f4d29a8bfc6cda04eb5b0d3c460751c2402c5c5cc9109ca00000000000000000000000007a250d5630b4cf539739df2c5dacb4c659f2488da0000000000000000000000000000000000000000000000000000000e8d4a51000f89b94b4fbf271143f4fbf7b91a5ded31805e42b2208d6f863a0ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3efa00000000000000000000000007a250d5630b4cf539739df2c5dacb4c659f2488da000000000000000000000000077f0ea26bae49c9f412a1511730c7c1d3382f697a0000000000000000000000000000000000000000000000000000000e8d4a51000f89b94257c2c98163227062e5a33095a20fc7604ee52b5f863a0ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3efa000000000000000000000000077f0ea26bae49c9f412a1511730c7c1d33
82f697a0000000000000000000000000185d901fe591ce516ed9e192b33da3ef14d53b93a0000000000000000000000000000000000000000000000000042a7d29b88844e5f8799477f0ea26bae49c9f412a1511730c7c1d3382f697e1a01c411e9a96e071241c2f21f7726b17ae89e3cab4c78be50e062b03a9fffbbad1b840000000000000000000000000000000000000000000007b61de650eaa9bccab150000000000000000000000000000000000000000000000001adafd27de2bd68bf8fc9477f0ea26bae49c9f412a1511730c7c1d3382f697f863a0d78ad95fa46c994b6551d0da85fc275fe613ce37657fb8d5e3d130840159d822a00000000000000000000000007a250d5630b4cf539739df2c5dacb4c659f2488da0000000000000000000000000185d901fe591ce516ed9e192b33da3ef14d53b93b8800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e8d4a51000000000000000000000000000000000000000000000000000042a7d29b88844e50000000000000000000000000000000000000000000000000000000000000000","0x02f901e50183edaafcb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000800000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f8dbf8d994aac02884a376dc5145389ba37f08b0dde08d3f18e1a0a7fc99ed7617309ee23f63ae90196a1e490d362e6f6a547a59bc809ee2291782b8a00000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000b37b80500000000000000000000000000000000000000000000000000000000652d2a5e0000000000000000000000000000000000000000000000000000000000000008445944582f555344000000000000000000000000000000000000000000000000","0x02f901e50183ee43c1b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000008000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400040000000000000000004000000000000000000000000000000000000000000000000000000000000000000000000000f8dbf8d994402a30f83bbfc2203e1fc5d8a9bb41e1b0ddc639e1a0a7fc99ed7617309ee23f63ae90196a1e490d362e6f6a547a59bc809ee2291782b8a0000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000024df2e4ee100000000000000000000000000000000000000000000000000000000652d2a5f00000000000000000000000000000000000000000000000000000000000000074554482f55534400000000000000000000000000000000000000000000000000","0x02f901e50183eedc86b9010000000000000000000000000000000000000000000000000000000000000000040000000000000000001000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000f8dbf8d9943ae963e586b6c1d16f371ac0a1260cdaed6a76bde1a0a7fc99ed7617309ee23f63ae90196a1e490d362e6f6a547a59bc809ee2291782b8a00000000000000000000000000000000000
00000000000000000000000000006000000000000000000000000000000000000000000000000000000024dcbf623800000000000000000000000000000000000000000000000000000000652d2a5f00000000000000000000000000000000000000000000000000000000000000074554482f55534400000000000000000000000000000000000000000000000000","0x02f901e50183ef753fb9010000000000000000000000000000000000000000000000000000000000000040000000000000000000001000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000f8dbf8d994c8f4aeb27fce1f361cda3aadcda992c7ed7b0e74e1a0a7fc99ed7617309ee23f63ae90196a1e490d362e6f6a547a59bc809ee2291782b8a0000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000005b67ef00000000000000000000000000000000000000000000000000000000652d2a610000000000000000000000000000000000000000000000000000000000000008444f47452f555344000000000000000000000000000000000000000000000000","0x02f901e50183f00e04b9010000000000000000000000000000000000000000000000000000000004000000000000000000000000001000000000000400000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f8dbf8d99460cfba755fac7178e9a8e133699ad2f7dcf6ad9ae1a0a7fc99ed7617309ee23f63ae90196a1e490d362e6f6a547a59bc809ee2291782b8a0000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000229ca2bd00000000000000000000000000000000000000000000000000000000652d2a620000000000000000000000000000000000000000000000000000000000000008555255532f555344000000000000000000000000000000000000000000000000","0x02f901098083f0647ab9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0","0x02f901e50183f0fd33b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000010000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f8dbf8d9945e3b4e52af7f15f4e4e12033d71cfc3afbc7d3c0e1a0a7fc99ed7617309ee23f63ae90196a1e490d362e6f6a547a59bc809ee2291782b8a00000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000
0000000000000000000000002c0f30100000000000000000000000000000000000000000000000000000000652d2a6400000000000000000000000000000000000000000000000000000000000000074f4d472f55534400000000000000000000000000000000000000000000000000","0x02f901e50183f195ecb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000010000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000f8dbf8d994ffd9e1167e2ad8f323464832ad99a03bda99b7b7e1a0a7fc99ed7617309ee23f63ae90196a1e490d362e6f6a547a59bc809ee2291782b8a000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000150ea000000000000000000000000000000000000000000000000000000000652d2a64000000000000000000000000000000000000000000000000000000000000000847414c412f555344000000000000000000000000000000000000000000000000","0x02f901098083f22563b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0","0x02f901e50183f2be28b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000010000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f8dbf8d9940e324d90e9180df65e63438b2af37458b7b7b500e1a0a7fc99ed7617309ee23f63ae90196a1e490d362e6f6a547a59bc809ee2291782b8a0000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000004f47321cd00000000000000000000000000000000000000000000000000000000652d2a690000000000000000000000000000000000000000000000000000000000000007424e422f55534400000000000000000000000000000000000000000000000000","0x02f901e50183f356f9b9010000000000000000000000000000000000000000000000000000000000000000000000000000200000001000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000000f8dbf8d9944a7d0e32e82aea46773c348896761addc51dfb11e1a0a7fc99ed7617309ee23f63ae90196a1e490d362e6f6a547a59bc809ee2291782b8a0000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000105dd23061100000000000000000000000000000000
000000000000000000000000652d2a6900000000000000000000000000000000000000000000000000000000000000074254432f55534400000000000000000000000000000000000000000000000000","0x02f901e50183f3e4dab9010000000000000000000000000000000000000000000000000000000000000000000000000000000000001080000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f8dbf8d99416324d80bfc68b1fec6c288f0dac640a044d2678e1a0a7fc99ed7617309ee23f63ae90196a1e490d362e6f6a547a59bc809ee2291782b8a00000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000018227472200000000000000000000000000000000000000000000000000000000652d2a690000000000000000000000000000000000000000000000000000000000000008414156452f555344000000000000000000000000000000000000000000000000","0x02f901e50183f472c7b9010000000000000000000000000400000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000800040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f8dbf8d994e7a43467520e4d12d1f9e94b99d6f041786aadcee1a0a7fc99ed7617309ee23f63ae90196a1e490d362e6f6a547a59bc809ee2291782b8a0000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000285d0fb605900000000000000000000000000000000000000000000000000000000652d2a6a0000000000000000000000000000000000000000000000000000000000000008574254432f555344000000000000000000000000000000000000000000000000","0x02f901e50183f50b8cb9010000000000000000000000000000000000000000000000000000000000000000000000000000000002001000000000000000000000000000000000000010000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000f8dbf8d99407c4eee621098c526403b30bdcb17b3722719dcee1a0a7fc99ed7617309ee23f63ae90196a1e490d362e6f6a547a59bc809ee2291782b8a0000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000024dfc6ecd300000000000000000000000000000000000000000000000000000000652d2a6a00000000000000000000000000000000000000000000000000000000000000074554482f55534400000000000000000000000000000000000000000000000000","0x02f901e50183f59979b90100000000000000000000000000000000000000000000000000000000000000000000000800000000000010000000200000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
40000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f8dbf8d994b52a8b962ff3d8a6a0937896ff3da3879eac64e3e1a0a7fc99ed7617309ee23f63ae90196a1e490d362e6f6a547a59bc809ee2291782b8a00000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000002870fd6b6be00000000000000000000000000000000000000000000000000000000652d2a6a0000000000000000000000000000000000000000000000000000000000000008574254432f555344000000000000000000000000000000000000000000000000","0x02f904240183f78797b9010000000000100000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000800600000000000000000000000000000000000000000001082000000000001001000000000000000000000000000000020000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000580000000000000000000000200000000001000000000000000000000000000000000000000000000000000000000000000000000000020000000000000004000000000000000000000000020000000000000000000000000000000000000000000000000000000000000020000f90319f901dc949054f0d5f352fafe6ebf0ec14654da0362dc96caf842a0f6a97944f31ea060dfde0566e4167c1a1082551e64b60ecb14d599a9d023d451a00000000000000000000000000000000000000000000000000000000000011eb6b901800000000000000000000000000000000000000000000000d51cfff1e9a1af7c000000000000000000000000006af57e73d328e2a8ec95e01178d1e2a2a387d66a00000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000140000000000000000000000012332e3d78c6cc4a1a0c4dae81535bc000065fa80300000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000d51cfff1e9a1af7c000000000000000000000000000000000000000000000000d51cfff1e9a1af7c000000000000000000000000000000000000000000000000d51cfff1e9a1af7c000000000000000000000000000000000000000000000000d51cfff1e9a1af7c0000000000000000000000000000000000000000000000000000000000000000040001020300000000000000000000000000000000000000000000000000000000f89b949054f0d5f352fafe6ebf0ec14654da0362dc96caf863a00109fc6f55cf40689f02fbaad7af7fe7bbac8a3d2186600afc7d3e10cac60271a00000000000000000000000000000000000000000000000000000000000011eb6a00000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000652d2a70f89b949054f0d5f352fafe6ebf0ec14654da0362dc96caf863a00559884fd3a460db3073b7fc896cc77986f16e378210ded43186175bf646fc5fa00000000000000000000000000000000000000000000000d51cfff1e9a1af7c00a00000000000000000000000000000000000000000000000000000000000011eb6a000000000000000000000000000000000000000000000000000000000652d2a70","0x02f901e50183f8156cb9010000000000000000000000000000000000000000000000000000004000000000000000000000000000001000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000800000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f8dbf8d994bbbf9614de2b788a66d970b552a79fae6419abdce1a0a7fc99ed7617309ee23f63ae90196a1e490d362e6f6a547a59bc809ee2291782b8a000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000001c062a100000000000000000000000000000000000000000000000000000000652d2a6c000000000000000000000000000000
000000000000000000000000000000000853414e442f555344000000000000000000000000000000000000000000000000","0x02f901e50183f8ae31b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000f8dbf8d994e5d686595da780e6fbe88c31b77c1225974c89fbe1a0a7fc99ed7617309ee23f63ae90196a1e490d362e6f6a547a59bc809ee2291782b8a0000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000024df3d91e000000000000000000000000000000000000000000000000000000000652d2a6d00000000000000000000000000000000000000000000000000000000000000074554482f55534400000000000000000000000000000000000000000000000000","0x02f901e50183f93c06b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200f8dbf8d99439f46d72bb20c7bcb8a2cdf52630fac1496e859ae1a0a7fc99ed7617309ee23f63ae90196a1e490d362e6f6a547a59bc809ee2291782b8a000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000005f4f04b00000000000000000000000000000000000000000000000000000000652d2a6e0000000000000000000000000000000000000000000000000000000000000008555344432f555344000000000000000000000000000000000000000000000000","0x02f901090183f9a412b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0","0x02f901c80183faf748b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000008000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000080000000000000004000000000800000000000000000000000000000000000000000000000200000000000000000400000000010000000000000000000000000000000000000000000000004800200000000000000000004000000010000000000000000000000000000000f8bef8bc94e5ff3b57695079f808a24256734483cd3889fa9ef884a0a7aaf2512769da4e444e3de247be2564225c2e7a8f74cfe528e46e17d24868e2a08a767b2e89133b95f135e6803d3b75eb52b9636707f38d986de63283fd028beba0000000000000000000000000000000000000000000000000000000000000803ca000000000000000000000000000000000000000000000000000000000003c1c98a000000000000000000000000000000000000000000000000000000000652d2a70","0x02f901090183fb5cb4
b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0","0x02f901090183fbb880b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0","0x02f901090183fdf200b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0","0x02f901090183fe5538b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0","0x02f901090183febd24b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0","0xf90246018401009066b9010000000000000002000000000000000000000000000000000000800000000000000000000000000000000000000800000000000000000000000000000000000000000100000000000000000008020000000000040000000000000000000000000000000000000008000000000000000000100000000000000000800010800000000000000000000000000000010000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000002000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000040000000004000000000000044f9013af89b9470e53130c4638aa80d5a8abf12983f66e0b1d05ff863a0ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3efa00000000000000000000000003e62eb1d9f503f1db36bcfcabdaa7488718eee09a0000000000000000000000000e8c84a631d71e1bb7083d3a82a3a74870a286b97a0000000000000000000000000000000000000000000000000016345785d8a0000f8
9b943e62eb1d9f503f1db36bcfcabdaa7488718eee09f863a0cb339b570a7f0b25afa7333371ff11192092a0aeace12b671f4c212f2815c6fea0000000000000000000000000000000000000000000000000000000000000235aa0000000000000000000000000e8c84a631d71e1bb7083d3a82a3a74870a286b97a0191ae0366d4470dec94ac0ad040496453fecc903ffbad13da54e39498f84b0ff","0x02f9010a01840100f0f6b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0","0x02f9010a0184010158f2b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0","0x02f9010a01840101d1feb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0","0x02f9010a0184010239f6b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0","0x02f9010a01840102a1f2b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0","0x02f90365018401048fdfb901000000000200000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000002000000000000000200000000000000000080000000800000000000004000000000000000000200000000000000000000000000010000000020000000000000020000000001000080000000000000000000000000800000000000000000000000000000000000000000400000000000000000000000080000000000000000000000800000000200000000000000200000000000000000000000000000000000080000000040000000000000000000000200000000000000000000000400
0000000000000000000000200f90259f89b948f6d296024766b919473f0ef3b1f7addd3f710dff863a0ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3efa00000000000000000000000003dd070f9ee183bd667700d668b35b8932438118ea0000000000000000000000000f2ce1c36503401e5fcdecb17b53ac20939ac05d6a000000000000000000000000000000000000000000000000000000000000004d6f89b94d87ba7a50b2e7e660f678a895e4b72e7cb4ccd9cf863a0ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3efa0000000000000000000000000f2ce1c36503401e5fcdecb17b53ac20939ac05d6a00000000000000000000000003dd070f9ee183bd667700d668b35b8932438118ea00000000000000000000000000000000000000000000000000000000000550ab9f9011c943dd070f9ee183bd667700d668b35b8932438118ef863a0c42079f94a6350d7e6235f29174924f928cc2ac818eb64fed8004e115fbcca67a0000000000000000000000000e592427a0aece92de3edee1f18e0157c05861564a0000000000000000000000000f2ce1c36503401e5fcdecb17b53ac20939ac05d6b8a0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb2a0000000000000000000000000000000000000000000000000000000000550ab90000000000000000000000000000000000000042fb2bb8fcdc72e6c302336a2800000000000000000000000000000000000000000000000000000000609fce5a000000000000000000000000000000000000000000000000000000000001487c","0x02f9010a01840105994fb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0","0x02f9010a01840106126bb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0","0xf9018601840106f418b9010000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000001000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000010f87bf87994b33a774f60c3eeea880c09bd56f18def648f8fbbe1a0b78ebc573f1f889ca9e1e0fb62c843c836f3d3a2e1f43ef62940e9b894f4ea4cb8400000000000000000000000000000000000000000000000000e507b8392b34d0000000000000000000000000000000000000000000000000000000000652d2a70","0x02f9010a018401075be8b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0","0x02f901c901840108af12b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000100000000000000040000000000000000000000000000000000000000000000020000000000000000000000000000000000080000000100000000000000000000000000000000000000200000000000000000000000000000000000000004000020000800000000000000000000000000000000000000000000000000000000000000000000000000002000400000000000000000000000000000000000040000000000000000000001000000000000000000000000000000000000000000040000f8bef8bc943f97a3e25166de26eef93ad91e382215b21fecf7f884a0a7aaf2512769da4e444e3de247be2564225c2e7a8f74cfe528e46e17d24868e2a048f9d6c5064b083c5c5f17c6f18f8ac529863c506fc2d65b5ebb26571dfa1ec0a00000000000000000000000000000000000000000000000000000000000007097a0000000000000000000000000000000000000000000000000000000000034c740a000000000000000000000000000000000000000000000000000000000652d2a70","0x02f9010a01840109284ab9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0","0x02f9010a01840109a406b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0","0x02f9010a0184010a1d2eb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0","0x02f9010a0184010a8536b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0","0x02f9010a0184010aead2b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0","0x02f9010a0184010b52b2b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0","0x02f9010a0184010bc95ab9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0","0x02f9010a0184010c428ab9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0","0xf901a80184010cc606b9010000000000000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000080000000000000020000000000000000000800000000000000000000000010000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000040001000000000000000000000000000000000000000002000000008000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000f89df89b94ccb2505976e9d2fd355c73c3f1a004446d1dfedaf863a0ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3efa0000000000000000000000000c813edb526830d24a2ce5801d9ef5026a3967529a00000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000a"]}
diff --git a/proxyd/tools/mockserver/node2.yml b/proxyd/tools/mockserver/node2.yml
new file mode 100644
index 0000000..b94ee7a
--- /dev/null
+++ b/proxyd/tools/mockserver/node2.yml
@@ -0,0 +1,44 @@
+- method: eth_getBlockByNumber
+  block: latest
+  response: >
+    {
+      "jsonrpc": "2.0",
+      "id": 67,
+      "result": {
+        "hash": "hash2",
+        "number": "0x2"
+      }
+    }
+- method: eth_getBlockByNumber
+  block: 0x1
+  response: >
+    {
+      "jsonrpc": "2.0",
+      "id": 67,
+      "result": {
+        "hash": "hash1",
+        "number": "0x1"
+      }
+    }
+- method: eth_getBlockByNumber
+  block: 0x2
+  response: >
+    {
+      "jsonrpc": "2.0",
+      "id": 67,
+      "result": {
+        "hash": "hash2",
+        "number": "0x2"
+      }
+    }
+- method: eth_getBlockByNumber
+  block: 0x3
+  response: >
+    {
+      "jsonrpc": "2.0",
+      "id": 67,
+      "result": {
+        "hash": "hash3",
+        "number": "0x3"
+      }
+    }
\ No newline at end of file
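
Note on the fixture schema above: each node2.yml entry pairs a JSON-RPC method and a block tag with a canned response body, so two mock nodes (node1/node2) can disagree about "latest" in consensus tests. As a rough illustration only — the actual proxyd mockserver implementation is not part of this diff, and the names MockEntry and loadFixtures below are hypothetical — a server consuming this schema could look like:

// mock_rpc_sketch.go — minimal sketch of a canned JSON-RPC server
// keyed on method + block tag, mirroring the node2.yml fixture schema.
// Illustrative only; not the proxyd mockserver code.
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"os"

	"gopkg.in/yaml.v3"
)

// MockEntry mirrors one list item in node2.yml.
type MockEntry struct {
	Method   string `yaml:"method"`
	Block    string `yaml:"block"`
	Response string `yaml:"response"`
}

type rpcRequest struct {
	Method string            `json:"method"`
	Params []json.RawMessage `json:"params"`
}

func loadFixtures(path string) ([]MockEntry, error) {
	raw, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	var entries []MockEntry
	return entries, yaml.Unmarshal(raw, &entries)
}

func main() {
	entries, err := loadFixtures("node2.yml")
	if err != nil {
		log.Fatal(err)
	}
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		var req rpcRequest
		if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		// For eth_getBlockByNumber the block tag ("latest", "0x2", ...)
		// is the first positional parameter.
		var block string
		if len(req.Params) > 0 {
			_ = json.Unmarshal(req.Params[0], &block)
		}
		for _, e := range entries {
			if e.Method == req.Method && e.Block == block {
				w.Header().Set("Content-Type", "application/json")
				fmt.Fprint(w, e.Response)
				return
			}
		}
		http.Error(w, "no fixture for request", http.StatusNotFound)
	})
	log.Fatal(http.ListenAndServe(":8545", nil))
}

Under those assumptions, POSTing {"jsonrpc":"2.0","id":67,"method":"eth_getBlockByNumber","params":["0x2", false]} would return the hash2 fixture, and "latest" would report block 0x2 — one block ahead of a node1 fixture whose latest is 0x1.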