Merge branch 'upstream-main' into 19-admin-imitate

commit cc41e54cbf
.gitignore
@@ -6,5 +6,6 @@ perf.data.old
 /data/
 /docker-compose*
 /Dockerfile
+/Jenkinsfile
 /redis-cell-server/
 /target
Cargo.lock (generated, 562 changes; diff suppressed because it is too large)
Cargo.toml
@@ -11,5 +11,9 @@ members = [
 [profile.release]
 # `debug = true` so that sentry can give us line numbers
 debug = true
+
+[profile.faster_release]
+inherits = "release"
+
 # spend longer compiling for a slightly faster binary
 codegen-units = 1
Dockerfile (45 changes)
@@ -1,21 +1,52 @@
-FROM rust:1-bullseye as builder
+#
+# cargo-nextest
+# We only pay the installation cost once,
+# it will be cached from the second build onwards
+#
+FROM rust:1-bullseye AS builder
+
+WORKDIR /app
+ENV CARGO_TERM_COLOR always
 
+# a next-generation test runner for Rust projects.
+# TODO: more mount type cache?
+RUN --mount=type=cache,target=/usr/local/cargo/registry \
+    cargo install cargo-nextest
+
+# foundry is needed to run tests
 ENV PATH /root/.foundry/bin:$PATH
 RUN curl -L https://foundry.paradigm.xyz | bash && foundryup
 
-WORKDIR /usr/src/web3_proxy
+# copy the application
 COPY . .
 
+# test the application with cargo-nextest
 RUN --mount=type=cache,target=/usr/local/cargo/registry \
-    --mount=type=cache,target=/usr/src/web3_proxy/target \
-    cargo test &&\
-    cargo install --locked --no-default-features --root /opt/bin --path ./web3_proxy
+    --mount=type=cache,target=/app/target \
+    cargo nextest run
 
-FROM debian:bullseye-slim
+# build the application
+# using a "release" profile (which install does) is **very** important
+RUN --mount=type=cache,target=/usr/local/cargo/registry \
+    --mount=type=cache,target=/app/target \
+    cargo install --locked --no-default-features --profile faster_release --root /opt/bin --path ./web3_proxy
 
-COPY --from=builder /opt/bin/* /usr/local/bin/
+#
+# We do not need the Rust toolchain to run the binary!
+#
+FROM debian:bullseye-slim AS runtime
+
+# Create llama user to avoid running container with root
+RUN mkdir /llama \
+    && adduser --home /llama --shell /sbin/nologin --gecos '' --no-create-home --disabled-password --uid 1001 llama \
+    && chown -R llama /llama
+
+USER llama
 
 ENTRYPOINT ["web3_proxy_cli"]
 CMD [ "--config", "/web3-proxy.toml", "proxyd" ]
 
 # TODO: lower log level when done with prototyping
 ENV RUST_LOG "warn,web3_proxy=debug,web3_proxy_cli=debug"
+
+COPY --from=builder /opt/bin/* /usr/local/bin/
Jenkinsfile (vendored, new file, 174 lines)
@@ -0,0 +1,174 @@
+def buildAndPush() {
+    // env.BRANCH_NAME is set to the git branch name by default
+    // env.REGISTRY is the repository url for this pipeline
+    // env.GIT_SHORT is the git short hash of the currently checked out repo
+    // env.LATEST_BRANCH is the branch name that gets tagged latest
+    // env.ARCH is the system architecture. some apps can be generic (amd64, arm64),
+    // but apps that compile for specific hardware (like web3-proxy) will need more specific tags (amd64_epyc2, arm64_graviton2, intel_xeon3, etc.)
+
+    // TODO: check that this system actually matches the given arch
+    sh '''#!/bin/bash
+        set -eux -o pipefail
+
+        [ -n "$GIT_SHORT" ]
+        [ -n "$GIT_SHORT" ]
+        [ -n "$REGISTRY" ]
+        [ -n "$ARCH" ]
+
+        # deterministic mtime on .git keeps Dockerfiles that do 'ADD . .' or similar
+        # without this, the build process always thinks the directory has changes
+        git restore-mtime
+        touch -t "$(git show -s --date=format:'%Y%m%d%H%M.%S' --format=%cd HEAD)" .git
+
+        function buildAndPush {
+            image=$1
+            buildcache=$2
+
+            buildctl build \
+                --frontend=dockerfile.v0 \
+                --local context=. \
+                --local dockerfile=. \
+                --output "type=image,name=${image},push=true" \
+                --export-cache type=s3,region=us-east-2,bucket=llamarpc-buildctl-cache,name=${buildcache} \
+                --import-cache type=s3,region=us-east-2,bucket=llamarpc-buildctl-cache,name=${buildcache} \
+            ;
+        }
+
+        BUILDCACHE="${REGISTRY}:buildcache_${ARCH}"
+
+        # build and push a docker image tagged with the short git commit
+        buildAndPush "${REGISTRY}:git_${GIT_SHORT}_${ARCH}" "${BUILDCACHE}"
+
+        # push an image tagged with the branch
+        # since buildAndPush just ran above, this should be very quick
+        # TODO: maybe replace slashes in the name with dashes or underscores
+        buildAndPush "${REGISTRY}:branch_${BRANCH_NAME}_${ARCH}" "${BUILDCACHE}"
+
+        if [ "${BRANCH_NAME}" = "${LATEST_BRANCH}" ]; then
+            buildAndPush "${REGISTRY}:latest_${ARCH}" "${BUILDCACHE}"
+        fi
+    '''
+}
+
+pipeline {
+    agent any
+    options {
+        ansiColor('xterm')
+    }
+    environment {
+        // AWS_ECR_URL needs to be set in jenkin's config.
+        // AWS_ECR_URL could really be any docker registry. we just use ECR so that we don't have to manage it
+        REGISTRY="${AWS_ECR_URL}/web3-proxy"
+
+        // branch that should get tagged with "latest_$arch" (stable, main, master, etc.)
+        LATEST_BRANCH="main"
+
+        // non-buildkit builds are officially deprecated
+        // buildkit is much faster and handles caching much better than the default build process.
+        DOCKER_BUILDKIT=1
+
+        GIT_SHORT="${GIT_COMMIT.substring(0,8)}"
+    }
+    stages {
+        stage('build and push') {
+            parallel {
+                stage('build and push amd64_epyc2 image') {
+                    agent {
+                        label 'amd64_epyc2'
+                    }
+                    environment {
+                        ARCH="amd64_epyc2"
+                    }
+                    steps {
+                        script {
+                            buildAndPush()
+                        }
+                    }
+                }
+                stage('build and push amd64_epyc3 image') {
+                    agent {
+                        label 'amd64_epyc3'
+                    }
+                    environment {
+                        ARCH="amd64_epyc3"
+                    }
+                    steps {
+                        script {
+                            buildAndPush()
+                        }
+                    }
+                }
+                stage('Build and push arm64_graviton1 image') {
+                    agent {
+                        label 'arm64_graviton1'
+                    }
+                    environment {
+                        ARCH="arm64_graviton1"
+                    }
+                    steps {
+                        script {
+                            buildAndPush()
+                        }
+                    }
+                }
+                stage('Build and push arm64_graviton2 image') {
+                    agent {
+                        label 'arm64_graviton2'
+                    }
+                    environment {
+                        ARCH="arm64_graviton2"
+                    }
+                    steps {
+                        script {
+                            buildAndPush()
+                        }
+                    }
+                }
+                stage('Build and push intel_xeon3 image') {
+                    agent {
+                        label 'intel_xeon3'
+                    }
+                    environment {
+                        ARCH="intel_xeon3"
+                    }
+                    steps {
+                        script {
+                            buildAndPush()
+                        }
+                    }
+                }
+            }
+        }
+        stage('create (experimental) manifest') {
+            agent any
+            steps {
+                script {
+                    sh '''#!/bin/bash
+                        set -eux -o pipefail
+
+                        [ -n "$BRANCH_NAME" ]
+                        [ -n "$GIT_SHORT" ]
+                        [ -n "$LATEST_BRANCH" ]
+                        [ -n "$REGISTRY" ]
+
+                        function manifest {
+                            repo=$1
+
+                            docker manifest create "${repo}" --amend "${repo}_arm64_graviton2" --amend "${repo}_amd64_epyc2" --amend "${repo}_intel_xeon3"
+
+                            docker manifest push --purge "${repo}"
+                        }
+
+                        manifest "${REGISTRY}:git_${GIT_SHORT}"
+                        manifest "${REGISTRY}:branch_${BRANCH_NAME}"
+
+                        if [ "${BRANCH_NAME}" = "${LATEST_BRANCH}" ]; then
+                            manifest "${REGISTRY}:latest"
+                        fi
+                    '''
+                }
+            }
+        }
+    }
+}
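A note on the tag scheme the shell above implements: every agent pushes a commit-pinned tag and a branch tag, and only the LATEST_BRANCH build also pushes latest_$ARCH. A hedged Rust sketch of that scheme (the registry and values below are examples, not pipeline output):

```rust
// A sketch of the tags the Jenkinsfile's buildAndPush produces per architecture.
fn image_tags(
    registry: &str,
    git_short: &str,
    branch: &str,
    latest_branch: &str,
    arch: &str,
) -> Vec<String> {
    let mut tags = vec![
        // always: one tag pinned to the commit, one tracking the branch
        format!("{registry}:git_{git_short}_{arch}"),
        format!("{registry}:branch_{branch}_{arch}"),
    ];
    if branch == latest_branch {
        // only LATEST_BRANCH (main) also gets the latest_$ARCH tag
        tags.push(format!("{registry}:latest_{arch}"));
    }
    tags
}

fn main() {
    let tags = image_tags("ecr.example.com/web3-proxy", "cc41e54c", "main", "main", "amd64_epyc2");
    assert_eq!(tags.len(), 3);
    println!("{tags:#?}");
}
```

The manifest stage then stitches the per-arch tags into one multi-arch name, so clients can pull an untagged-by-arch image and get the right binary.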
README.md
@@ -37,7 +37,7 @@ Options:
 Start the server with the defaults (listen on `http://localhost:8544` and use `./config/development.toml`, which uses the database and cache running under docker and proxies to a bunch of public nodes):
 
 ```
-cargo run --release -- daemon
+cargo run --release -- proxyd
 ```
 
 ## Common commands
TODO.md (21 changes)
@@ -243,8 +243,8 @@ These are roughly in order of completition
 - [x] cache the status page for a second
 - [x] request accounting for websockets
 - [x] database merge scripts
-- [x] test that sets up a Web3Connection and asks "has_block" for old and new blocks
-- [x] test that sets up Web3Connections with 2 nodes. one behind by several blocks. and see what the "next" server shows as
+- [x] test that sets up a Web3Rpc and asks "has_block" for old and new blocks
+- [x] test that sets up Web3Rpcs with 2 nodes. one behind by several blocks. and see what the "next" server shows as
 - [x] ethspam on bsc and polygon gives 1/4 errors. fix whatever is causing this
   - bugfix! we were using the whole connection list instead of just the synced connection list when picking servers. oops!
 - [x] actually block unauthenticated requests instead of emitting warning of "allowing without auth during development!"
@@ -289,7 +289,7 @@ These are not yet ordered. There might be duplicates. We might not actually need
   - we were caching too aggressively
 - [x] BUG! if sending transactions gets "INTERNAL_ERROR: existing tx with same hash", create a success message
   - we just want to be sure that the server has our tx and in this case, it does.
-  - ERROR http_request:request:try_send_all_upstream_servers: web3_proxy::rpcs::request: bad response! err=JsonRpcClientError(JsonRpcError(JsonRpcError { code: -32000, message: "INTERNAL_ERROR: existing tx with same hash", data: None })) method=eth_sendRawTransaction rpc=local_erigon_alpha_archive id=01GF4HV03Y4ZNKQV8DW5NDQ5CG method=POST authorized_request=User(Some(SqlxMySqlPoolConnection), AuthorizedKey { ip: 10.11.12.15, origin: None, user_key_id: 4, log_revert_chance: 0.0000 }) self=Web3Connections { conns: {"local_erigon_alpha_archive_ws": Web3Connection { name: "local_erigon_alpha_archive_ws", blocks: "all", .. }, "local_geth_ws": Web3Connection { name: "local_geth_ws", blocks: 64, .. }, "local_erigon_alpha_archive": Web3Connection { name: "local_erigon_alpha_archive", blocks: "all", .. }}, .. } authorized_request=Some(User(Some(SqlxMySqlPoolConnection), AuthorizedKey { ip: 10.11.12.15, origin: None, user_key_id: 4, log_revert_chance: 0.0000 })) request=JsonRpcRequest { id: RawValue(39), method: "eth_sendRawTransaction", .. } request_metadata=Some(RequestMetadata { datetime: 2022-10-11T22:14:57.406829095Z, period_seconds: 60, request_bytes: 633, backend_requests: 0, no_servers: 0, error_response: false, response_bytes: 0, response_millis: 0 }) block_needed=None
+  - ERROR http_request:request:try_send_all_upstream_servers: web3_proxy::rpcs::request: bad response! err=JsonRpcClientError(JsonRpcError(JsonRpcError { code: -32000, message: "INTERNAL_ERROR: existing tx with same hash", data: None })) method=eth_sendRawTransaction rpc=local_erigon_alpha_archive id=01GF4HV03Y4ZNKQV8DW5NDQ5CG method=POST authorized_request=User(Some(SqlxMySqlPoolConnection), AuthorizedKey { ip: 10.11.12.15, origin: None, user_key_id: 4, log_revert_chance: 0.0000 }) self=Web3Rpcs { conns: {"local_erigon_alpha_archive_ws": Web3Rpc { name: "local_erigon_alpha_archive_ws", blocks: "all", .. }, "local_geth_ws": Web3Rpc { name: "local_geth_ws", blocks: 64, .. }, "local_erigon_alpha_archive": Web3Rpc { name: "local_erigon_alpha_archive", blocks: "all", .. }}, .. } authorized_request=Some(User(Some(SqlxMySqlPoolConnection), AuthorizedKey { ip: 10.11.12.15, origin: None, user_key_id: 4, log_revert_chance: 0.0000 })) request=JsonRpcRequest { id: RawValue(39), method: "eth_sendRawTransaction", .. } request_metadata=Some(RequestMetadata { datetime: 2022-10-11T22:14:57.406829095Z, period_seconds: 60, request_bytes: 633, backend_requests: 0, no_servers: 0, error_response: false, response_bytes: 0, response_millis: 0 }) block_needed=None
 - [x] serde collect unknown fields in config instead of crash
 - [x] upgrade user tier by address
 - [x] all_backend_connections skips syncing servers
@@ -324,6 +324,12 @@ These are not yet ordered. There might be duplicates. We might not actually need
 - [x] improve waiting for sync when rate limited
 - [x] improve pager duty errors for smarter deduping
 - [x] add create_key cli command
+- [x] short lived cache on /health
+- [x] cache /status for longer
+- [x] sort connections during eth_sendRawTransaction
+- [x] block all admin_ rpc commands
+- [x] remove the "metered" crate now that we save aggregate queries?
+- [x] add archive depth to app config
 - [-] proxy mode for benchmarking all backends
 - [-] proxy mode for sending to multiple backends
 - [-] let users choose a % of reverts to log (or maybe x/second). someone like curve logging all reverts will be a BIG database very quickly
@@ -375,7 +381,6 @@ These are not yet ordered. There might be duplicates. We might not actually need
 - [ ] cli commands to search users by key
 - [ ] cli flag to set prometheus port
 - [ ] flamegraphs show 25% of the time to be in moka-housekeeper. tune that
-- [ ] remove the "metered" crate now that we save aggregate queries?
 - [ ] remove/change the "active_requests" counter? maybe only once we have dynamic soft limits?
 - [ ] refactor so configs can change while running
   - this will probably be a rather large change, but is necessary when we have autoscaling
@@ -551,10 +556,10 @@ in another repo: event subscriber
 - [ ] weird flapping fork could have more useful logs. like, howd we get to 1/1/4 and fork. geth changed its mind 3 times?
   - should we change our code to follow the same consensus rules as geth? our first seen still seems like a reasonable choice
   - other chains might change all sorts of things about their fork choice rules
-  2022-07-22T23:52:18.593956Z WARN block_receiver: web3_proxy::connections: chain is forked! 1 possible heads. 1/1/4 rpcs have 0xa906…5bc1 rpc=Web3Connection { url: "ws://127.0.0.1:8546", data: 64, .. } new_block_num=15195517
-  2022-07-22T23:52:18.983441Z WARN block_receiver: web3_proxy::connections: chain is forked! 1 possible heads. 1/1/4 rpcs have 0x70e8…48e0 rpc=Web3Connection { url: "ws://127.0.0.1:8546", data: 64, .. } new_block_num=15195517
-  2022-07-22T23:52:19.350720Z WARN block_receiver: web3_proxy::connections: chain is forked! 2 possible heads. 1/2/4 rpcs have 0x70e8…48e0 rpc=Web3Connection { url: "ws://127.0.0.1:8549", data: "archive", .. } new_block_num=15195517
-  2022-07-22T23:52:26.041140Z WARN block_receiver: web3_proxy::connections: chain is forked! 2 possible heads. 2/4/4 rpcs have 0x70e8…48e0 rpc=Web3Connection { url: "http://127.0.0.1:8549", data: "archive", .. } new_block_num=15195517
+  2022-07-22T23:52:18.593956Z WARN block_receiver: web3_proxy::connections: chain is forked! 1 possible heads. 1/1/4 rpcs have 0xa906…5bc1 rpc=Web3Rpc { url: "ws://127.0.0.1:8546", data: 64, .. } new_block_num=15195517
+  2022-07-22T23:52:18.983441Z WARN block_receiver: web3_proxy::connections: chain is forked! 1 possible heads. 1/1/4 rpcs have 0x70e8…48e0 rpc=Web3Rpc { url: "ws://127.0.0.1:8546", data: 64, .. } new_block_num=15195517
+  2022-07-22T23:52:19.350720Z WARN block_receiver: web3_proxy::connections: chain is forked! 2 possible heads. 1/2/4 rpcs have 0x70e8…48e0 rpc=Web3Rpc { url: "ws://127.0.0.1:8549", data: "archive", .. } new_block_num=15195517
+  2022-07-22T23:52:26.041140Z WARN block_receiver: web3_proxy::connections: chain is forked! 2 possible heads. 2/4/4 rpcs have 0x70e8…48e0 rpc=Web3Rpc { url: "http://127.0.0.1:8549", data: "archive", .. } new_block_num=15195517
 - [ ] threshold should check actual available request limits (if any) instead of just the soft limit
 - [ ] foreign key on_update and on_delete
 - [ ] database creation timestamps
config/minimal.toml (new file, 32 lines)
@@ -0,0 +1,32 @@
+[app]
+chain_id = 1
+
+# no database
+# no influxdb
+# no redis
+# no sentry
+# no public limits means anon gets full access
+
+# no thundering herd protection
+min_sum_soft_limit = 1
+min_synced_rpcs = 1
+
+# 1GB of cache
+response_cache_max_bytes = 1_000_000_000
+
+[balanced_rpcs]
+
+[balanced_rpcs.llama_public_wss]
+# TODO: what should we do if all rpcs are disabled? warn and wait for a config change?
+disabled = false
+display_name = "LlamaNodes WSS"
+url = "wss://eth.llamarpc.com/"
+soft_limit = 1_000
+tier = 0
+
+[balanced_rpcs.llama_public_https]
+disabled = false
+display_name = "LlamaNodes HTTPS"
+url = "https://eth.llamarpc.com/"
+soft_limit = 1_000
+tier = 0
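For context on the two "thundering herd" knobs above, here is a minimal Rust sketch of the readiness rule they plausibly drive. The function and its logic are an assumption for illustration; the proxy's real gating lives in its rpc-balancing code, not here.

```rust
// Hypothetical readiness check built on the two config knobs above.
fn ready(
    synced_rpcs: usize,
    sum_soft_limit: u64,
    min_synced_rpcs: usize,
    min_sum_soft_limit: u64,
) -> bool {
    // serve traffic only once enough rpcs agree on the head block *and*
    // their combined soft limits can absorb the configured load
    synced_rpcs >= min_synced_rpcs && sum_soft_limit >= min_sum_soft_limit
}

fn main() {
    // with minimal.toml's values (1 and 1), a single synced rpc suffices
    assert!(ready(1, 1_000, 1, 1));
    assert!(!ready(0, 0, 1, 1));
}
```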
@@ -7,8 +7,8 @@ edition = "2021"
 [dependencies]
 redis-rate-limiter = { path = "../redis-rate-limiter" }
 
-anyhow = "1.0.68"
+anyhow = "1.0.69"
 hashbrown = "0.13.2"
 log = "0.4.17"
-moka = { version = "0.9.6", default-features = false, features = ["future"] }
-tokio = "1.24.2"
+moka = { version = "0.9.7", default-features = false, features = ["future"] }
+tokio = "1.25.0"
@@ -10,8 +10,8 @@ path = "src/mod.rs"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-sea-orm = "0.10.7"
+sea-orm = "0.11.0"
 serde = "1.0.152"
-uuid = "1.2.2"
+uuid = "1.3.0"
 ethers = "1.0.2"
 ulid = "1.0.0"
@@ -9,10 +9,10 @@ name = "migration"
 path = "src/lib.rs"
 
 [dependencies]
-tokio = { version = "1.24.2", features = ["full", "tracing"] }
+tokio = { version = "1.25.0", features = ["full", "tracing"] }
 
 [dependencies.sea-orm-migration]
-version = "0.10.7"
+version = "0.11.0"
 features = [
     # Enable at least one `ASYNC_RUNTIME` and `DATABASE_DRIVER` feature if you want to run migration via CLI.
     # View the list of supported features at https://www.sea-ql.org/SeaORM/docs/install-and-config/database-and-async-runtime.
@@ -5,6 +5,6 @@ authors = ["Bryan Stitt <bryan@stitthappens.com>"]
 edition = "2021"
 
 [dependencies]
-anyhow = "1.0.68"
+anyhow = "1.0.69"
 deadpool-redis = { version = "0.11.1", features = ["rt_tokio_1", "serde"] }
-tokio = "1.24.2"
+tokio = "1.25.0"
@@ -25,10 +25,10 @@ thread-fast-rng = { path = "../thread-fast-rng" }
 # TODO: import chrono from sea-orm so we always have the same version
 # TODO: make sure this time version matches siwe. PR to put this in their prelude
 
-anyhow = { version = "1.0.68", features = ["backtrace"] }
+anyhow = { version = "1.0.69", features = ["backtrace"] }
 argh = "0.1.10"
 axum = { version = "0.6.4", features = ["headers", "ws"] }
-axum-client-ip = "0.3.1"
+axum-client-ip = "0.4.0"
 axum-macros = "0.3.2"
 chrono = "0.4.23"
 counter = "0.5.7"
@@ -38,7 +38,7 @@ env_logger = "0.10.0"
 ethers = { version = "1.0.2", default-features = false, features = ["rustls", "ws"] }
 fdlimit = "0.2.1"
 flume = "0.10.14"
-futures = { version = "0.3.25", features = ["thread-pool"] }
+futures = { version = "0.3.26", features = ["thread-pool"] }
 gethostname = "0.4.1"
 glob = "0.3.1"
 handlebars = "4.3.6"
@@ -48,28 +48,28 @@ http = "0.2.8"
 ipnet = "2.7.1"
 itertools = "0.10.5"
 log = "0.4.17"
-metered = { version = "0.9.0", features = ["serialize"] }
-moka = { version = "0.9.6", default-features = false, features = ["future"] }
-notify = "5.0.0"
+moka = { version = "0.9.7", default-features = false, features = ["future"] }
+notify = "5.1.0"
 num = "0.4.0"
 num-traits = "0.2.15"
 pagerduty-rs = { version = "0.1.6", default-features = false, features = ["async", "rustls", "sync"] }
 parking_lot = { version = "0.12.1", features = ["arc_lock"] }
+prettytable = "*"
 proctitle = "0.1.1"
 regex = "1.7.1"
 reqwest = { version = "0.11.14", default-features = false, features = ["json", "tokio-rustls"] }
 rustc-hash = "1.1.0"
-sentry = { version = "0.29.2", default-features = false, features = ["backtrace", "contexts", "panic", "anyhow", "reqwest", "rustls", "log", "sentry-log"] }
+sentry = { version = "0.29.3", default-features = false, features = ["backtrace", "contexts", "panic", "anyhow", "reqwest", "rustls", "log", "sentry-log"] }
 serde = { version = "1.0.152", features = [] }
-serde_json = { version = "1.0.91", default-features = false, features = ["alloc", "raw_value"] }
-serde_prometheus = "0.1.6"
+serde_json = { version = "1.0.93", default-features = false, features = ["alloc", "raw_value"] }
+serde_prometheus = "0.2.0"
 siwe = "0.5.0"
 time = "0.3.17"
-tokio = { version = "1.24.2", features = ["full"] }
+tokio = { version = "1.25.0", features = ["full"] }
 tokio-stream = { version = "0.1.11", features = ["sync"] }
-toml = "0.6.0"
+toml = "0.7.2"
 tower = "0.4.13"
 tower-http = { version = "0.3.5", features = ["cors", "sensitive-headers"] }
 ulid = { version = "1.0.0", features = ["serde"] }
 url = "2.3.1"
-uuid = "1.2.2"
+uuid = "1.3.0"
@@ -1,32 +0,0 @@
-use metered::{metered, HitCount, Throughput};
-use serde::Serialize;
-use thread_fast_rng::{rand::Rng, thread_fast_rng};
-
-#[derive(Default, Debug, Serialize)]
-pub struct Biz {
-    metrics: BizMetrics,
-}
-
-#[metered(registry = BizMetrics)]
-impl Biz {
-    #[measure([HitCount, Throughput])]
-    pub fn biz(&self) {
-        let delay = std::time::Duration::from_millis(thread_fast_rng().gen::<u64>() % 200);
-        std::thread::sleep(delay);
-    }
-}
-
-fn main() {
-    let buz = Biz::default();
-
-    for _ in 0..100 {
-        buz.biz();
-    }
-
-    let mut globals = std::collections::HashMap::new();
-    globals.insert("service", "web3_proxy_prometheus_example");
-
-    let serialized = serde_prometheus::to_string(&buz.metrics, Some("example"), globals).unwrap();
-
-    println!("{}", serialized);
-}
@@ -11,9 +11,8 @@ use crate::jsonrpc::{
     JsonRpcForwardedResponse, JsonRpcForwardedResponseEnum, JsonRpcRequest, JsonRpcRequestEnum,
 };
 use crate::rpcs::blockchain::{ArcBlock, SavedBlock};
-use crate::rpcs::connection::Web3Connection;
-use crate::rpcs::connections::Web3Connections;
-use crate::rpcs::request::OpenRequestHandleMetrics;
+use crate::rpcs::many::Web3Rpcs;
+use crate::rpcs::one::Web3Rpc;
 use crate::rpcs::transactions::TxStatus;
 use crate::user_token::UserBearerToken;
 use anyhow::Context;
@@ -32,7 +31,6 @@ use futures::stream::{FuturesUnordered, StreamExt};
 use hashbrown::{HashMap, HashSet};
 use ipnet::IpNet;
 use log::{debug, error, info, trace, warn, Level};
-use metered::{metered, ErrorCount, HitCount, ResponseTime, Throughput};
 use migration::sea_orm::{
     self, ConnectionTrait, Database, DatabaseConnection, EntityTrait, PaginatorTrait,
 };
@@ -71,7 +69,9 @@ pub static REQUEST_PERIOD: u64 = 60;
 #[derive(From)]
 struct ResponseCacheKey {
     // if none, this is cached until evicted
-    block: Option<SavedBlock>,
+    from_block: Option<SavedBlock>,
+    // to_block is only set when ranges of blocks are requested (like with eth_getLogs)
+    to_block: Option<SavedBlock>,
     method: String,
     // TODO: better type for this
     params: Option<serde_json::Value>,
@@ -96,7 +96,22 @@ impl PartialEq for ResponseCacheKey {
             return false;
         }
 
-        match (self.block.as_ref(), other.block.as_ref()) {
+        match (self.from_block.as_ref(), other.from_block.as_ref()) {
+            (None, None) => {}
+            (None, Some(_)) => {
+                return false;
+            }
+            (Some(_), None) => {
+                return false;
+            }
+            (Some(s), Some(o)) => {
+                if s != o {
+                    return false;
+                }
+            }
+        }
+
+        match (self.to_block.as_ref(), other.to_block.as_ref()) {
             (None, None) => {}
             (None, Some(_)) => {
                 return false;
@@ -123,7 +138,8 @@ impl Eq for ResponseCacheKey {}
 
 impl Hash for ResponseCacheKey {
     fn hash<H: Hasher>(&self, state: &mut H) {
-        self.block.as_ref().map(|x| x.hash()).hash(state);
+        self.from_block.as_ref().map(|x| x.hash()).hash(state);
+        self.to_block.as_ref().map(|x| x.hash()).hash(state);
         self.method.hash(state);
         self.params.as_ref().map(|x| x.to_string()).hash(state);
         self.cache_errors.hash(state)
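The two impls above preserve an easy-to-miss invariant: every field that participates in PartialEq also feeds Hash, now including the new to_block. A self-contained sketch of the same key shape with simplified field types (u64 block numbers instead of SavedBlock), where deriving keeps Eq and Hash in sync automatically, which is what the handwritten impls must do by hand:

```rust
use std::collections::HashMap;

// Simplified stand-in for ResponseCacheKey; fields mirror the diff.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct CacheKey {
    from_block: Option<u64>,
    // only set for range queries like eth_getLogs
    to_block: Option<u64>,
    method: String,
    params: Option<String>,
    cache_errors: bool,
}

fn main() {
    let mut cache: HashMap<CacheKey, &str> = HashMap::new();
    let key = CacheKey {
        from_block: Some(15_195_517),
        to_block: None,
        method: "eth_getBalance".to_string(),
        params: None,
        cache_errors: false,
    };
    cache.insert(key.clone(), "cached response");

    // a key differing only in to_block must be a *different* cache entry
    let range_key = CacheKey { to_block: Some(15_195_518), ..key.clone() };
    assert!(cache.get(&key).is_some());
    assert!(cache.get(&range_key).is_none());
}
```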
@@ -182,9 +198,9 @@ impl DatabaseReplica {
 // TODO: i'm sure this is more arcs than necessary, but spawning futures makes references hard
 pub struct Web3ProxyApp {
     /// Send requests to the best server available
-    pub balanced_rpcs: Arc<Web3Connections>,
+    pub balanced_rpcs: Arc<Web3Rpcs>,
     /// Send private requests (like eth_sendRawTransaction) to all these servers
-    pub private_rpcs: Option<Arc<Web3Connections>>,
+    pub private_rpcs: Option<Arc<Web3Rpcs>>,
     response_cache: ResponseCache,
     // don't drop this or the sender will stop working
     // TODO: broadcast channel instead?
@@ -193,9 +209,6 @@ pub struct Web3ProxyApp {
     pub config: AppConfig,
     pub db_conn: Option<sea_orm::DatabaseConnection>,
     pub db_replica: Option<DatabaseReplica>,
-    /// prometheus metrics
-    app_metrics: Arc<Web3ProxyAppMetrics>,
-    open_request_handle_metrics: Arc<OpenRequestHandleMetrics>,
     /// store pending transactions that we've seen so that we don't send duplicates to subscribers
     pub pending_transactions: Cache<TxHash, TxStatus, hashbrown::hash_map::DefaultHashBuilder>,
     pub frontend_ip_rate_limiter: Option<DeferredRateLimiter<IpAddr>>,
@@ -288,7 +301,7 @@ pub async fn migrate_db(
     );
 
     loop {
-        if Migrator::get_pending_migrations(&db_conn).await?.is_empty() {
+        if Migrator::get_pending_migrations(db_conn).await?.is_empty() {
            info!("no migrations to apply");
            return Ok(());
        }
@@ -314,10 +327,10 @@ pub async fn migrate_db(
         break;
     }
 
-    let migration_result = Migrator::up(&db_conn, None).await;
+    let migration_result = Migrator::up(db_conn, None).await;
 
     // drop the distributed lock
-    drop_migration_lock(&db_conn).await?;
+    drop_migration_lock(db_conn).await?;
 
     // return if migrations erred
     migration_result
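The `&db_conn` edits in the two migrate_db hunks drop an extra borrow, presumably because the function now receives the connection by reference. The control flow worth noticing is that the distributed lock is released whether or not migrations succeeded, and only then is the result surfaced. A sketch with stub types standing in for sea-orm's (assumes the workspace's anyhow and tokio):

```rust
// Stub type; the real code uses sea-orm's DatabaseConnection and Migrator.
struct Db;

async fn run_pending_migrations(_db: &Db) -> anyhow::Result<()> {
    Ok(()) // stand-in for Migrator::up(db_conn, None)
}

async fn drop_migration_lock(_db: &Db) -> anyhow::Result<()> {
    Ok(()) // stand-in for the real distributed-lock release
}

async fn migrate(db_conn: &Db) -> anyhow::Result<()> {
    // db_conn is already a reference, so it is passed straight through
    let migration_result = run_pending_migrations(db_conn).await;

    // drop the distributed lock even when migrations erred...
    drop_migration_lock(db_conn).await?;

    // ...and only then return the original result
    migration_result
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    migrate(&Db).await
}
```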
@@ -347,7 +360,6 @@ pub struct Web3ProxyAppSpawn {
     pub background_handles: FuturesUnordered<AnyhowJoinHandle<()>>,
 }
 
-#[metered(registry = Web3ProxyAppMetrics, registry_expr = self.app_metrics, visibility = pub)]
 impl Web3ProxyApp {
     /// The main entrypoint.
     pub async fn spawn(
@@ -377,10 +389,6 @@ impl Web3ProxyApp {
             );
         }
 
-        // setup metrics
-        let app_metrics = Default::default();
-        let open_request_handle_metrics: Arc<OpenRequestHandleMetrics> = Default::default();
-
         let mut db_conn = None::<DatabaseConnection>;
         let mut db_replica = None::<DatabaseReplica>;
 
@@ -564,7 +572,7 @@ impl Web3ProxyApp {
             .build_with_hasher(hashbrown::hash_map::DefaultHashBuilder::default());
 
         // connect to the load balanced rpcs
-        let (balanced_rpcs, balanced_handle) = Web3Connections::spawn(
+        let (balanced_rpcs, balanced_handle) = Web3Rpcs::spawn(
             top_config.app.chain_id,
             db_conn.clone(),
             balanced_rpcs,
@@ -576,7 +584,6 @@ impl Web3ProxyApp {
             top_config.app.min_synced_rpcs,
             Some(pending_tx_sender.clone()),
             pending_transactions.clone(),
-            open_request_handle_metrics.clone(),
         )
         .await
         .context("spawning balanced rpcs")?;
@@ -591,7 +598,7 @@ impl Web3ProxyApp {
             warn!("No private relays configured. Any transactions will be broadcast to the public mempool!");
             None
         } else {
-            let (private_rpcs, private_handle) = Web3Connections::spawn(
+            let (private_rpcs, private_handle) = Web3Rpcs::spawn(
                 top_config.app.chain_id,
                 db_conn.clone(),
                 private_rpcs,
@@ -607,7 +614,6 @@ impl Web3ProxyApp {
                 // TODO: subscribe to pending transactions on the private rpcs? they seem to have low rate limits
                 None,
                 pending_transactions.clone(),
-                open_request_handle_metrics.clone(),
             )
             .await
             .context("spawning private_rpcs")?;
@@ -663,14 +669,12 @@ impl Web3ProxyApp {
             ));
         }
 
-        // keep 1GB of blocks in the cache
-        // responses can be very different in sizes, so this definitely needs a weigher
-        // TODO: max_capacity from config
+        // responses can be very different in sizes, so this is a cache with a max capacity and a weigher
         // TODO: don't allow any response to be bigger than X% of the cache
         let response_cache = Cache::builder()
-            .max_capacity(1024 * 1024 * 1024)
+            .max_capacity(top_config.app.response_cache_max_bytes)
            .weigher(|k: &ResponseCacheKey, v| {
-                // TODO: is this good?
+                // TODO: is this good enough?
                if let Ok(v) = serde_json::to_string(v) {
                    let weight = k.weight() + v.len();
 
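With the hunk above, the cache budget comes from config instead of a hard-coded 1GB, and the budget is interpreted in the weigher's units. A self-contained sketch of a byte-weighted cache against moka 0.9's future API (the version pinned in the Cargo.toml changes above); the key and value types are simplified stand-ins:

```rust
use moka::future::Cache;

#[tokio::main]
async fn main() {
    // with a weigher installed, max_capacity is a budget in the weigher's
    // units (bytes here), not an entry count
    let response_cache: Cache<String, String> = Cache::builder()
        .max_capacity(1_000_000) // ~1 MB budget, like response_cache_max_bytes
        .weigher(|key: &String, value: &String| {
            // weigh each entry by its serialized size, clamped to u32
            (key.len() + value.len()).try_into().unwrap_or(u32::MAX)
        })
        .build();

    response_cache
        .insert("eth_chainId".to_string(), "0x1".to_string())
        .await;

    assert_eq!(
        response_cache.get(&"eth_chainId".to_string()),
        Some("0x1".to_string())
    );
}
```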
@@ -718,8 +722,6 @@ impl Web3ProxyApp {
             db_conn,
             db_replica,
             vredis_pool,
-            app_metrics,
-            open_request_handle_metrics,
             rpc_secret_key_cache,
             bearer_token_semaphores,
             ip_semaphores,
@@ -893,9 +895,7 @@ impl Web3ProxyApp {
         // "user_cache_size": app.rpc_secret_key_cache.weighted_size(),
 
         #[derive(Serialize)]
-        struct CombinedMetrics<'a> {
-            app: &'a Web3ProxyAppMetrics,
-            backend_rpc: &'a OpenRequestHandleMetrics,
+        struct CombinedMetrics {
             recent_ip_counts: RecentCounts,
             recent_user_id_counts: RecentCounts,
             recent_tx_counts: RecentCounts,
@@ -903,14 +903,13 @@ impl Web3ProxyApp {
         }
 
         let metrics = CombinedMetrics {
-            app: &self.app_metrics,
-            backend_rpc: &self.open_request_handle_metrics,
             recent_ip_counts,
             recent_user_id_counts,
             recent_tx_counts,
             user_count,
         };
 
+        // TODO: i don't like this library. it doesn't include HELP or TYPE lines and so our prometheus server fails to parse it
         serde_prometheus::to_string(&metrics, Some("web3_proxy"), globals)
            .expect("prometheus metrics should always serialize")
     }
@@ -921,8 +920,7 @@ impl Web3ProxyApp {
         authorization: Arc<Authorization>,
         request: JsonRpcRequestEnum,
         proxy_mode: ProxyMode,
-    ) -> Result<(JsonRpcForwardedResponseEnum, Vec<Arc<Web3Connection>>), FrontendErrorResponse>
-    {
+    ) -> Result<(JsonRpcForwardedResponseEnum, Vec<Arc<Web3Rpc>>), FrontendErrorResponse> {
         // trace!(?request, "proxy_web3_rpc");
 
         // even though we have timeouts on the requests to our backend providers,
@@ -961,7 +959,7 @@ impl Web3ProxyApp {
         authorization: &Arc<Authorization>,
         requests: Vec<JsonRpcRequest>,
         proxy_mode: ProxyMode,
-    ) -> anyhow::Result<(Vec<JsonRpcForwardedResponse>, Vec<Arc<Web3Connection>>)> {
+    ) -> Result<(Vec<JsonRpcForwardedResponse>, Vec<Arc<Web3Rpc>>), FrontendErrorResponse> {
         // TODO: we should probably change ethers-rs to support this directly. they pushed this off to v2 though
         let num_requests = requests.len();
 
@@ -978,7 +976,7 @@ impl Web3ProxyApp {
         // TODO: i'm sure this could be done better with iterators
         // TODO: stream the response?
         let mut collected: Vec<JsonRpcForwardedResponse> = Vec::with_capacity(num_requests);
-        let mut collected_rpcs: HashSet<Arc<Web3Connection>> = HashSet::new();
+        let mut collected_rpcs: HashSet<Arc<Web3Rpc>> = HashSet::new();
         for response in responses {
             // TODO: any way to attach the tried rpcs to the error? it is likely helpful
             let (response, rpcs) = response?;
@@ -1013,13 +1011,13 @@ impl Web3ProxyApp {
         }
     }
 
-    #[measure([ErrorCount, HitCount, ResponseTime, Throughput])]
+    // #[measure([ErrorCount, HitCount, ResponseTime, Throughput])]
     async fn proxy_cached_request(
         self: &Arc<Self>,
         authorization: &Arc<Authorization>,
         mut request: JsonRpcRequest,
         proxy_mode: ProxyMode,
-    ) -> anyhow::Result<(JsonRpcForwardedResponse, Vec<Arc<Web3Connection>>)> {
+    ) -> Result<(JsonRpcForwardedResponse, Vec<Arc<Web3Rpc>>), FrontendErrorResponse> {
         // trace!("Received request: {:?}", request);
 
         let request_metadata = Arc::new(RequestMetadata::new(REQUEST_PERIOD, request.num_bytes())?);
@@ -1033,13 +1031,7 @@ impl Web3ProxyApp {
         // TODO: don't clone?
         let partial_response: serde_json::Value = match request_method.as_ref() {
             // lots of commands are blocked
-            method @ ("admin_addPeer"
-            | "admin_datadir"
-            | "admin_startRPC"
-            | "admin_startWS"
-            | "admin_stopRPC"
-            | "admin_stopWS"
-            | "db_getHex"
+            method @ ("db_getHex"
             | "db_getString"
             | "db_putHex"
             | "db_putString"
@@ -1114,6 +1106,7 @@ impl Web3ProxyApp {
             | "eth_newBlockFilter"
             | "eth_newFilter"
             | "eth_newPendingTransactionFilter"
+            | "eth_pollSubscriptions"
             | "eth_uninstallFilter") => {
                 // TODO: unsupported command stat
                 // TODO: what error code?
@@ -1138,9 +1131,10 @@ impl Web3ProxyApp {
                     }
                     None => {
                         // TODO: what does geth do if this happens?
-                        return Err(anyhow::anyhow!(
-                            "no servers synced. unknown eth_blockNumber"
-                        ));
+                        // TODO: i think we want a 502 so that haproxy retries on another server
+                        return Err(
+                            anyhow::anyhow!("no servers synced. unknown eth_blockNumber").into(),
+                        );
                     }
                 }
             }
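Several signatures in this diff move from `anyhow::Result<...>` to `Result<..., FrontendErrorResponse>`, and the new error path ends with `.into()`. That only compiles if FrontendErrorResponse can be built from anyhow::Error. A minimal stand-in showing the presumed From impl; the enum here is hypothetical, and only the two variant names (AccessDenied, and a wrapper for anyhow errors) are suggested by the diff:

```rust
// Hypothetical stand-in for web3_proxy's FrontendErrorResponse. The From impl
// is the assumption that makes `anyhow::anyhow!(...).into()` and `?` work.
#[derive(Debug)]
enum FrontendErrorResponse {
    AccessDenied,
    Anyhow(anyhow::Error),
}

impl From<anyhow::Error> for FrontendErrorResponse {
    fn from(err: anyhow::Error) -> Self {
        Self::Anyhow(err)
    }
}

fn no_servers_synced() -> Result<(), FrontendErrorResponse> {
    // mirrors the new error path above
    Err(anyhow::anyhow!("no servers synced. unknown eth_blockNumber").into())
}

fn main() {
    assert!(matches!(
        no_servers_synced(),
        Err(FrontendErrorResponse::Anyhow(_))
    ));
}
```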
@@ -1211,7 +1205,7 @@ impl Web3ProxyApp {
                     ProxyMode::Fastest(0) => None,
                     // TODO: how many balanced rpcs should we send to? configurable? percentage of total?
                     // TODO: what if we do 2 per tier? we want to blast the third party rpcs
-                    // TODO: maybe having the third party rpcs in their own Web3Connections would be good for this
+                    // TODO: maybe having the third party rpcs in their own Web3Rpcs would be good for this
                     ProxyMode::Fastest(x) => Some(x * 4),
                     ProxyMode::Versus => None,
                 };
@@ -1221,6 +1215,7 @@ impl Web3ProxyApp {
                     // if we are sending the transaction privately, no matter the proxy_mode, we send to ALL private rpcs
                     (private_rpcs, None)
                 } else {
+                    // TODO: send to balanced_rpcs AND private_rpcs
                     (&self.balanced_rpcs, default_num)
                 }
             } else {
@@ -1236,6 +1231,7 @@ impl Web3ProxyApp {
                         None,
                         Level::Trace,
                         num,
+                        true,
                     )
                     .await?;
 
@@ -1376,12 +1372,17 @@ impl Web3ProxyApp {
                     ));
                 }
 
-                // TODO: don't return with ? here. send a jsonrpc invalid request
                 let param = Bytes::from_str(
                     params[0]
                         .as_str()
                         .context("parsing params 0 into str then bytes")?,
-                )?;
+                )
+                .map_err(|x| {
+                    trace!("bad request: {:?}", x);
+                    FrontendErrorResponse::BadRequest(
+                        "param 0 could not be read as H256".to_string(),
+                    )
+                })?;
 
                 let hash = H256::from(keccak256(param));
 
@@ -1413,6 +1414,11 @@ impl Web3ProxyApp {
             }
             // anything else gets sent to backend rpcs and cached
             method => {
+                if method.starts_with("admin_") {
+                    // TODO: emit a stat? will probably just be noise
+                    return Err(FrontendErrorResponse::AccessDenied);
+                }
+
                 // emit stats
 
                 // TODO: if no servers synced, wait for them to be synced? probably better to error and let haproxy retry another server
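The old code enumerated six admin_ methods inside the blocked-commands match; the new code (paired with the "block all admin_ rpc commands" TODO item checked off above) rejects the whole namespace by prefix before any other handling. A minimal sketch of the idea:

```rust
// the prefix check replaces an explicit list of admin_ method names
fn check_method(method: &str) -> Result<(), &'static str> {
    if method.starts_with("admin_") {
        // matches the new FrontendErrorResponse::AccessDenied early return
        return Err("access denied");
    }
    Ok(())
}

fn main() {
    assert!(check_method("admin_addPeer").is_err());
    assert!(check_method("admin_startRPC").is_err());
    assert!(check_method("eth_blockNumber").is_ok());
}
```

The prefix check also covers future admin_ methods automatically, which an explicit list would silently miss.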
@@ -1434,7 +1440,8 @@ impl Web3ProxyApp {
                     .await?
                 {
                     BlockNeeded::CacheSuccessForever => Some(ResponseCacheKey {
-                        block: None,
+                        from_block: None,
+                        to_block: None,
                         method: method.to_string(),
                         params: request.params.clone(),
                         cache_errors: false,
@@ -1444,12 +1451,12 @@ impl Web3ProxyApp {
                         block_num,
                         cache_errors,
                     } => {
-                        let (request_block_hash, archive_needed) = self
+                        let (request_block_hash, block_depth) = self
                             .balanced_rpcs
                             .block_hash(authorization, &block_num)
                             .await?;
 
-                        if archive_needed {
+                        if block_depth < self.config.archive_depth {
                             request_metadata
                                 .archive_request
                                 .store(true, atomic::Ordering::Relaxed);
@@ -1461,7 +1468,48 @@ impl Web3ProxyApp {
                             .await?;
 
                         Some(ResponseCacheKey {
-                            block: Some(SavedBlock::new(request_block)),
+                            from_block: Some(SavedBlock::new(request_block)),
+                            to_block: None,
+                            method: method.to_string(),
+                            // TODO: hash here?
+                            params: request.params.clone(),
+                            cache_errors,
+                        })
+                    }
+                    BlockNeeded::CacheRange {
+                        from_block_num,
+                        to_block_num,
+                        cache_errors,
+                    } => {
+                        let (from_block_hash, block_depth) = self
+                            .balanced_rpcs
+                            .block_hash(authorization, &from_block_num)
+                            .await?;
+
+                        if block_depth < self.config.archive_depth {
+                            request_metadata
+                                .archive_request
+                                .store(true, atomic::Ordering::Relaxed);
+                        }
+
+                        let from_block = self
+                            .balanced_rpcs
+                            .block(authorization, &from_block_hash, None)
+                            .await?;
+
+                        let (to_block_hash, _) = self
+                            .balanced_rpcs
+                            .block_hash(authorization, &to_block_num)
+                            .await?;
+
+                        let to_block = self
+                            .balanced_rpcs
+                            .block(authorization, &to_block_hash, None)
+                            .await?;
+
+                        Some(ResponseCacheKey {
+                            from_block: Some(SavedBlock::new(from_block)),
+                            to_block: Some(SavedBlock::new(to_block)),
                             method: method.to_string(),
                             // TODO: hash here?
                             params: request.params.clone(),
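CacheRange is the new third cache policy this match handles. The shape of the enum it consumes, reconstructed from the variants used above with field types simplified to u64 (the real code presumably uses ethers' block-number type):

```rust
// Reconstructed from the match arms above; simplified field types.
#[allow(dead_code)]
enum BlockNeeded {
    // the response can never change, so cache it until evicted
    CacheSuccessForever,
    // the response is pinned to one block (most point queries)
    Cache { block_num: u64, cache_errors: bool },
    // the response covers a block range, e.g. eth_getLogs; both endpoints
    // are resolved to hashes and stored in the ResponseCacheKey
    CacheRange {
        from_block_num: u64,
        to_block_num: u64,
        cache_errors: bool,
    },
}

fn main() {
    // note that archive detection above only inspects the *older* end of a range
    let needed = BlockNeeded::CacheRange {
        from_block_num: 15_000_000,
        to_block_num: 15_000_100,
        cache_errors: true,
    };
    if let BlockNeeded::CacheRange { from_block_num, .. } = needed {
        assert_eq!(from_block_num, 15_000_000);
    }
}
```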
@@ -1476,14 +1524,11 @@ impl Web3ProxyApp {
                 let authorization = authorization.clone();
 
                 if let Some(cache_key) = cache_key {
-                    let request_block_number = cache_key.block.as_ref().map(|x| x.number());
+                    let from_block_num = cache_key.from_block.as_ref().map(|x| x.number());
 
                     self.response_cache
                         .try_get_with(cache_key, async move {
-                            // TODO: retry some failures automatically!
-                            // TODO: try private_rpcs if all the balanced_rpcs fail!
                             // TODO: put the hash here instead of the block number? its in the request already.
 
                             let mut response = self
                                 .balanced_rpcs
                                 .try_proxy_connection(
@@ -1491,7 +1536,7 @@ impl Web3ProxyApp {
                                     &authorization,
                                     request,
                                     Some(&request_metadata),
-                                    request_block_number.as_ref(),
+                                    from_block_num.as_ref(),
                                 )
                                 .await?;
 
@@ -1499,6 +1544,8 @@ impl Web3ProxyApp {
                             response.id = Default::default();
 
                             // TODO: only cache the inner response
+                            // TODO: how are we going to stream this?
+                            // TODO: check response size. if its very large, return it in a custom Error type that bypasses caching
                             Ok::<_, anyhow::Error>(response)
                         })
                         .await
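try_get_with is what makes this cache safe under load: moka coalesces concurrent lookups of the same key into a single in-flight init future, so a burst of identical requests produces one backend call, which is exactly the thundering-herd protection mentioned in the config. A sketch against moka 0.9's API (stand-in key and value types):

```rust
use std::sync::Arc;

use moka::future::Cache;

#[tokio::main]
async fn main() -> Result<(), Arc<anyhow::Error>> {
    let response_cache: Cache<String, String> = Cache::new(100);

    // concurrent callers with an equal key await the same init future,
    // and the init's error comes back shared as Arc<E>
    let response = response_cache
        .try_get_with("eth_chainId".to_string(), async {
            // stand-in for forwarding the request to a backend rpc
            Ok::<_, anyhow::Error>("0x1".to_string())
        })
        .await?;

    assert_eq!(response, "0x1");
    Ok(())
}
```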
@@ -155,7 +155,7 @@ mod tests {
     use std::env;
 
     use web3_proxy::{
-        config::{AppConfig, Web3ConnectionConfig},
+        config::{AppConfig, Web3RpcConfig},
         rpcs::blockchain::ArcBlock,
     };
 
@@ -196,7 +196,7 @@ mod tests {
             min_sum_soft_limit: 1,
             min_synced_rpcs: 1,
             public_requests_per_period: Some(1_000_000),
-            response_cache_max_bytes: 10_usize.pow(7),
+            response_cache_max_bytes: 10_u64.pow(7),
             redirect_public_url: Some("example.com/".to_string()),
             redirect_rpc_key_url: Some("example.com/{{rpc_key_id}}".to_string()),
             ..Default::default()
@@ -204,7 +204,7 @@ mod tests {
         balanced_rpcs: HashMap::from([
             (
                 "anvil".to_string(),
-                Web3ConnectionConfig {
+                Web3RpcConfig {
                     disabled: false,
                     display_name: None,
                     url: anvil.endpoint(),
@@ -219,7 +219,7 @@ mod tests {
             ),
             (
                 "anvil_ws".to_string(),
-                Web3ConnectionConfig {
+                Web3RpcConfig {
                     disabled: false,
                     display_name: None,
                     url: anvil.ws_endpoint(),
@ -11,6 +11,7 @@ mod daemon;
|
|||||||
mod drop_migration_lock;
|
mod drop_migration_lock;
|
||||||
mod list_user_tier;
|
mod list_user_tier;
|
||||||
mod pagerduty;
|
mod pagerduty;
|
||||||
|
mod popularity_contest;
|
||||||
mod rpc_accounting;
|
mod rpc_accounting;
|
||||||
mod sentryd;
|
mod sentryd;
|
||||||
mod transfer_key;
|
mod transfer_key;
|
||||||
@ -80,6 +81,7 @@ enum SubCommand {
|
|||||||
CreateUser(create_user::CreateUserSubCommand),
|
CreateUser(create_user::CreateUserSubCommand),
|
||||||
DropMigrationLock(drop_migration_lock::DropMigrationLockSubCommand),
|
DropMigrationLock(drop_migration_lock::DropMigrationLockSubCommand),
|
||||||
Pagerduty(pagerduty::PagerdutySubCommand),
|
Pagerduty(pagerduty::PagerdutySubCommand),
|
||||||
|
PopularityContest(popularity_contest::PopularityContestSubCommand),
|
||||||
Proxyd(daemon::ProxydSubCommand),
|
Proxyd(daemon::ProxydSubCommand),
|
||||||
RpcAccounting(rpc_accounting::RpcAccountingSubCommand),
|
RpcAccounting(rpc_accounting::RpcAccountingSubCommand),
|
||||||
Sentryd(sentryd::SentrydSubCommand),
|
Sentryd(sentryd::SentrydSubCommand),
|
||||||
@ -372,6 +374,7 @@ fn main() -> anyhow::Result<()> {
|
|||||||
|
|
||||||
x.main(pagerduty_async, top_config).await
|
x.main(pagerduty_async, top_config).await
|
||||||
}
|
}
|
||||||
|
SubCommand::PopularityContest(x) => x.main().await,
|
||||||
SubCommand::Sentryd(x) => {
|
SubCommand::Sentryd(x) => {
|
||||||
if cli_config.sentry_url.is_none() {
|
if cli_config.sentry_url.is_none() {
|
||||||
warn!("sentry_url is not set! Logs will only show in this console");
|
warn!("sentry_url is not set! Logs will only show in this console");
|
||||||
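Note: adding a subcommand with `argh` is exactly the three touches shown above: a module, an enum variant, and a match arm in `main`. A stripped-down sketch of that wiring (illustrative names, not the real CLI):

```rust
use argh::FromArgs;

#[derive(FromArgs, PartialEq, Debug)]
/// example top-level cli
struct TopLevel {
    #[argh(subcommand)]
    sub: SubCommand,
}

#[derive(FromArgs, PartialEq, Debug)]
#[argh(subcommand)]
enum SubCommand {
    PopularityContest(PopularityContestSubCommand),
}

#[derive(FromArgs, PartialEq, Debug)]
/// show which backend rpcs serve the most requests
#[argh(subcommand, name = "popularity_contest")]
struct PopularityContestSubCommand {
    #[argh(positional)]
    /// the web3-proxy url to query
    rpc: String,
}

fn main() {
    // argh parses std::env::args against the derived definitions
    match argh::from_env::<TopLevel>().sub {
        SubCommand::PopularityContest(x) => println!("querying {}", x.rpc),
    }
}
```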
135 web3_proxy/src/bin/web3_proxy_cli/popularity_contest.rs (new file)
@@ -0,0 +1,135 @@
+use std::collections::BTreeMap;
+
+// show what nodes are used most often
+use argh::FromArgs;
+use log::trace;
+use prettytable::{row, Table};
+
+#[derive(FromArgs, PartialEq, Debug)]
+/// Second subcommand.
+#[argh(subcommand, name = "popularity_contest")]
+pub struct PopularityContestSubCommand {
+    #[argh(positional)]
+    /// the web3-proxy url
+    /// TODO: query multiple and add them together
+    rpc: String,
+}
+
+#[derive(Debug)]
+struct BackendRpcData<'a> {
+    name: &'a str,
+    // tier: u64,
+    // backup: bool,
+    // block_data_limit: u64,
+    requests: u64,
+}
+
+impl PopularityContestSubCommand {
+    pub async fn main(self) -> anyhow::Result<()> {
+        let x: serde_json::Value = reqwest::get(format!("{}/status", self.rpc))
+            .await?
+            .json()
+            .await?;
+
+        let conns = x
+            .as_object()
+            .unwrap()
+            .get("balanced_rpcs")
+            .unwrap()
+            .as_object()
+            .unwrap()
+            .get("conns")
+            .unwrap()
+            .as_array()
+            .unwrap();
+
+        let mut by_tier = BTreeMap::<u64, Vec<_>>::new();
+        let mut tier_requests = BTreeMap::<u64, u64>::new();
+        let mut total_requests = 0;
+
+        for conn in conns {
+            let conn = conn.as_object().unwrap();
+
+            let name = conn
+                .get("display_name")
+                .unwrap_or_else(|| conn.get("name").unwrap())
+                .as_str()
+                .unwrap();
+
+            if name.ends_with("http") {
+                continue;
+            }
+
+            let tier = conn.get("tier").unwrap().as_u64().unwrap();
+
+            // let backup = conn.get("backup").unwrap().as_bool().unwrap();
+
+            // let block_data_limit = conn
+            //     .get("block_data_limit")
+            //     .unwrap()
+            //     .as_u64()
+            //     .unwrap_or(u64::MAX);
+
+            let requests = conn.get("total_requests").unwrap().as_u64().unwrap();
+
+            let rpc_data = BackendRpcData {
+                name,
+                // tier,
+                // backup,
+                // block_data_limit,
+                requests,
+            };
+
+            total_requests += rpc_data.requests;
+
+            *tier_requests.entry(tier).or_default() += rpc_data.requests;
+
+            by_tier.entry(tier).or_default().push(rpc_data);
+        }
+
+        trace!("tier_requests: {:#?}", tier_requests);
+        trace!("by_tier: {:#?}", by_tier);
+
+        let mut table = Table::new();
+
+        table.add_row(row![
+            "name",
+            "tier",
+            "rpc_requests",
+            "tier_request_pct",
+            "total_pct"
+        ]);
+
+        let total_requests = total_requests as f32;
+
+        for (tier, rpcs) in by_tier.iter() {
+            let t = (*tier_requests.get(tier).unwrap()) as f32;
+
+            for rpc in rpcs.iter() {
+                let tier_request_pct = if t == 0.0 {
+                    0.0
+                } else {
+                    (rpc.requests as f32) / t * 100.0
+                };
+
+                let total_request_pct = if total_requests == 0.0 {
+                    0.0
+                } else {
+                    (rpc.requests as f32) / total_requests * 100.0
+                };
+
+                table.add_row(row![
+                    rpc.name,
+                    tier,
+                    rpc.requests,
+                    tier_request_pct,
+                    total_request_pct
+                ]);
+            }
+        }
+
+        table.printstd();
+
+        Ok(())
+    }
+}
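Note: with the wiring above, the tool would be run as something like `web3_proxy_cli popularity_contest http://localhost:8544` (the port is an assumption; use whatever the frontend listens on). The long `.as_object().unwrap()` chain works, but `serde_json::Value::pointer` expresses the same traversal without panicking mid-path; a hedged alternative:

```rust
use serde_json::json;

fn main() {
    // stand-in for the /status payload; the real shape comes from the proxy
    let status = json!({
        "balanced_rpcs": {
            "conns": [ { "name": "anvil", "tier": 0, "total_requests": 42 } ]
        }
    });

    // pointer() walks the whole path and yields None instead of panicking,
    // so one expect() documents the single assumption about the shape
    let conns = status
        .pointer("/balanced_rpcs/conns")
        .and_then(|v| v.as_array())
        .expect("/status should contain balanced_rpcs.conns");

    for conn in conns {
        let name = conn["name"].as_str().unwrap_or("?");
        let requests = conn["total_requests"].as_u64().unwrap_or(0);
        println!("{name}: {requests}");
    }
}
```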
@@ -4,38 +4,36 @@ use ethers::{
     prelude::{BlockNumber, U64},
     types::H256,
 };
-use log::{trace, warn};
+use log::warn;
 use serde_json::json;
 use std::sync::Arc;

-use crate::{frontend::authorization::Authorization, rpcs::connections::Web3Connections};
+use crate::{frontend::authorization::Authorization, rpcs::many::Web3Rpcs};

 #[allow(non_snake_case)]
-pub fn block_num_to_U64(block_num: BlockNumber, latest_block: U64) -> U64 {
+pub fn block_num_to_U64(block_num: BlockNumber, latest_block: U64) -> (U64, bool) {
     match block_num {
-        BlockNumber::Earliest => {
-            U64::zero()
-        }
+        BlockNumber::Earliest => (U64::zero(), false),
         BlockNumber::Finalized => {
             warn!("finalized block requested! not yet implemented!");
-            latest_block - 10
+            (latest_block - 10, false)
         }
         BlockNumber::Latest => {
             // change "latest" to a number
-            latest_block
+            (latest_block, true)
         }
         BlockNumber::Number(x) => {
             // we already have a number
-            x
+            (x, false)
         }
         BlockNumber::Pending => {
+            // modified is false because we want the backend to see "pending"
             // TODO: think more about how to handle Pending
-            latest_block
+            (latest_block, false)
         }
         BlockNumber::Safe => {
             warn!("finalized block requested! not yet implemented!");
-            latest_block - 3
+            (latest_block - 3, false)
         }
     }
 }
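Note: the signature change is the heart of this hunk: the function now reports whether the tag was rewritten, not just the resolved number, so callers only mutate the outgoing params when "latest" actually became a concrete number. The contract, restated as a tiny self-contained sketch (plain `u64` instead of ethers' `U64`):

```rust
// simplified restatement of block_num_to_U64's contract, not the proxy's code
#[derive(Debug, PartialEq)]
enum Tag {
    Earliest,
    Latest,
    Pending,
    Number(u64),
}

fn resolve(tag: Tag, latest: u64) -> (u64, bool) {
    match tag {
        Tag::Earliest => (0, false),
        // "latest" becomes a concrete number and the request should be rewritten
        Tag::Latest => (latest, true),
        // resolved for cache-keying, but the backend should still see "pending"
        Tag::Pending => (latest, false),
        Tag::Number(x) => (x, false),
    }
}

fn main() {
    assert_eq!(resolve(Tag::Latest, 100), (100, true));
    assert_eq!(resolve(Tag::Pending, 100), (100, false));
    assert_eq!(resolve(Tag::Number(7), 100), (7, false));
}
```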
@@ -47,7 +45,7 @@ pub async fn clean_block_number(
     params: &mut serde_json::Value,
     block_param_id: usize,
     latest_block: U64,
-    rpcs: &Web3Connections,
+    rpcs: &Web3Rpcs,
 ) -> anyhow::Result<U64> {
     match params.as_array_mut() {
         None => {
@@ -58,7 +56,7 @@ pub async fn clean_block_number(
             None => {
                 if params.len() == block_param_id {
                     // add the latest block number to the end of the params
-                    params.push(serde_json::to_value(latest_block)?);
+                    params.push(json!(latest_block));
                 } else {
                     // don't modify the request. only cache with current block
                     // TODO: more useful log that include the
@@ -69,37 +67,41 @@ pub async fn clean_block_number(
                 Ok(latest_block)
             }
             Some(x) => {
-                let start = x.clone();
-
                 // convert the json value to a BlockNumber
-                let block_num = if let Some(obj) = x.as_object_mut() {
+                let (block_num, change) = if let Some(obj) = x.as_object_mut() {
                     // it might be a Map like `{"blockHash": String("0xa5626dc20d3a0a209b1de85521717a3e859698de8ce98bca1b16822b7501f74b")}`
                     if let Some(block_hash) = obj.remove("blockHash") {
                         let block_hash: H256 =
                             serde_json::from_value(block_hash).context("decoding blockHash")?;

-                        let block = rpcs.block(authorization, &block_hash, None).await?;
+                        let block = rpcs
+                            .block(authorization, &block_hash, None)
+                            .await
+                            .context("fetching block number from hash")?;

+                        // TODO: set change to true? i think not we should probably use hashes for everything.
+                        (
                             block
                                 .number
-                                .expect("blocks here should always have numbers")
+                                .expect("blocks here should always have numbers"),
+                            false,
+                        )
                     } else {
                         return Err(anyhow::anyhow!("blockHash missing"));
                     }
                 } else {
                     // it might be a string like "latest" or a block number
                     // TODO: "BlockNumber" needs a better name
-                    let block_number = serde_json::from_value::<BlockNumber>(x.take())?;
+                    // TODO: use take instead of clone
+                    let block_number = serde_json::from_value::<BlockNumber>(x.clone())
+                        .context("checking params for BlockNumber")?;

                     block_num_to_U64(block_number, latest_block)
                 };

                 // if we changed "latest" to a number, update the params to match
-                *x = serde_json::to_value(block_num)?;
-                // TODO: only do this if trace logging is enabled
-                if x.as_u64() != start.as_u64() {
-                    trace!("changed {} to {}", start, x);
+                if change {
+                    *x = json!(block_num);
                 }

                 Ok(block_num)
@@ -112,7 +114,15 @@ pub async fn clean_block_number(
 pub enum BlockNeeded {
     CacheSuccessForever,
     CacheNever,
-    Cache { block_num: U64, cache_errors: bool },
+    Cache {
+        block_num: U64,
+        cache_errors: bool,
+    },
+    CacheRange {
+        from_block_num: U64,
+        to_block_num: U64,
+        cache_errors: bool,
+    },
 }

 pub async fn block_needed(
@@ -120,21 +130,22 @@ pub async fn block_needed(
     method: &str,
     params: Option<&mut serde_json::Value>,
     head_block_num: U64,
-    rpcs: &Web3Connections,
+    rpcs: &Web3Rpcs,
 ) -> anyhow::Result<BlockNeeded> {
-    // if no params, no block is needed
     let params = if let Some(params) = params {
+        // grab the params so we can inspect and potentially modify them
         params
     } else {
+        // if no params, no block is needed
         // TODO: check all the methods with no params, some might not be cacheable
-        // caching for one block should always be okay
+        // caching with the head block /should/ always be okay
         return Ok(BlockNeeded::Cache {
            block_num: head_block_num,
            cache_errors: true,
        });
    };

-    // get the index for the BlockNumber or return None to say no block is needed.
+    // get the index for the BlockNumber
     // The BlockNumber is usually the last element.
     // TODO: double check these. i think some of the getBlock stuff will never need archive
     let block_param_id = match method {
@@ -168,39 +179,44 @@ pub async fn block_needed(
                 .as_object_mut()
                 .ok_or_else(|| anyhow::anyhow!("invalid format"))?;

-            if let Some(x) = obj.get_mut("fromBlock") {
-                let block_num: BlockNumber = serde_json::from_value(x.take())?;
-
-                let block_num = block_num_to_U64(block_num, head_block_num);
-
-                *x = json!(block_num);
-
-                // TODO: maybe don't return. instead check toBlock too?
-                // TODO: if there is a very wide fromBlock and toBlock, we need to check that our rpcs have both!
-                return Ok(BlockNeeded::Cache {
-                    block_num,
-                    cache_errors: false,
-                });
-            }
-
-            if let Some(x) = obj.get_mut("toBlock") {
-                let block_num: BlockNumber = serde_json::from_value(x.take())?;
-
-                let block_num = block_num_to_U64(block_num, head_block_num);
-
-                *x = json!(block_num);
-
-                return Ok(BlockNeeded::Cache {
-                    block_num,
-                    cache_errors: false,
-                });
-            }
-
             if obj.contains_key("blockHash") {
                 1
             } else {
-                return Ok(BlockNeeded::Cache {
-                    block_num: head_block_num,
+                let from_block_num = if let Some(x) = obj.get_mut("fromBlock") {
+                    // TODO: use .take instead of clone
+                    let block_num: BlockNumber = serde_json::from_value(x.clone())?;
+
+                    let (block_num, change) = block_num_to_U64(block_num, head_block_num);
+
+                    if change {
+                        *x = json!(block_num);
+                    }
+
+                    block_num
+                } else {
+                    let (block_num, _) = block_num_to_U64(BlockNumber::Earliest, head_block_num);
+
+                    block_num
+                };
+
+                let to_block_num = if let Some(x) = obj.get_mut("toBlock") {
+                    // TODO: use .take instead of clone
+                    let block_num: BlockNumber = serde_json::from_value(x.clone())?;
+
+                    let (block_num, change) = block_num_to_U64(block_num, head_block_num);
+
+                    if change {
+                        *x = json!(block_num);
+                    }
+
+                    block_num
+                } else {
+                    head_block_num
+                };
+
+                return Ok(BlockNeeded::CacheRange {
+                    from_block_num: from_block_num,
+                    to_block_num: to_block_num,
                     cache_errors: true,
                 });
             }
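Note: the new `CacheRange` variant is what lets `eth_getLogs`-style requests carry both ends of their range to the cache instead of early-returning on `fromBlock` alone. A sketch of how a dispatcher might consume it (field names mirror the hunk; the surrounding logic is illustrative):

```rust
// mirrors the enum in the hunk above, with plain u64 for brevity
enum BlockNeeded {
    CacheSuccessForever,
    CacheNever,
    Cache { block_num: u64, cache_errors: bool },
    CacheRange { from_block_num: u64, to_block_num: u64, cache_errors: bool },
}

/// illustrative consumer: which blocks must go into the response cache key?
fn cache_key_blocks(needed: &BlockNeeded) -> Option<(u64, u64)> {
    match needed {
        BlockNeeded::CacheSuccessForever | BlockNeeded::CacheNever => None,
        BlockNeeded::Cache { block_num, .. } => Some((*block_num, *block_num)),
        // a range is only served correctly by rpcs holding *both* ends,
        // so both bounds belong in the key
        BlockNeeded::CacheRange { from_block_num, to_block_num, .. } => {
            Some((*from_block_num, *to_block_num))
        }
    }
}

fn main() {
    let needed = BlockNeeded::CacheRange { from_block_num: 1, to_block_num: 10, cache_errors: true };
    assert_eq!(cache_key_blocks(&needed), Some((1, 10)));
}
```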
@@ -1,6 +1,5 @@
 use crate::rpcs::blockchain::BlockHashesCache;
-use crate::rpcs::connection::Web3Connection;
-use crate::rpcs::request::OpenRequestHandleMetrics;
+use crate::rpcs::one::Web3Rpc;
 use crate::{app::AnyhowJoinHandle, rpcs::blockchain::ArcBlock};
 use argh::FromArgs;
 use ethers::prelude::TxHash;
@@ -12,8 +11,8 @@ use serde::Deserialize;
 use std::sync::Arc;
 use tokio::sync::broadcast;

-pub type BlockAndRpc = (Option<ArcBlock>, Arc<Web3Connection>);
-pub type TxHashAndRpc = (TxHash, Arc<Web3Connection>);
+pub type BlockAndRpc = (Option<ArcBlock>, Arc<Web3Rpc>);
+pub type TxHashAndRpc = (TxHash, Arc<Web3Rpc>);

 #[derive(Debug, FromArgs)]
 /// Web3_proxy is a fast caching and load balancing proxy for web3 (Ethereum or similar) JsonRPC servers.
@@ -42,15 +41,15 @@ pub struct CliConfig {
 #[derive(Clone, Debug, Deserialize)]
 pub struct TopConfig {
     pub app: AppConfig,
-    pub balanced_rpcs: HashMap<String, Web3ConnectionConfig>,
+    pub balanced_rpcs: HashMap<String, Web3RpcConfig>,
     // TODO: instead of an option, give it a default
-    pub private_rpcs: Option<HashMap<String, Web3ConnectionConfig>>,
+    pub private_rpcs: Option<HashMap<String, Web3RpcConfig>>,
     /// unknown config options get put here
     #[serde(flatten, default = "HashMap::default")]
     pub extra: HashMap<String, serde_json::Value>,
 }

-/// shared configuration between Web3Connections
+/// shared configuration between Web3Rpcs
 // TODO: no String, only &str
 #[derive(Clone, Debug, Default, Deserialize)]
 pub struct AppConfig {
@@ -59,6 +58,10 @@ pub struct AppConfig {
     #[serde(default = "default_allowed_origin_requests_per_period")]
     pub allowed_origin_requests_per_period: HashMap<String, u64>,

+    /// erigon defaults to pruning beyond 90,000 blocks
+    #[serde(default = "default_archive_depth")]
+    pub archive_depth: u64,
+
     /// EVM chain id. 1 for ETH
     /// TODO: better type for chain_id? max of `u64::MAX / 2 - 36` <https://github.com/ethereum/EIPs/issues/2294>
     pub chain_id: u64,
@@ -135,7 +138,7 @@ pub struct AppConfig {

     /// RPC responses are cached locally
     #[serde(default = "default_response_cache_max_bytes")]
-    pub response_cache_max_bytes: usize,
+    pub response_cache_max_bytes: u64,

     /// the stats page url for an anonymous user.
     pub redirect_public_url: Option<String>,
@@ -159,6 +162,10 @@ pub struct AppConfig {
     pub extra: HashMap<String, serde_json::Value>,
 }

+fn default_archive_depth() -> u64 {
+    90_000
+}
+
 fn default_allowed_origin_requests_per_period() -> HashMap<String, u64> {
     HashMap::new()
 }
@@ -183,15 +190,15 @@ fn default_login_rate_limit_per_period() -> u64 {
     10
 }

-fn default_response_cache_max_bytes() -> usize {
+fn default_response_cache_max_bytes() -> u64 {
     // TODO: default to some percentage of the system?
     // 100 megabytes
-    10_usize.pow(8)
+    10u64.pow(8)
 }

 /// Configuration for a backend web3 RPC server
 #[derive(Clone, Debug, Deserialize)]
-pub struct Web3ConnectionConfig {
+pub struct Web3RpcConfig {
     /// simple way to disable a connection without deleting the row
     #[serde(default)]
     pub disabled: bool,
@@ -223,9 +230,9 @@ fn default_tier() -> u64 {
     0
 }

-impl Web3ConnectionConfig {
-    /// Create a Web3Connection from config
-    /// TODO: move this into Web3Connection? (just need to make things pub(crate))
+impl Web3RpcConfig {
+    /// Create a Web3Rpc from config
+    /// TODO: move this into Web3Rpc? (just need to make things pub(crate))
     #[allow(clippy::too_many_arguments)]
     pub async fn spawn(
         self,
@@ -238,13 +245,9 @@ impl Web3ConnectionConfig {
         block_map: BlockHashesCache,
         block_sender: Option<flume::Sender<BlockAndRpc>>,
         tx_id_sender: Option<flume::Sender<TxHashAndRpc>>,
-        open_request_handle_metrics: Arc<OpenRequestHandleMetrics>,
-    ) -> anyhow::Result<(Arc<Web3Connection>, AnyhowJoinHandle<()>)> {
+    ) -> anyhow::Result<(Arc<Web3Rpc>, AnyhowJoinHandle<()>)> {
         if !self.extra.is_empty() {
-            warn!(
-                "unknown Web3ConnectionConfig fields!: {:?}",
-                self.extra.keys()
-            );
+            warn!("unknown Web3RpcConfig fields!: {:?}", self.extra.keys());
         }

         let hard_limit = match (self.hard_limit, redis_pool) {
@@ -266,7 +269,7 @@ impl Web3ConnectionConfig {

         let backup = self.backup.unwrap_or(false);

-        Web3Connection::spawn(
+        Web3Rpc::spawn(
             name,
             self.display_name,
             chain_id,
@@ -283,7 +286,6 @@ impl Web3ConnectionConfig {
             tx_id_sender,
             true,
             self.tier,
-            open_request_handle_metrics,
         )
         .await
     }
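Note: `#[serde(default = "default_archive_depth")]` is what keeps the new field backwards-compatible: configs written before this commit still deserialize, picking up 90,000. A minimal demonstration of the mechanism (hypothetical two-field struct, not the real `AppConfig`):

```rust
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct AppConfigSketch {
    chain_id: u64,
    // a missing key falls back to the named function instead of erroring
    #[serde(default = "default_archive_depth")]
    archive_depth: u64,
}

fn default_archive_depth() -> u64 {
    90_000
}

fn main() {
    // an old config with no archive_depth still parses
    let cfg: AppConfigSketch = toml::from_str("chain_id = 1").unwrap();
    assert_eq!(cfg.archive_depth, 90_000);
}
```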
@@ -2,7 +2,7 @@

 use super::errors::FrontendErrorResponse;
 use crate::app::{AuthorizationChecks, Web3ProxyApp, APP_USER_AGENT};
-use crate::rpcs::connection::Web3Connection;
+use crate::rpcs::one::Web3Rpc;
 use crate::user_token::UserBearerToken;
 use anyhow::Context;
 use axum::headers::authorization::Bearer;
@@ -80,7 +80,7 @@ pub struct RequestMetadata {
     // TODO: "archive" isn't really a boolean.
     pub archive_request: AtomicBool,
     /// if this is empty, there was a cache_hit
-    pub backend_requests: Mutex<Vec<Arc<Web3Connection>>>,
+    pub backend_requests: Mutex<Vec<Arc<Web3Rpc>>>,
     pub no_servers: AtomicU64,
     pub error_response: AtomicBool,
     pub response_bytes: AtomicU64,
@@ -11,7 +11,7 @@ use axum::{
 use derive_more::From;
 use http::header::InvalidHeaderValue;
 use ipnet::AddrParseError;
-use log::{trace, warn};
+use log::{debug, trace, warn};
 use migration::sea_orm::DbErr;
 use redis_rate_limiter::redis::RedisError;
 use reqwest::header::ToStrError;
@@ -25,6 +25,7 @@ pub type FrontendResult = Result<Response, FrontendErrorResponse>;
 pub enum FrontendErrorResponse {
     AccessDenied,
     Anyhow(anyhow::Error),
+    BadRequest(String),
     SemaphoreAcquireError(AcquireError),
     Database(DbErr),
     HeadersError(headers::Error),
@@ -71,18 +72,17 @@ impl FrontendErrorResponse {
                 ),
             )
         }
-        // Self::(err) => {
-        //     warn!("boxed err={:?}", err);
-        //     (
-        //         StatusCode::INTERNAL_SERVER_ERROR,
-        //         JsonRpcForwardedResponse::from_str(
-        //             // TODO: make this better. maybe include the error type?
-        //             "boxed error!",
-        //             Some(StatusCode::INTERNAL_SERVER_ERROR.as_u16().into()),
-        //             None,
-        //         ),
-        //     )
-        // }
+        Self::BadRequest(err) => {
+            debug!("BAD_REQUEST: {}", err);
+            (
+                StatusCode::BAD_REQUEST,
+                JsonRpcForwardedResponse::from_str(
+                    &format!("bad request: {}", err),
+                    Some(StatusCode::BAD_REQUEST.as_u16().into()),
+                    None,
+                ),
+            )
+        }
         Self::Database(err) => {
             warn!("database err={:?}", err);
             (
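Note: the commented-out catch-all becomes a real `BadRequest(String)` arm, so malformed input maps to a 400 with a JSON-RPC style body instead of falling through to a 500. A hedged sketch of what a call site gains (illustrative parsing, not the proxy's):

```rust
// illustrative single-variant enum; the real FrontendErrorResponse has many
#[derive(Debug)]
enum FrontendErrorResponse {
    BadRequest(String),
}

fn parse_block_param(raw: &str) -> Result<u64, FrontendErrorResponse> {
    raw.parse::<u64>().map_err(|err| {
        // the into_response arm above turns this into StatusCode::BAD_REQUEST
        FrontendErrorResponse::BadRequest(format!("invalid block number: {err}"))
    })
}

fn main() {
    assert!(parse_block_param("not-a-number").is_err());
    assert!(matches!(parse_block_param("18"), Ok(18)));
}
```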
@@ -1,4 +1,6 @@
 //! `frontend` contains HTTP and websocket endpoints for use by users and admins.
+//!
+//! Important reading about axum extractors: https://docs.rs/axum/latest/axum/extract/index.html#the-order-of-extractors

 pub mod admin;
 pub mod authorization;
@@ -31,6 +33,7 @@ pub enum FrontendResponseCaches {
 // TODO: what should this cache's value be?
 pub type FrontendResponseCache =
     Cache<FrontendResponseCaches, Arc<serde_json::Value>, hashbrown::hash_map::DefaultHashBuilder>;
+pub type FrontendHealthCache = Cache<(), bool, hashbrown::hash_map::DefaultHashBuilder>;

 /// Start the frontend server.
 pub async fn serve(port: u16, proxy_app: Arc<Web3ProxyApp>) -> anyhow::Result<()> {
@@ -38,7 +41,11 @@ pub async fn serve(port: u16, proxy_app: Arc<Web3ProxyApp>) -> anyhow::Result<()
     // TODO: a moka cache is probably way overkill for this.
     // no need for max items. only expire because of time to live
     let response_cache: FrontendResponseCache = Cache::builder()
-        .time_to_live(Duration::from_secs(1))
+        .time_to_live(Duration::from_secs(2))
+        .build_with_hasher(hashbrown::hash_map::DefaultHashBuilder::default());
+
+    let health_cache: FrontendHealthCache = Cache::builder()
+        .time_to_live(Duration::from_millis(100))
         .build_with_hasher(hashbrown::hash_map::DefaultHashBuilder::default());

     // TODO: read config for if fastest/versus should be available publicly. default off
@@ -182,6 +189,7 @@ pub async fn serve(port: u16, proxy_app: Arc<Web3ProxyApp>) -> anyhow::Result<()
         .layer(Extension(proxy_app.clone()))
         // frontend caches
         .layer(Extension(response_cache))
+        .layer(Extension(health_cache))
         // 404 for any unknown routes
         .fallback(errors::handler_404);

@@ -199,7 +207,6 @@ pub async fn serve(port: u16, proxy_app: Arc<Web3ProxyApp>) -> anyhow::Result<()
     - axum::extract::ConnectInfo (if not behind proxy)
     */
     let service = app.into_make_service_with_connect_info::<SocketAddr>();
-    // let service = app.into_make_service();

     // `axum::Server` is a re-export of `hyper::Server`
     axum::Server::bind(&addr)
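Note: a `Cache<(), bool>` with a 100ms time-to-live is effectively a debounced memo of a single value: however many load-balancer probes arrive, the real synced-check runs at most about ten times a second. A sketch that makes the coalescing visible (the counter exists only for the demonstration):

```rust
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::Duration;

use moka::future::Cache;

static REAL_CHECKS: AtomicU64 = AtomicU64::new(0);

async fn synced_check() -> bool {
    REAL_CHECKS.fetch_add(1, Ordering::Relaxed);
    true
}

#[tokio::main]
async fn main() {
    let health_cache: Cache<(), bool> = Cache::builder()
        .time_to_live(Duration::from_millis(100))
        .build();

    for _ in 0..1_000 {
        // get_with runs the future only on a miss; every other caller
        // inside the ttl window gets the memoized bool
        let _synced = health_cache.get_with((), synced_check()).await;
    }

    // far fewer real checks ran than requests arrived
    assert!(REAL_CHECKS.load(Ordering::Relaxed) < 1_000);
}
```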
@@ -8,7 +8,7 @@ use axum::extract::Path;
 use axum::headers::{Origin, Referer, UserAgent};
 use axum::TypedHeader;
 use axum::{response::IntoResponse, Extension, Json};
-use axum_client_ip::ClientIp;
+use axum_client_ip::InsecureClientIp;
 use axum_macros::debug_handler;
 use itertools::Itertools;
 use std::sync::Arc;
@@ -19,7 +19,7 @@ use std::sync::Arc;
 #[debug_handler]
 pub async fn proxy_web3_rpc(
     Extension(app): Extension<Arc<Web3ProxyApp>>,
-    ip: ClientIp,
+    ip: InsecureClientIp,
     origin: Option<TypedHeader<Origin>>,
     Json(payload): Json<JsonRpcRequestEnum>,
 ) -> FrontendResult {
@@ -29,7 +29,7 @@ pub async fn proxy_web3_rpc(
 #[debug_handler]
 pub async fn fastest_proxy_web3_rpc(
     Extension(app): Extension<Arc<Web3ProxyApp>>,
-    ip: ClientIp,
+    ip: InsecureClientIp,
     origin: Option<TypedHeader<Origin>>,
     Json(payload): Json<JsonRpcRequestEnum>,
 ) -> FrontendResult {
@@ -41,7 +41,7 @@ pub async fn fastest_proxy_web3_rpc(
 #[debug_handler]
 pub async fn versus_proxy_web3_rpc(
     Extension(app): Extension<Arc<Web3ProxyApp>>,
-    ip: ClientIp,
+    ip: InsecureClientIp,
     origin: Option<TypedHeader<Origin>>,
     Json(payload): Json<JsonRpcRequestEnum>,
 ) -> FrontendResult {
@@ -50,7 +50,7 @@ pub async fn versus_proxy_web3_rpc(

 async fn _proxy_web3_rpc(
     app: Arc<Web3ProxyApp>,
-    ClientIp(ip): ClientIp,
+    InsecureClientIp(ip): InsecureClientIp,
     origin: Option<TypedHeader<Origin>>,
     payload: JsonRpcRequestEnum,
     proxy_mode: ProxyMode,
@@ -91,7 +91,7 @@ async fn _proxy_web3_rpc(
 #[debug_handler]
 pub async fn proxy_web3_rpc_with_key(
     Extension(app): Extension<Arc<Web3ProxyApp>>,
-    ip: ClientIp,
+    ip: InsecureClientIp,
     origin: Option<TypedHeader<Origin>>,
     referer: Option<TypedHeader<Referer>>,
     user_agent: Option<TypedHeader<UserAgent>>,
@@ -114,7 +114,7 @@ pub async fn proxy_web3_rpc_with_key(
 #[debug_handler]
 pub async fn fastest_proxy_web3_rpc_with_key(
     Extension(app): Extension<Arc<Web3ProxyApp>>,
-    ip: ClientIp,
+    ip: InsecureClientIp,
     origin: Option<TypedHeader<Origin>>,
     referer: Option<TypedHeader<Referer>>,
     user_agent: Option<TypedHeader<UserAgent>>,
@@ -137,7 +137,7 @@ pub async fn fastest_proxy_web3_rpc_with_key(
 #[debug_handler]
 pub async fn versus_proxy_web3_rpc_with_key(
     Extension(app): Extension<Arc<Web3ProxyApp>>,
-    ip: ClientIp,
+    ip: InsecureClientIp,
     origin: Option<TypedHeader<Origin>>,
     referer: Option<TypedHeader<Referer>>,
     user_agent: Option<TypedHeader<UserAgent>>,
@@ -160,7 +160,7 @@ pub async fn versus_proxy_web3_rpc_with_key(
 #[allow(clippy::too_many_arguments)]
 async fn _proxy_web3_rpc_with_key(
     app: Arc<Web3ProxyApp>,
-    ClientIp(ip): ClientIp,
+    InsecureClientIp(ip): InsecureClientIp,
     origin: Option<TypedHeader<Origin>>,
     referer: Option<TypedHeader<Referer>>,
     user_agent: Option<TypedHeader<UserAgent>>,
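Note: the mechanical `ClientIp` to `InsecureClientIp` swap here and in the websocket handlers below follows axum-client-ip 0.4's split into a header-trusting extractor (fine behind haproxy, spoofable when exposed directly) and a stricter `SecureClientIp`. A minimal standalone handler in the axum 0.6-era API this codebase uses:

```rust
use std::net::SocketAddr;

use axum::{routing::get, Router};
use axum_client_ip::InsecureClientIp;

// InsecureClientIp checks x-forwarded-for / x-real-ip / forwarded first and
// falls back to the socket address; only trust it behind a proxy you control
async fn whoami(InsecureClientIp(ip): InsecureClientIp) -> String {
    format!("hello {ip}")
}

#[tokio::main]
async fn main() {
    let app = Router::new().route("/", get(whoami));

    // ConnectInfo is what enables the socket-address fallback
    axum::Server::bind(&"0.0.0.0:3000".parse().unwrap())
        .serve(app.into_make_service_with_connect_info::<SocketAddr>())
        .await
        .unwrap();
}
```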
@@ -17,7 +17,7 @@ use axum::{
     response::{IntoResponse, Redirect},
     Extension, TypedHeader,
 };
-use axum_client_ip::ClientIp;
+use axum_client_ip::InsecureClientIp;
 use axum_macros::debug_handler;
 use futures::SinkExt;
 use futures::{
@@ -49,7 +49,7 @@ pub enum ProxyMode {
 #[debug_handler]
 pub async fn websocket_handler(
     Extension(app): Extension<Arc<Web3ProxyApp>>,
-    ip: ClientIp,
+    ip: InsecureClientIp,
     origin: Option<TypedHeader<Origin>>,
     ws_upgrade: Option<WebSocketUpgrade>,
 ) -> FrontendResult {
@@ -61,7 +61,7 @@ pub async fn websocket_handler(
 #[debug_handler]
 pub async fn fastest_websocket_handler(
     Extension(app): Extension<Arc<Web3ProxyApp>>,
-    ip: ClientIp,
+    ip: InsecureClientIp,
     origin: Option<TypedHeader<Origin>>,
     ws_upgrade: Option<WebSocketUpgrade>,
 ) -> FrontendResult {
@@ -75,7 +75,7 @@ pub async fn fastest_websocket_handler(
 #[debug_handler]
 pub async fn versus_websocket_handler(
     Extension(app): Extension<Arc<Web3ProxyApp>>,
-    ip: ClientIp,
+    ip: InsecureClientIp,
     origin: Option<TypedHeader<Origin>>,
     ws_upgrade: Option<WebSocketUpgrade>,
 ) -> FrontendResult {
@@ -86,7 +86,7 @@ pub async fn versus_websocket_handler(
 async fn _websocket_handler(
     proxy_mode: ProxyMode,
     app: Arc<Web3ProxyApp>,
-    ClientIp(ip): ClientIp,
+    InsecureClientIp(ip): InsecureClientIp,
     origin: Option<TypedHeader<Origin>>,
     ws_upgrade: Option<WebSocketUpgrade>,
 ) -> FrontendResult {
@@ -121,7 +121,7 @@ async fn _websocket_handler(
 #[debug_handler]
 pub async fn websocket_handler_with_key(
     Extension(app): Extension<Arc<Web3ProxyApp>>,
-    ip: ClientIp,
+    ip: InsecureClientIp,
     Path(rpc_key): Path<String>,
     origin: Option<TypedHeader<Origin>>,
     referer: Option<TypedHeader<Referer>>,
@@ -144,7 +144,7 @@ pub async fn websocket_handler_with_key(
 #[debug_handler]
 pub async fn fastest_websocket_handler_with_key(
     Extension(app): Extension<Arc<Web3ProxyApp>>,
-    ip: ClientIp,
+    ip: InsecureClientIp,
     Path(rpc_key): Path<String>,
     origin: Option<TypedHeader<Origin>>,
     referer: Option<TypedHeader<Referer>>,
@@ -168,7 +168,7 @@ pub async fn fastest_websocket_handler_with_key(
 #[debug_handler]
 pub async fn versus_websocket_handler_with_key(
     Extension(app): Extension<Arc<Web3ProxyApp>>,
-    ip: ClientIp,
+    ip: InsecureClientIp,
     Path(rpc_key): Path<String>,
     origin: Option<TypedHeader<Origin>>,
     referer: Option<TypedHeader<Referer>>,
@@ -192,7 +192,7 @@ pub async fn versus_websocket_handler_with_key(
 async fn _websocket_handler_with_key(
     proxy_mode: ProxyMode,
     app: Arc<Web3ProxyApp>,
-    ClientIp(ip): ClientIp,
+    InsecureClientIp(ip): InsecureClientIp,
     rpc_key: String,
     origin: Option<TypedHeader<Origin>>,
     referer: Option<TypedHeader<Referer>>,
@@ -3,7 +3,7 @@
 //! For ease of development, users can currently access these endponts.
 //! They will eventually move to another port.

-use super::{FrontendResponseCache, FrontendResponseCaches};
+use super::{FrontendHealthCache, FrontendResponseCache, FrontendResponseCaches};
 use crate::app::{Web3ProxyApp, APP_USER_AGENT};
 use axum::{http::StatusCode, response::IntoResponse, Extension, Json};
 use axum_macros::debug_handler;
@@ -12,9 +12,15 @@ use std::sync::Arc;

 /// Health check page for load balancers to use.
 #[debug_handler]
-pub async fn health(Extension(app): Extension<Arc<Web3ProxyApp>>) -> impl IntoResponse {
-    // TODO: add a check that we aren't shutting down
-    if app.balanced_rpcs.synced() {
+pub async fn health(
+    Extension(app): Extension<Arc<Web3ProxyApp>>,
+    Extension(health_cache): Extension<FrontendHealthCache>,
+) -> impl IntoResponse {
+    let synced = health_cache
+        .get_with((), async { app.balanced_rpcs.synced() })
+        .await;
+
+    if synced {
         (StatusCode::OK, "OK")
     } else {
         (StatusCode::SERVICE_UNAVAILABLE, ":(")
@@ -17,7 +17,7 @@ use axum::{
     response::IntoResponse,
     Extension, Json, TypedHeader,
 };
-use axum_client_ip::ClientIp;
+use axum_client_ip::InsecureClientIp;
 use axum_macros::debug_handler;
 use chrono::{TimeZone, Utc};
 use entities::sea_orm_active_enums::{LogLevel, Role};
@@ -65,7 +65,7 @@ use crate::{PostLogin, PostLoginQuery};
 #[debug_handler]
 pub async fn user_login_get(
     Extension(app): Extension<Arc<Web3ProxyApp>>,
-    ClientIp(ip): ClientIp,
+    InsecureClientIp(ip): InsecureClientIp,
     // TODO: what does axum's error handling look like if the path fails to parse?
     Path(mut params): Path<HashMap<String, String>>,
 ) -> FrontendResult {
@@ -165,7 +165,7 @@ pub async fn user_login_get(
 #[debug_handler]
 pub async fn user_login_post(
     Extension(app): Extension<Arc<Web3ProxyApp>>,
-    ClientIp(ip): ClientIp,
+    InsecureClientIp(ip): InsecureClientIp,
     Query(query): Query<PostLoginQuery>,
     Json(payload): Json<PostLogin>,
 ) -> FrontendResult {
@@ -5,7 +5,6 @@ pub mod block_number;
 pub mod config;
 pub mod frontend;
 pub mod jsonrpc;
-pub mod metered;
 pub mod metrics_frontend;
 pub mod pagerduty;
 pub mod rpcs;

@@ -1,12 +1,6 @@
 //! A module providing the `JsonRpcErrorCount` metric.

 use ethers::providers::ProviderError;
-use metered::metric::{Advice, Enter, OnResult};
-use metered::{
-    atomic::AtomicInt,
-    clear::Clear,
-    metric::{Counter, Metric},
-};
 use serde::Serialize;
 use std::ops::Deref;

@@ -1,12 +1,6 @@
 //! A module providing the `JsonRpcErrorCount` metric.

 use ethers::providers::ProviderError;
-use metered::metric::{Advice, Enter, OnResult};
-use metered::{
-    atomic::AtomicInt,
-    clear::Clear,
-    metric::{Counter, Metric},
-};
 use serde::Serialize;
 use std::ops::Deref;

@@ -23,13 +23,14 @@ pub async fn serve(app: Arc<Web3ProxyApp>, port: u16) -> anyhow::Result<()> {
     // TODO: into_make_service is enough if we always run behind a proxy. make into_make_service_with_connect_info optional?

     /*
-    It sequentially looks for an IP in:
+    InsecureClientIp sequentially looks for an IP in:
     - x-forwarded-for header (de-facto standard)
     - x-real-ip header
     - forwarded header (new standard)
     - axum::extract::ConnectInfo (if not behind proxy)

-    So we probably won't need into_make_service_with_connect_info, but it shouldn't hurt
+    Since we run behind haproxy, x-forwarded-for will be set.
+    We probably won't need into_make_service_with_connect_info, but it shouldn't hurt.
     */
     let service = app.into_make_service_with_connect_info::<SocketAddr>();
     // let service = app.into_make_service();
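Note: the reworded comment is worth dwelling on: first-match header lookup means whoever sets `x-forwarded-for` first wins, and a direct client can set it too. A toy model of that failure mode (hypothetical parsing, not the library's internals):

```rust
// naive first-match lookup, like the comment describes: the first header
// present wins, so an untrusted client can claim any ip it likes
fn pick_client_ip(headers: &[(&str, &str)], socket_ip: &str) -> String {
    for name in ["x-forwarded-for", "x-real-ip", "forwarded"] {
        if let Some((_, value)) = headers.iter().find(|(k, _)| *k == name) {
            // x-forwarded-for is a comma-separated chain; first hop is the client
            return value.split(',').next().unwrap_or(value).trim().to_string();
        }
    }
    socket_ip.to_string()
}

fn main() {
    // behind haproxy the header is rewritten by the proxy and trustworthy
    let behind_proxy = [("x-forwarded-for", "203.0.113.9, 10.0.0.1")];
    assert_eq!(pick_client_ip(&behind_proxy, "10.0.0.2"), "203.0.113.9");

    // with no proxy in front, only the socket address is honest
    assert_eq!(pick_client_ip(&[], "198.51.100.7"), "198.51.100.7");
}
```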
@@ -1,10 +1,10 @@
-///! Keep track of the blockchain as seen by a Web3Connections.
-use super::connection::Web3Connection;
-use super::connections::Web3Connections;
+use super::many::Web3Rpcs;
+///! Keep track of the blockchain as seen by a Web3Rpcs.
+use super::one::Web3Rpc;
 use super::transactions::TxStatus;
 use crate::frontend::authorization::Authorization;
 use crate::{
-    config::BlockAndRpc, jsonrpc::JsonRpcRequest, rpcs::synced_connections::ConsensusConnections,
+    config::BlockAndRpc, jsonrpc::JsonRpcRequest, rpcs::synced_connections::ConsensusWeb3Rpcs,
 };
 use anyhow::Context;
 use derive_more::From;
@@ -92,7 +92,7 @@ impl Display for SavedBlock {
     }
 }

-impl Web3Connections {
+impl Web3Rpcs {
     /// add a block to our mappings and track the heaviest chain
     pub async fn save_block(
         &self,
@@ -135,7 +135,7 @@ impl Web3Connections {
         &self,
         authorization: &Arc<Authorization>,
         hash: &H256,
-        rpc: Option<&Arc<Web3Connection>>,
+        rpc: Option<&Arc<Web3Rpc>>,
     ) -> anyhow::Result<ArcBlock> {
         // first, try to get the hash from our cache
         // the cache is set last, so if its here, its everywhere
@@ -190,12 +190,12 @@ impl Web3Connections {
         &self,
         authorization: &Arc<Authorization>,
         num: &U64,
-    ) -> anyhow::Result<(H256, bool)> {
-        let (block, is_archive_block) = self.cannonical_block(authorization, num).await?;
+    ) -> anyhow::Result<(H256, u64)> {
+        let (block, block_depth) = self.cannonical_block(authorization, num).await?;

         let hash = block.hash.expect("Saved blocks should always have hashes");

-        Ok((hash, is_archive_block))
+        Ok((hash, block_depth))
     }

     /// Get the heaviest chain's block from cache or backend rpc
@@ -204,7 +204,7 @@ impl Web3Connections {
         &self,
         authorization: &Arc<Authorization>,
         num: &U64,
-    ) -> anyhow::Result<(ArcBlock, bool)> {
+    ) -> anyhow::Result<(ArcBlock, u64)> {
         // we only have blocks by hash now
         // maybe save them during save_block in a blocks_by_number Cache<U64, Vec<ArcBlock>>
         // if theres multiple, use petgraph to find the one on the main chain (and remove the others if they have enough confirmations)
@@ -233,8 +233,11 @@ impl Web3Connections {
         let head_block_num =
             head_block_num.expect("we should only get here if we have a head block");

-        // TODO: geth does 64, erigon does 90k. sometimes we run a mix
-        let archive_needed = num < &(head_block_num - U64::from(64));
+        let block_depth = if num >= &head_block_num {
+            0
+        } else {
+            (head_block_num - num).as_u64()
+        };

         // try to get the hash from our cache
         // deref to not keep the lock open
@@ -243,7 +246,7 @@ impl Web3Connections {
             // TODO: pass authorization through here?
             let block = self.block(authorization, &block_hash, None).await?;

-            return Ok((block, archive_needed));
+            return Ok((block, block_depth));
         }

         // block number not in cache. we need to ask an rpc for it
@@ -269,7 +272,7 @@ impl Web3Connections {
         // the block was fetched using eth_getBlockByNumber, so it should have all fields and be on the heaviest chain
         let block = self.save_block(block, true).await?;

-        Ok((block, archive_needed))
+        Ok((block, block_depth))
     }

     pub(super) async fn process_incoming_blocks(
@@ -285,7 +288,9 @@ impl Web3Connections {
         // TODO: this will grow unbounded. prune old heads on this at the same time we prune the graph?
         let mut connection_heads = ConsensusFinder::default();

-        while let Ok((new_block, rpc)) = block_receiver.recv_async().await {
+        loop {
+            match block_receiver.recv_async().await {
+                Ok((new_block, rpc)) => {
                     let new_block = new_block.map(Into::into);

                     let rpc_name = rpc.name.clone();
@@ -304,11 +309,12 @@ impl Web3Connections {
                         warn!("unable to process block from rpc {}: {:?}", rpc_name, err);
                     }
                 }
-        // TODO: if there was an error, should we return it instead of an Ok?
-        warn!("block_receiver exited!");
-        Ok(())
+                Err(err) => {
+                    warn!("block_receiver exited! {:#?}", err);
+                    return Err(err.into());
+                }
+            }
+        }
     }

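Note: the `while let Ok(...)` form silently turned a closed channel into a clean `Ok(())`; the explicit `match` makes the shutdown loud and propagates it. The difference, isolated against flume:

```rust
use flume::{Receiver, RecvError};

async fn run(block_receiver: Receiver<u64>) -> anyhow::Result<()> {
    loop {
        match block_receiver.recv_async().await {
            Ok(block) => println!("got block {block}"),
            Err(RecvError::Disconnected) => {
                // the old `while let Ok` fell out of the loop and returned
                // Ok(()); matching lets the caller see the shutdown instead
                return Err(anyhow::anyhow!("block_receiver exited"));
            }
        }
    }
}

#[tokio::main]
async fn main() {
    let (block_sender, block_receiver) = flume::unbounded::<u64>();
    block_sender.send(1).unwrap();
    drop(block_sender); // all senders gone: the channel closes after draining

    assert!(run(block_receiver).await.is_err());
}
```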
     /// `connection_heads` is a mapping of rpc_names to head block hashes.
@@ -319,7 +325,7 @@ impl Web3Connections {
         authorization: &Arc<Authorization>,
         consensus_finder: &mut ConsensusFinder,
         rpc_head_block: Option<SavedBlock>,
-        rpc: Arc<Web3Connection>,
+        rpc: Arc<Web3Rpc>,
         head_block_sender: &watch::Sender<ArcBlock>,
         pending_tx_sender: &Option<broadcast::Sender<TxStatus>>,
     ) -> anyhow::Result<()> {
@@ -388,6 +394,7 @@ impl Web3Connections {
         // multiple blocks with the same fork!
         if consensus_saved_block.hash() == old_head_block.hash() {
             // no change in hash. no need to use head_block_sender
+            // TODO: trace level if rpc is backup
             debug!(
                 "con {}{}/{}/{}/{} con={} rpc={}@{}",
                 includes_backups_str,
@@ -546,11 +553,11 @@ impl ConnectionsGroup {
         Self::new(true)
     }

-    fn remove(&mut self, rpc: &Web3Connection) -> Option<H256> {
+    fn remove(&mut self, rpc: &Web3Rpc) -> Option<H256> {
         self.rpc_name_to_hash.remove(rpc.name.as_str())
     }

-    fn insert(&mut self, rpc: &Web3Connection, block_hash: H256) -> Option<H256> {
+    fn insert(&mut self, rpc: &Web3Rpc, block_hash: H256) -> Option<H256> {
         self.rpc_name_to_hash.insert(rpc.name.clone(), block_hash)
     }

@@ -560,7 +567,7 @@ impl ConnectionsGroup {
         rpc_name: &str,
         hash: &H256,
         authorization: &Arc<Authorization>,
-        web3_connections: &Web3Connections,
+        web3_rpcs: &Web3Rpcs,
     ) -> anyhow::Result<ArcBlock> {
         // // TODO: why does this happen?!?! seems to only happen with uncled blocks
         // // TODO: maybe we should do try_get_with?
@@ -571,16 +578,17 @@ impl ConnectionsGroup {
         // );

         // this option should almost always be populated. if the connection reconnects at a bad time it might not be available though
-        let rpc = web3_connections.conns.get(rpc_name);
+        // TODO: if this is None, I think we should error.
+        let rpc = web3_rpcs.conns.get(rpc_name);

-        web3_connections.block(authorization, hash, rpc).await
+        web3_rpcs.block(authorization, hash, rpc).await
     }

     // TODO: do this during insert/remove?
     pub(self) async fn highest_block(
         &self,
         authorization: &Arc<Authorization>,
-        web3_connections: &Web3Connections,
+        web3_rpcs: &Web3Rpcs,
     ) -> Option<ArcBlock> {
         let mut checked_heads = HashSet::with_capacity(self.rpc_name_to_hash.len());
         let mut highest_block = None::<ArcBlock>;
@@ -592,7 +600,7 @@ impl ConnectionsGroup {
         }

         let rpc_block = match self
-            .get_block_from_rpc(rpc_name, rpc_head_hash, authorization, web3_connections)
+            .get_block_from_rpc(rpc_name, rpc_head_hash, authorization, web3_rpcs)
             .await
         {
             Ok(x) => x,
@@ -627,9 +635,9 @@ impl ConnectionsGroup {
     pub(self) async fn consensus_head_connections(
         &self,
         authorization: &Arc<Authorization>,
-        web3_connections: &Web3Connections,
-    ) -> anyhow::Result<ConsensusConnections> {
-        let mut maybe_head_block = match self.highest_block(authorization, web3_connections).await {
+        web3_rpcs: &Web3Rpcs,
+    ) -> anyhow::Result<ConsensusWeb3Rpcs> {
+        let mut maybe_head_block = match self.highest_block(authorization, web3_rpcs).await {
             None => return Err(anyhow::anyhow!("No blocks known")),
             Some(x) => x,
         };
@@ -663,27 +671,25 @@ impl ConnectionsGroup {
                 continue;
             }

-            if let Some(rpc) = web3_connections.conns.get(rpc_name.as_str()) {
+            if let Some(rpc) = web3_rpcs.conns.get(rpc_name.as_str()) {
                 highest_rpcs.insert(rpc_name);
                 highest_rpcs_sum_soft_limit += rpc.soft_limit;
             } else {
                 // i don't think this is an error. i think its just if a reconnect is currently happening
                 warn!("connection missing: {}", rpc_name);
+                debug!("web3_rpcs.conns: {:#?}", web3_rpcs.conns);
             }
         }

-            if highest_rpcs_sum_soft_limit >= web3_connections.min_sum_soft_limit
-                && highest_rpcs.len() >= web3_connections.min_head_rpcs
+            if highest_rpcs_sum_soft_limit >= web3_rpcs.min_sum_soft_limit
+                && highest_rpcs.len() >= web3_rpcs.min_head_rpcs
             {
                 // we have enough servers with enough requests
                 break;
             }
|
|
||||||
// not enough rpcs yet. check the parent block
|
// not enough rpcs yet. check the parent block
|
||||||
if let Some(parent_block) = web3_connections
|
if let Some(parent_block) = web3_rpcs.block_hashes.get(&maybe_head_block.parent_hash) {
|
||||||
.block_hashes
|
|
||||||
.get(&maybe_head_block.parent_hash)
|
|
||||||
{
|
|
||||||
// trace!(
|
// trace!(
|
||||||
// child=%maybe_head_hash, parent=%parent_block.hash.unwrap(), "avoiding thundering herd",
|
// child=%maybe_head_hash, parent=%parent_block.hash.unwrap(), "avoiding thundering herd",
|
||||||
// );
|
// );
|
||||||
@ -691,25 +697,25 @@ impl ConnectionsGroup {
|
|||||||
maybe_head_block = parent_block;
|
maybe_head_block = parent_block;
|
||||||
continue;
|
continue;
|
||||||
} else {
|
} else {
|
||||||
if num_known < web3_connections.min_head_rpcs {
|
if num_known < web3_rpcs.min_head_rpcs {
|
||||||
return Err(anyhow::anyhow!(
|
return Err(anyhow::anyhow!(
|
||||||
"not enough rpcs connected: {}/{}/{}",
|
"not enough rpcs connected: {}/{}/{}",
|
||||||
highest_rpcs.len(),
|
highest_rpcs.len(),
|
||||||
num_known,
|
num_known,
|
||||||
web3_connections.min_head_rpcs,
|
web3_rpcs.min_head_rpcs,
|
||||||
));
|
));
|
||||||
} else {
|
} else {
|
||||||
let soft_limit_percent = (highest_rpcs_sum_soft_limit as f32
|
let soft_limit_percent = (highest_rpcs_sum_soft_limit as f32
|
||||||
/ web3_connections.min_sum_soft_limit as f32)
|
/ web3_rpcs.min_sum_soft_limit as f32)
|
||||||
* 100.0;
|
* 100.0;
|
||||||
|
|
||||||
return Err(anyhow::anyhow!(
|
return Err(anyhow::anyhow!(
|
||||||
"ran out of parents to check. rpcs {}/{}/{}. soft limit: {:.2}% ({}/{})",
|
"ran out of parents to check. rpcs {}/{}/{}. soft limit: {:.2}% ({}/{})",
|
||||||
highest_rpcs.len(),
|
highest_rpcs.len(),
|
||||||
num_known,
|
num_known,
|
||||||
web3_connections.min_head_rpcs,
|
web3_rpcs.min_head_rpcs,
|
||||||
highest_rpcs_sum_soft_limit,
|
highest_rpcs_sum_soft_limit,
|
||||||
web3_connections.min_sum_soft_limit,
|
web3_rpcs.min_sum_soft_limit,
|
||||||
soft_limit_percent,
|
soft_limit_percent,
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
@ -719,29 +725,28 @@ impl ConnectionsGroup {
|
|||||||
// TODO: if consensus_head_rpcs.is_empty, try another method of finding the head block. will need to change the return Err above into breaks.
|
// TODO: if consensus_head_rpcs.is_empty, try another method of finding the head block. will need to change the return Err above into breaks.
|
||||||
|
|
||||||
// we've done all the searching for the heaviest block that we can
|
// we've done all the searching for the heaviest block that we can
|
||||||
if highest_rpcs.len() < web3_connections.min_head_rpcs
|
if highest_rpcs.len() < web3_rpcs.min_head_rpcs
|
||||||
|| highest_rpcs_sum_soft_limit < web3_connections.min_sum_soft_limit
|
|| highest_rpcs_sum_soft_limit < web3_rpcs.min_sum_soft_limit
|
||||||
{
|
{
|
||||||
// if we get here, not enough servers are synced. return an error
|
// if we get here, not enough servers are synced. return an error
|
||||||
let soft_limit_percent = (highest_rpcs_sum_soft_limit as f32
|
let soft_limit_percent =
|
||||||
/ web3_connections.min_sum_soft_limit as f32)
|
(highest_rpcs_sum_soft_limit as f32 / web3_rpcs.min_sum_soft_limit as f32) * 100.0;
|
||||||
* 100.0;
|
|
||||||
|
|
||||||
return Err(anyhow::anyhow!(
|
return Err(anyhow::anyhow!(
|
||||||
"Not enough resources. rpcs {}/{}/{}. soft limit: {:.2}% ({}/{})",
|
"Not enough resources. rpcs {}/{}/{}. soft limit: {:.2}% ({}/{})",
|
||||||
highest_rpcs.len(),
|
highest_rpcs.len(),
|
||||||
num_known,
|
num_known,
|
||||||
web3_connections.min_head_rpcs,
|
web3_rpcs.min_head_rpcs,
|
||||||
highest_rpcs_sum_soft_limit,
|
highest_rpcs_sum_soft_limit,
|
||||||
web3_connections.min_sum_soft_limit,
|
web3_rpcs.min_sum_soft_limit,
|
||||||
soft_limit_percent,
|
soft_limit_percent,
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
// success! this block has enough soft limit and nodes on it (or on later blocks)
|
// success! this block has enough soft limit and nodes on it (or on later blocks)
|
||||||
let conns: Vec<Arc<Web3Connection>> = highest_rpcs
|
let conns: Vec<Arc<Web3Rpc>> = highest_rpcs
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.filter_map(|conn_name| web3_connections.conns.get(conn_name).cloned())
|
.filter_map(|conn_name| web3_rpcs.conns.get(conn_name).cloned())
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
// TODO: DEBUG only check
|
// TODO: DEBUG only check
|
||||||
@ -754,7 +759,7 @@ impl ConnectionsGroup {
|
|||||||
|
|
||||||
let consensus_head_block: SavedBlock = maybe_head_block.into();
|
let consensus_head_block: SavedBlock = maybe_head_block.into();
|
||||||
|
|
||||||
Ok(ConsensusConnections {
|
Ok(ConsensusWeb3Rpcs {
|
||||||
head_block: Some(consensus_head_block),
|
head_block: Some(consensus_head_block),
|
||||||
conns,
|
conns,
|
||||||
num_checked_conns: self.rpc_name_to_hash.len(),
|
num_checked_conns: self.rpc_name_to_hash.len(),
|
||||||
@ -781,7 +786,7 @@ impl Default for ConsensusFinder {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl ConsensusFinder {
|
impl ConsensusFinder {
|
||||||
fn remove(&mut self, rpc: &Web3Connection) -> Option<H256> {
|
fn remove(&mut self, rpc: &Web3Rpc) -> Option<H256> {
|
||||||
// TODO: should we have multiple backup tiers? (remote datacenters vs third party)
|
// TODO: should we have multiple backup tiers? (remote datacenters vs third party)
|
||||||
if !rpc.backup {
|
if !rpc.backup {
|
||||||
self.main.remove(rpc);
|
self.main.remove(rpc);
|
||||||
@ -789,7 +794,7 @@ impl ConsensusFinder {
|
|||||||
self.all.remove(rpc)
|
self.all.remove(rpc)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn insert(&mut self, rpc: &Web3Connection, new_hash: H256) -> Option<H256> {
|
fn insert(&mut self, rpc: &Web3Rpc, new_hash: H256) -> Option<H256> {
|
||||||
// TODO: should we have multiple backup tiers? (remote datacenters vs third party)
|
// TODO: should we have multiple backup tiers? (remote datacenters vs third party)
|
||||||
if !rpc.backup {
|
if !rpc.backup {
|
||||||
self.main.insert(rpc, new_hash);
|
self.main.insert(rpc, new_hash);
|
||||||
@ -801,9 +806,9 @@ impl ConsensusFinder {
|
|||||||
async fn update_rpc(
|
async fn update_rpc(
|
||||||
&mut self,
|
&mut self,
|
||||||
rpc_head_block: Option<SavedBlock>,
|
rpc_head_block: Option<SavedBlock>,
|
||||||
rpc: Arc<Web3Connection>,
|
rpc: Arc<Web3Rpc>,
|
||||||
// we need this so we can save the block to caches. i don't like it though. maybe we should use a lazy_static Cache wrapper that has a "save_block" method?. i generally dislike globals but i also dislike all the types having to pass eachother around
|
// we need this so we can save the block to caches. i don't like it though. maybe we should use a lazy_static Cache wrapper that has a "save_block" method?. i generally dislike globals but i also dislike all the types having to pass eachother around
|
||||||
web3_connections: &Web3Connections,
|
web3_connections: &Web3Rpcs,
|
||||||
) -> anyhow::Result<bool> {
|
) -> anyhow::Result<bool> {
|
||||||
// add the rpc's block to connection_heads, or remove the rpc from connection_heads
|
// add the rpc's block to connection_heads, or remove the rpc from connection_heads
|
||||||
let changed = match rpc_head_block {
|
let changed = match rpc_head_block {
|
||||||
@ -848,15 +853,15 @@ impl ConsensusFinder {
|
|||||||
async fn best_consensus_connections(
|
async fn best_consensus_connections(
|
||||||
&mut self,
|
&mut self,
|
||||||
authorization: &Arc<Authorization>,
|
authorization: &Arc<Authorization>,
|
||||||
web3_connections: &Web3Connections,
|
web3_connections: &Web3Rpcs,
|
||||||
) -> ConsensusConnections {
|
) -> ConsensusWeb3Rpcs {
|
||||||
let highest_block_num = match self
|
let highest_block_num = match self
|
||||||
.all
|
.all
|
||||||
.highest_block(authorization, web3_connections)
|
.highest_block(authorization, web3_connections)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
None => {
|
None => {
|
||||||
return ConsensusConnections::default();
|
return ConsensusWeb3Rpcs::default();
|
||||||
}
|
}
|
||||||
Some(x) => x.number.expect("blocks here should always have a number"),
|
Some(x) => x.number.expect("blocks here should always have a number"),
|
||||||
};
|
};
|
||||||
@ -897,7 +902,7 @@ impl ConsensusFinder {
|
|||||||
if self.all.rpc_name_to_hash.len() < web3_connections.min_head_rpcs {
|
if self.all.rpc_name_to_hash.len() < web3_connections.min_head_rpcs {
|
||||||
debug!("No consensus head yet: {}", err);
|
debug!("No consensus head yet: {}", err);
|
||||||
}
|
}
|
||||||
return ConsensusConnections::default();
|
return ConsensusWeb3Rpcs::default();
|
||||||
}
|
}
|
||||||
Ok(x) => x,
|
Ok(x) => x,
|
||||||
};
|
};
|
||||||
@ -920,7 +925,7 @@ impl ConsensusFinder {
|
|||||||
} else {
|
} else {
|
||||||
// TODO: i don't think we need this error. and i doublt we'll ever even get here
|
// TODO: i don't think we need this error. and i doublt we'll ever even get here
|
||||||
error!("NO CONSENSUS HEAD!");
|
error!("NO CONSENSUS HEAD!");
|
||||||
ConsensusConnections::default()
|
ConsensusWeb3Rpcs::default()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
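The consensus walk above only accepts a head block once the agreeing servers clear two thresholds at the same time. A minimal standalone sketch of that gate (the function and its parameters are illustrative, not part of this commit; the names mirror the `min_sum_soft_limit` and `min_head_rpcs` fields):

    // sketch: the two thresholds that gate consensus while walking parents.
    fn have_consensus(
        sum_soft_limit: u32,
        num_rpcs: usize,
        min_sum_soft_limit: u32,
        min_head_rpcs: usize,
    ) -> bool {
        sum_soft_limit >= min_sum_soft_limit && num_rpcs >= min_head_rpcs
    }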
@ -1,12 +1,10 @@
-///! Load balanced communication with a group of web3 providers
+///! Load balanced communication with a group of web3 rpc providers
 use super::blockchain::{ArcBlock, BlockHashesCache};
-use super::connection::Web3Connection;
-use super::request::{
-    OpenRequestHandle, OpenRequestHandleMetrics, OpenRequestResult, RequestRevertHandler,
-};
-use super::synced_connections::ConsensusConnections;
+use super::one::Web3Rpc;
+use super::request::{OpenRequestHandle, OpenRequestResult, RequestRevertHandler};
+use super::synced_connections::ConsensusWeb3Rpcs;
 use crate::app::{flatten_handle, AnyhowJoinHandle};
-use crate::config::{BlockAndRpc, TxHashAndRpc, Web3ConnectionConfig};
+use crate::config::{BlockAndRpc, TxHashAndRpc, Web3RpcConfig};
 use crate::frontend::authorization::{Authorization, RequestMetadata};
 use crate::frontend::rpc_proxy_ws::ProxyMode;
 use crate::jsonrpc::{JsonRpcForwardedResponse, JsonRpcRequest};
@ -14,7 +12,7 @@ use crate::rpcs::transactions::TxStatus;
 use counter::Counter;
 use derive_more::From;
 use ethers::prelude::{ProviderError, TxHash, H256, U64};
-use futures::future::{join_all, try_join_all};
+use futures::future::try_join_all;
 use futures::stream::FuturesUnordered;
 use futures::StreamExt;
 use hashbrown::{HashMap, HashSet};
@ -36,11 +34,11 @@ use tokio::time::{interval, sleep, sleep_until, Duration, Instant, MissedTickBeh
 
 /// A collection of web3 connections. Sends requests either the current best server or all servers.
 #[derive(From)]
-pub struct Web3Connections {
+pub struct Web3Rpcs {
     /// any requests will be forwarded to one (or more) of these connections
-    pub(crate) conns: HashMap<String, Arc<Web3Connection>>,
+    pub(crate) conns: HashMap<String, Arc<Web3Rpc>>,
     /// all providers with the same consensus head block. won't update if there is no `self.watch_consensus_head_sender`
-    pub(super) watch_consensus_connections_sender: watch::Sender<Arc<ConsensusConnections>>,
+    pub(super) watch_consensus_connections_sender: watch::Sender<Arc<ConsensusWeb3Rpcs>>,
     /// this head receiver makes it easy to wait until there is a new block
     pub(super) watch_consensus_head_receiver: Option<watch::Receiver<ArcBlock>>,
     pub(super) pending_transactions:
@ -54,13 +52,13 @@ pub struct Web3Connections
     pub(super) min_sum_soft_limit: u32,
 }
 
-impl Web3Connections {
+impl Web3Rpcs {
     /// Spawn durable connections to multiple Web3 providers.
     #[allow(clippy::too_many_arguments)]
     pub async fn spawn(
         chain_id: u64,
         db_conn: Option<DatabaseConnection>,
-        server_configs: HashMap<String, Web3ConnectionConfig>,
+        server_configs: HashMap<String, Web3RpcConfig>,
         http_client: Option<reqwest::Client>,
         redis_pool: Option<redis_rate_limiter::RedisPool>,
         block_map: BlockHashesCache,
@ -69,7 +67,6 @@ impl Web3Connections
         min_head_rpcs: usize,
         pending_tx_sender: Option<broadcast::Sender<TxStatus>>,
         pending_transactions: Cache<TxHash, TxStatus, hashbrown::hash_map::DefaultHashBuilder>,
-        open_request_handle_metrics: Arc<OpenRequestHandleMetrics>,
     ) -> anyhow::Result<(Arc<Self>, AnyhowJoinHandle<()>)> {
         let (pending_tx_id_sender, pending_tx_id_receiver) = flume::unbounded();
         let (block_sender, block_receiver) = flume::unbounded::<BlockAndRpc>();
@ -92,12 +89,10 @@ impl Web3Connections
         };
 
         let http_interval_sender = if http_client.is_some() {
-            let (sender, receiver) = broadcast::channel(1);
-
-            drop(receiver);
+            let (sender, _) = broadcast::channel(1);
 
             // TODO: what interval? follow a websocket also? maybe by watching synced connections with a timeout. will need debounce
-            let mut interval = interval(Duration::from_millis(expected_block_time_ms));
+            let mut interval = interval(Duration::from_millis(expected_block_time_ms / 2));
             interval.set_missed_tick_behavior(MissedTickBehavior::Delay);
 
             let sender = Arc::new(sender);
@ -107,13 +102,14 @@ impl Web3Connections
 
             async move {
                 loop {
-                    // TODO: every time a head_block arrives (with a small delay for known slow servers), or on the interval.
                     interval.tick().await;
 
-                    // // trace!("http interval ready");
+                    // trace!("http interval ready");
 
-                    // errors are okay. they mean that all receivers have been dropped
-                    let _ = sender.send(());
+                    if let Err(_) = sender.send(()) {
+                        // errors are okay. they mean that all receivers have been dropped, or the rpcs just haven't started yet
+                        trace!("no http receivers");
+                    };
                 }
             }
         };
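The interval task above stops silently discarding the result of `sender.send(())` and instead logs a trace message. With `tokio::sync::broadcast`, `send` returns `Err` exactly when no receivers are currently subscribed, which is harmless here. A small self-contained sketch of that behavior (standalone, not this crate's code):

    use tokio::sync::broadcast;

    #[tokio::main]
    async fn main() {
        // the initial receiver is dropped immediately, like `let (sender, _)` above
        let (sender, _) = broadcast::channel::<()>(1);

        // with no receivers subscribed, send returns Err. nothing is lost:
        // it only means nobody was listening for this tick.
        assert!(sender.send(()).is_err());

        // once a receiver subscribes, the same send succeeds
        let mut rx = sender.subscribe();
        assert!(sender.send(()).is_ok());
        rx.recv().await.unwrap();
    }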
@ -128,11 +124,11 @@ impl Web3Connections
 
         // turn configs into connections (in parallel)
         // TODO: move this into a helper function. then we can use it when configs change (will need a remove function too)
-        // TODO: futures unordered?
-        let spawn_handles: Vec<_> = server_configs
+        let mut spawn_handles: FuturesUnordered<_> = server_configs
             .into_iter()
             .filter_map(|(server_name, server_config)| {
                 if server_config.disabled {
+                    info!("{} is disabled", server_name);
                     return None;
                 }
 
@ -149,7 +145,8 @@ impl Web3Connections
 
                 let pending_tx_id_sender = Some(pending_tx_id_sender.clone());
                 let block_map = block_map.clone();
-                let open_request_handle_metrics = open_request_handle_metrics.clone();
+
+                debug!("spawning {}", server_name);
 
                 let handle = tokio::spawn(async move {
                     server_config
@ -163,7 +160,6 @@ impl Web3Connections
                             block_map,
                             block_sender,
                             pending_tx_id_sender,
-                            open_request_handle_metrics,
                         )
                         .await
                 });
@ -177,19 +173,20 @@ impl Web3Connections
         let mut handles = vec![];
 
         // TODO: futures unordered?
-        for x in join_all(spawn_handles).await {
-            // TODO: how should we handle errors here? one rpc being down shouldn't cause the program to exit
+        while let Some(x) = spawn_handles.next().await {
             match x {
                 Ok(Ok((connection, handle))) => {
+                    // web3 connection worked
                     connections.insert(connection.name.clone(), connection);
                     handles.push(handle);
                 }
                 Ok(Err(err)) => {
-                    // if we got an error here, it is not retryable
+                    // if we got an error here, the app can continue on
                    // TODO: include context about which connection failed
                     error!("Unable to create connection. err={:?}", err);
                 }
                 Err(err) => {
+                    // something actually bad happened. exit with an error
                     return Err(err.into());
                 }
             }
@ -229,7 +226,6 @@ impl Web3Connections
         let connections = connections.clone();
 
         tokio::spawn(async move {
-            // TODO: try_join_all with the other handles here
             connections
                 .subscribe(
                     authorization,
@ -245,13 +241,13 @@ impl Web3Connections
         Ok((connections, handle))
     }
 
-    pub fn get(&self, conn_name: &str) -> Option<&Arc<Web3Connection>> {
+    pub fn get(&self, conn_name: &str) -> Option<&Arc<Web3Rpc>> {
         self.conns.get(conn_name)
     }
 
     /// subscribe to blocks and transactions from all the backend rpcs.
-    /// blocks are processed by all the `Web3Connection`s and then sent to the `block_receiver`
-    /// transaction ids from all the `Web3Connection`s are deduplicated and forwarded to `pending_tx_sender`
+    /// blocks are processed by all the `Web3Rpc`s and then sent to the `block_receiver`
+    /// transaction ids from all the `Web3Rpc`s are deduplicated and forwarded to `pending_tx_sender`
     async fn subscribe(
         self: Arc<Self>,
         authorization: Arc<Authorization>,
@ -327,7 +323,6 @@ impl Web3Connections
         }
 
         info!("subscriptions over: {:?}", self);
 
         Ok(())
     }
 
@ -415,7 +410,7 @@ impl Web3Connections
         &self,
         authorization: &Arc<Authorization>,
         request_metadata: Option<&Arc<RequestMetadata>>,
-        skip: &[Arc<Web3Connection>],
+        skip: &[Arc<Web3Rpc>],
         min_block_needed: Option<&U64>,
     ) -> anyhow::Result<OpenRequestResult> {
         if let Ok(without_backups) = self
@ -450,13 +445,10 @@ impl Web3Connections
         allow_backups: bool,
         authorization: &Arc<Authorization>,
         request_metadata: Option<&Arc<RequestMetadata>>,
-        skip: &[Arc<Web3Connection>],
+        skip: &[Arc<Web3Rpc>],
         min_block_needed: Option<&U64>,
     ) -> anyhow::Result<OpenRequestResult> {
-        let usable_rpcs_by_head_num_and_weight: BTreeMap<
-            (Option<U64>, u64),
-            Vec<Arc<Web3Connection>>,
-        > = {
+        let usable_rpcs_by_head_num_and_weight: BTreeMap<(Option<U64>, u64), Vec<Arc<Web3Rpc>>> = {
             let synced_connections = self.watch_consensus_connections_sender.borrow().clone();
 
             let head_block_num = if let Some(head_block) = synced_connections.head_block.as_ref() {
@ -647,13 +639,16 @@ impl Web3Connections
         authorization: &Arc<Authorization>,
         block_needed: Option<&U64>,
         max_count: Option<usize>,
+        always_include_backups: bool,
     ) -> Result<Vec<OpenRequestHandle>, Option<Instant>> {
+        if !always_include_backups {
             if let Ok(without_backups) = self
                 ._all_connections(false, authorization, block_needed, max_count)
                 .await
             {
                 return Ok(without_backups);
             }
+        }
 
         self._all_connections(true, authorization, block_needed, max_count)
             .await
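`all_connections` now takes `always_include_backups`, but the overall shape stays a two-pass fallback: first try a strict pass that excludes backup servers, and only widen to backups when that fails. A sketch of the pattern with hypothetical names (`pick`, `strict`, `relaxed` are not this crate's API):

    use std::future::Future;

    // sketch: strict pass first, relaxed pass as the fallback.
    async fn pick<T, E>(
        always_include_backups: bool,
        strict: impl Future<Output = Result<T, E>>,
        relaxed: impl Future<Output = Result<T, E>>,
    ) -> Result<T, E> {
        if !always_include_backups {
            if let Ok(found) = strict.await {
                return Ok(found);
            }
        }
        relaxed.await
    }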
@ -678,17 +673,21 @@ impl Web3Connections
 
         let mut tried = HashSet::new();
 
-        let conns_to_try = itertools::chain(
-            // TODO: sort by tier
-            self.watch_consensus_connections_sender
+        let mut synced_conns = self
+            .watch_consensus_connections_sender
             .borrow()
             .conns
-                .clone(),
-            // TODO: sort by tier
-            self.conns.values().cloned(),
-        );
+            .clone();
 
-        for connection in conns_to_try {
+        // synced connections are all on the same block. sort them by tier with higher soft limits first
+        synced_conns.sort_by_cached_key(|x| (x.tier, u32::MAX - x.soft_limit));
+
+        // if there aren't enough synced connections, include more connections
+        let mut all_conns: Vec<_> = self.conns.values().cloned().collect();
+
+        sort_connections_by_sync_status(&mut all_conns);
+
+        for connection in itertools::chain(synced_conns, all_conns) {
             if max_count == 0 {
                 break;
             }
@ -760,14 +759,9 @@ impl Web3Connections
         loop {
             let num_skipped = skip_rpcs.len();
 
-            if num_skipped > 0 {
-                // trace!("skip_rpcs: {:?}", skip_rpcs);
 
-                // TODO: is self.conns still right now that we split main and backup servers?
             if num_skipped == self.conns.len() {
                 break;
             }
-            }
 
             match self
                 .best_consensus_head_connection(
@ -1017,10 +1011,16 @@ impl Web3Connections
         block_needed: Option<&U64>,
         error_level: Level,
         max_count: Option<usize>,
+        always_include_backups: bool,
     ) -> anyhow::Result<JsonRpcForwardedResponse> {
         loop {
             match self
-                .all_connections(authorization, block_needed, max_count)
+                .all_connections(
+                    authorization,
+                    block_needed,
+                    max_count,
+                    always_include_backups,
+                )
                 .await
             {
                 Ok(active_request_handles) => {
@ -1117,23 +1117,23 @@ impl Web3Connections
     }
 }
 
-impl fmt::Debug for Web3Connections {
+impl fmt::Debug for Web3Rpcs {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         // TODO: the default formatter takes forever to write. this is too quiet though
-        f.debug_struct("Web3Connections")
+        f.debug_struct("Web3Rpcs")
             .field("conns", &self.conns)
             .finish_non_exhaustive()
     }
 }
 
-impl Serialize for Web3Connections {
+impl Serialize for Web3Rpcs {
     fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
     where
         S: Serializer,
     {
-        let mut state = serializer.serialize_struct("Web3Connections", 6)?;
+        let mut state = serializer.serialize_struct("Web3Rpcs", 6)?;
 
-        let conns: Vec<&Web3Connection> = self.conns.values().map(|x| x.as_ref()).collect();
+        let conns: Vec<&Web3Rpc> = self.conns.values().map(|x| x.as_ref()).collect();
         state.serialize_field("conns", &conns)?;
 
         {
@ -1152,13 +1152,29 @@ impl Serialize for Web3Connections
     }
 }
 
+/// sort by block number (descending) and tier (ascending)
+fn sort_connections_by_sync_status(rpcs: &mut Vec<Arc<Web3Rpc>>) {
+    rpcs.sort_by_cached_key(|x| {
+        let reversed_head_block = u64::MAX
+            - x.head_block
+                .read()
+                .as_ref()
+                .map(|x| x.number().as_u64())
+                .unwrap_or(0);
+
+        let tier = x.tier;
+
+        (reversed_head_block, tier)
+    });
+}
 
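`sort_connections_by_sync_status` sorts ascending on the key `(u64::MAX - head_block_number, tier)`, so servers with higher head blocks come first and, at equal height, lower tiers win. A tiny standalone demonstration of the same key on plain tuples (illustrative only; it reproduces the ordering asserted by the test added below):

    fn main() {
        // (name, head block number, tier); None means no head block yet
        let mut rpcs = [
            ("a", None, 0u64),
            ("b", Some(1u64), 0),
            ("c", Some(2), 0),
            ("d", None, 1),
            ("e", Some(1), 1),
            ("f", Some(2), 1),
        ];

        // same trick as above: subtracting from u64::MAX reverses the block order
        rpcs.sort_by_key(|(_, head, tier)| (u64::MAX - head.unwrap_or(0), *tier));

        let names: Vec<_> = rpcs.iter().map(|(n, _, _)| *n).collect();
        assert_eq!(names, ["c", "f", "b", "e", "a", "d"]);
    }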
 mod tests {
     // TODO: why is this allow needed? does tokio::test get in the way somehow?
     #![allow(unused_imports)]
     use super::*;
     use crate::rpcs::{
         blockchain::{ConsensusFinder, SavedBlock},
-        connection::ProviderState,
+        one::ProviderState,
         provider::Web3Provider,
     };
     use ethers::types::{Block, U256};
@ -1167,6 +1183,80 @@ mod tests
     use std::time::{SystemTime, UNIX_EPOCH};
     use tokio::sync::RwLock as AsyncRwLock;
 
+    #[tokio::test]
+    async fn test_sort_connections_by_sync_status() {
+        let block_0 = Block {
+            number: Some(0.into()),
+            hash: Some(H256::random()),
+            ..Default::default()
+        };
+        let block_1 = Block {
+            number: Some(1.into()),
+            hash: Some(H256::random()),
+            parent_hash: block_0.hash.unwrap(),
+            ..Default::default()
+        };
+        let block_2 = Block {
+            number: Some(2.into()),
+            hash: Some(H256::random()),
+            parent_hash: block_1.hash.unwrap(),
+            ..Default::default()
+        };
+
+        let blocks: Vec<_> = [block_0, block_1, block_2]
+            .into_iter()
+            .map(|x| SavedBlock::new(Arc::new(x)))
+            .collect();
+
+        let mut rpcs = [
+            Web3Rpc {
+                name: "a".to_string(),
+                tier: 0,
+                head_block: RwLock::new(None),
+                ..Default::default()
+            },
+            Web3Rpc {
+                name: "b".to_string(),
+                tier: 0,
+                head_block: RwLock::new(blocks.get(1).cloned()),
+                ..Default::default()
+            },
+            Web3Rpc {
+                name: "c".to_string(),
+                tier: 0,
+                head_block: RwLock::new(blocks.get(2).cloned()),
+                ..Default::default()
+            },
+            Web3Rpc {
+                name: "d".to_string(),
+                tier: 1,
+                head_block: RwLock::new(None),
+                ..Default::default()
+            },
+            Web3Rpc {
+                name: "e".to_string(),
+                tier: 1,
+                head_block: RwLock::new(blocks.get(1).cloned()),
+                ..Default::default()
+            },
+            Web3Rpc {
+                name: "f".to_string(),
+                tier: 1,
+                head_block: RwLock::new(blocks.get(2).cloned()),
+                ..Default::default()
+            },
+        ]
+        .into_iter()
+        .map(Arc::new)
+        .collect();
+
+        sort_connections_by_sync_status(&mut rpcs);
+
+        let names_in_sort_order: Vec<_> = rpcs.iter().map(|x| x.name.as_str()).collect();
+
+        assert_eq!(names_in_sort_order, ["c", "f", "b", "e", "a", "d"]);
+    }
+
     #[tokio::test]
     async fn test_server_selection_by_height() {
         // TODO: do this better. can test_env_logger and tokio test be stacked?
@ -1206,50 +1296,32 @@ mod tests
 
         let block_data_limit = u64::MAX;
 
-        let head_rpc = Web3Connection {
+        let head_rpc = Web3Rpc {
             name: "synced".to_string(),
-            db_conn: None,
-            display_name: None,
-            url: "ws://example.com/synced".to_string(),
-            http_client: None,
-            active_requests: 0.into(),
-            frontend_requests: 0.into(),
-            internal_requests: 0.into(),
             provider_state: AsyncRwLock::new(ProviderState::Connected(Arc::new(
                 Web3Provider::Mock,
             ))),
-            hard_limit: None,
-            hard_limit_until: None,
             soft_limit: 1_000,
-            automatic_block_limit: true,
+            automatic_block_limit: false,
             backup: false,
             block_data_limit: block_data_limit.into(),
             tier: 0,
             head_block: RwLock::new(Some(head_block.clone())),
-            open_request_handle_metrics: Arc::new(Default::default()),
+            ..Default::default()
         };
 
-        let lagged_rpc = Web3Connection {
+        let lagged_rpc = Web3Rpc {
             name: "lagged".to_string(),
-            db_conn: None,
-            display_name: None,
-            url: "ws://example.com/lagged".to_string(),
-            http_client: None,
-            active_requests: 0.into(),
-            frontend_requests: 0.into(),
-            internal_requests: 0.into(),
             provider_state: AsyncRwLock::new(ProviderState::Connected(Arc::new(
                 Web3Provider::Mock,
             ))),
-            hard_limit: None,
-            hard_limit_until: None,
             soft_limit: 1_000,
             automatic_block_limit: false,
             backup: false,
             block_data_limit: block_data_limit.into(),
             tier: 0,
             head_block: RwLock::new(Some(lagged_block.clone())),
-            open_request_handle_metrics: Arc::new(Default::default()),
+            ..Default::default()
         };
 
         assert!(head_rpc.has_block_data(&lagged_block.number()));
@ -1268,8 +1340,8 @@ mod tests
 
         let (watch_consensus_connections_sender, _) = watch::channel(Default::default());
 
-        // TODO: make a Web3Connections::new
-        let conns = Web3Connections {
+        // TODO: make a Web3Rpcs::new
+        let conns = Web3Rpcs {
             conns,
             watch_consensus_head_receiver: None,
             watch_consensus_connections_sender,
@ -1319,10 +1391,10 @@ mod tests
         // no head block because the rpcs haven't communicated through their channels
         assert!(conns.head_block_hash().is_none());
 
-        // all_backend_connections gives everything regardless of sync status
+        // all_backend_connections gives all non-backup servers regardless of sync status
         assert_eq!(
             conns
-                .all_connections(&authorization, None, None)
+                .all_connections(&authorization, None, None, false)
                 .await
                 .unwrap()
                 .len(),
@ -1439,50 +1511,32 @@ mod tests
 
         let head_block: SavedBlock = Arc::new(head_block).into();
 
-        let pruned_rpc = Web3Connection {
+        let pruned_rpc = Web3Rpc {
             name: "pruned".to_string(),
-            db_conn: None,
-            display_name: None,
-            url: "ws://example.com/pruned".to_string(),
-            http_client: None,
-            active_requests: 0.into(),
-            frontend_requests: 0.into(),
-            internal_requests: 0.into(),
             provider_state: AsyncRwLock::new(ProviderState::Connected(Arc::new(
                 Web3Provider::Mock,
             ))),
-            hard_limit: None,
-            hard_limit_until: None,
             soft_limit: 3_000,
             automatic_block_limit: false,
             backup: false,
             block_data_limit: 64.into(),
             tier: 1,
             head_block: RwLock::new(Some(head_block.clone())),
-            open_request_handle_metrics: Arc::new(Default::default()),
+            ..Default::default()
         };
 
-        let archive_rpc = Web3Connection {
+        let archive_rpc = Web3Rpc {
             name: "archive".to_string(),
-            db_conn: None,
-            display_name: None,
-            url: "ws://example.com/archive".to_string(),
-            http_client: None,
-            active_requests: 0.into(),
-            frontend_requests: 0.into(),
-            internal_requests: 0.into(),
             provider_state: AsyncRwLock::new(ProviderState::Connected(Arc::new(
                 Web3Provider::Mock,
             ))),
-            hard_limit: None,
-            hard_limit_until: None,
             soft_limit: 1_000,
             automatic_block_limit: false,
             backup: false,
             block_data_limit: u64::MAX.into(),
             tier: 2,
             head_block: RwLock::new(Some(head_block.clone())),
-            open_request_handle_metrics: Arc::new(Default::default()),
+            ..Default::default()
         };
 
         assert!(pruned_rpc.has_block_data(&head_block.number()));
@ -1500,8 +1554,8 @@ mod tests
 
         let (watch_consensus_connections_sender, _) = watch::channel(Default::default());
 
-        // TODO: make a Web3Connections::new
-        let conns = Web3Connections {
+        // TODO: make a Web3Rpcs::new
+        let conns = Web3Rpcs {
             conns,
             watch_consensus_head_receiver: None,
             watch_consensus_connections_sender,
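The test refactor above leans on `#[derive(Default)]` plus struct update syntax: spell out only the fields a test cares about and fill everything else from `Default`. A minimal sketch of the pattern on a hypothetical struct (not this crate's type):

    #[derive(Default, Debug)]
    struct Rpc {
        name: String,
        soft_limit: u32,
        tier: u64,
    }

    fn main() {
        // only the interesting fields are written; the rest come from Default
        let rpc = Rpc {
            name: "pruned".to_string(),
            soft_limit: 3_000,
            ..Default::default()
        };

        assert_eq!(rpc.tier, 0);
        println!("{:?}", rpc);
    }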
@ -1,7 +1,7 @@
 // TODO: all pub, or export useful things here instead?
 pub mod blockchain;
-pub mod connection;
-pub mod connections;
+pub mod many;
+pub mod one;
 pub mod provider;
 pub mod request;
 pub mod synced_connections;
@ -1,7 +1,7 @@
 ///! Rate-limited communication with a web3 provider.
 use super::blockchain::{ArcBlock, BlockHashesCache, SavedBlock};
 use super::provider::Web3Provider;
-use super::request::{OpenRequestHandle, OpenRequestHandleMetrics, OpenRequestResult};
+use super::request::{OpenRequestHandle, OpenRequestResult};
 use crate::app::{flatten_handle, AnyhowJoinHandle};
 use crate::config::BlockAndRpc;
 use crate::frontend::authorization::Authorization;
@ -10,6 +10,7 @@ use ethers::prelude::{Bytes, Middleware, ProviderError, TxHash, H256, U64};
 use ethers::types::U256;
 use futures::future::try_join_all;
 use futures::StreamExt;
+use hdrhistogram::Histogram;
 use log::{debug, error, info, trace, warn, Level};
 use migration::sea_orm::DatabaseConnection;
 use parking_lot::RwLock;
@ -25,7 +26,7 @@ use std::{cmp::Ordering, sync::Arc};
 use thread_fast_rng::rand::Rng;
 use thread_fast_rng::thread_fast_rng;
 use tokio::sync::{broadcast, oneshot, watch, RwLock as AsyncRwLock};
-use tokio::time::{interval, sleep, sleep_until, timeout, Duration, Instant, MissedTickBehavior};
+use tokio::time::{sleep, sleep_until, timeout, Duration, Instant};
 
 // TODO: maybe provider state should have the block data limit in it. but it is inside an async lock and we can't Serialize then
 #[derive(Clone, Debug)]
@ -35,6 +36,12 @@ pub enum ProviderState
     Connected(Arc<Web3Provider>),
 }
 
+impl Default for ProviderState {
+    fn default() -> Self {
+        Self::None
+    }
+}
+
 impl ProviderState {
     pub async fn provider(&self, allow_not_ready: bool) -> Option<&Arc<Web3Provider>> {
         match self {
@ -58,8 +65,31 @@ impl ProviderState
         }
     }
 }
 
+pub struct Web3RpcLatencies {
+    /// Traack how far behind the fastest node we are
+    new_head: Histogram<u64>,
+    /// exponentially weighted moving average of how far behind the fastest node we are
+    new_head_ewma: u32,
+    /// Track how long an rpc call takes on average
+    request: Histogram<u64>,
+    /// exponentially weighted moving average of how far behind the fastest node we are
+    request_ewma: u32,
+}
+
+impl Default for Web3RpcLatencies {
+    fn default() -> Self {
+        Self {
+            new_head: Histogram::new(3).unwrap(),
+            new_head_ewma: 0,
+            request: Histogram::new(3).unwrap(),
+            request_ewma: 0,
+        }
+    }
+}
 
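The new `Web3RpcLatencies` struct pairs an `hdrhistogram` with an EWMA counter per metric. This commit only adds the fields; the usual way such an EWMA is maintained is the update rule sketched here, which is an assumption about the intended use, not code from this commit:

    // sketch: classic exponentially weighted moving average update.
    // alpha near 1.0 reacts quickly to new samples; near 0.0 smooths heavily.
    fn update_ewma(current_ewma: f64, sample_ms: f64, alpha: f64) -> f64 {
        alpha * sample_ms + (1.0 - alpha) * current_ewma
    }

    fn main() {
        let mut ewma = 0.0;
        for sample in [120.0, 80.0, 100.0] {
            ewma = update_ewma(ewma, sample, 0.2);
        }
        println!("request_ewma ~ {ewma:.1} ms");
    }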
/// An active connection to a Web3 RPC server like geth or erigon.
|
/// An active connection to a Web3 RPC server like geth or erigon.
|
||||||
pub struct Web3Connection {
|
#[derive(Default)]
|
||||||
|
pub struct Web3Rpc {
|
||||||
pub name: String,
|
pub name: String,
|
||||||
pub display_name: Option<String>,
|
pub display_name: Option<String>,
|
||||||
pub db_conn: Option<DatabaseConnection>,
|
pub db_conn: Option<DatabaseConnection>,
|
||||||
@ -91,12 +121,13 @@ pub struct Web3Connection {
|
|||||||
pub(super) block_data_limit: AtomicU64,
|
pub(super) block_data_limit: AtomicU64,
|
||||||
/// Lower tiers are higher priority when sending requests
|
/// Lower tiers are higher priority when sending requests
|
||||||
pub(super) tier: u64,
|
pub(super) tier: u64,
|
||||||
/// TODO: should this be an AsyncRwLock?
|
/// TODO: change this to a watch channel so that http providers can subscribe and take action on change
|
||||||
pub(super) head_block: RwLock<Option<SavedBlock>>,
|
pub(super) head_block: RwLock<Option<SavedBlock>>,
|
||||||
pub(super) open_request_handle_metrics: Arc<OpenRequestHandleMetrics>,
|
/// Track how fast this RPC is
|
||||||
|
pub(super) latency: Web3RpcLatencies,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Web3Connection {
|
impl Web3Rpc {
|
||||||
/// Connect to a web3 rpc
|
/// Connect to a web3 rpc
|
||||||
// TODO: have this take a builder (which will have channels attached). or maybe just take the config and give the config public fields
|
// TODO: have this take a builder (which will have channels attached). or maybe just take the config and give the config public fields
|
||||||
#[allow(clippy::too_many_arguments)]
|
#[allow(clippy::too_many_arguments)]
|
||||||
@ -120,8 +151,7 @@ impl Web3Connection {
|
|||||||
tx_id_sender: Option<flume::Sender<(TxHash, Arc<Self>)>>,
|
tx_id_sender: Option<flume::Sender<(TxHash, Arc<Self>)>>,
|
||||||
reconnect: bool,
|
reconnect: bool,
|
||||||
tier: u64,
|
tier: u64,
|
||||||
open_request_handle_metrics: Arc<OpenRequestHandleMetrics>,
|
) -> anyhow::Result<(Arc<Web3Rpc>, AnyhowJoinHandle<()>)> {
|
||||||
) -> anyhow::Result<(Arc<Web3Connection>, AnyhowJoinHandle<()>)> {
|
|
||||||
let hard_limit = hard_limit.map(|(hard_rate_limit, redis_pool)| {
|
let hard_limit = hard_limit.map(|(hard_rate_limit, redis_pool)| {
|
||||||
// TODO: is cache size 1 okay? i think we need
|
// TODO: is cache size 1 okay? i think we need
|
||||||
RedisRateLimiter::new(
|
RedisRateLimiter::new(
|
||||||
@ -154,19 +184,14 @@ impl Web3Connection {
|
|||||||
display_name,
|
display_name,
|
||||||
http_client,
|
http_client,
|
||||||
url: url_str,
|
url: url_str,
|
||||||
active_requests: 0.into(),
|
|
||||||
frontend_requests: 0.into(),
|
|
||||||
internal_requests: 0.into(),
|
|
||||||
provider_state: AsyncRwLock::new(ProviderState::None),
|
|
||||||
hard_limit,
|
hard_limit,
|
||||||
hard_limit_until,
|
hard_limit_until,
|
||||||
soft_limit,
|
soft_limit,
|
||||||
automatic_block_limit,
|
automatic_block_limit,
|
||||||
backup,
|
backup,
|
||||||
block_data_limit,
|
block_data_limit,
|
||||||
head_block: RwLock::new(Default::default()),
|
|
||||||
tier,
|
tier,
|
||||||
open_request_handle_metrics,
|
..Default::default()
|
||||||
};
|
};
|
||||||
|
|
||||||
let new_connection = Arc::new(new_connection);
|
let new_connection = Arc::new(new_connection);
|
||||||
@ -506,7 +531,7 @@ impl Web3Connection {
|
|||||||
// we previously sent a None. return early
|
// we previously sent a None. return early
|
||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
warn!("{} is not synced!", self);
|
warn!("clearing head block on {}!", self);
|
||||||
|
|
||||||
*head_block = None;
|
*head_block = None;
|
||||||
}
|
}
|
||||||
@ -885,34 +910,14 @@ impl Web3Connection {
|
|||||||
.clone()
|
.clone()
|
||||||
{
|
{
|
||||||
trace!("watching pending transactions on {}", self);
|
trace!("watching pending transactions on {}", self);
|
||||||
|
// TODO: does this keep the lock open for too long?
|
||||||
match provider.as_ref() {
|
match provider.as_ref() {
|
||||||
Web3Provider::Mock => unimplemented!(),
|
Web3Provider::Mock => unimplemented!(),
|
||||||
Web3Provider::Http(provider) => {
|
Web3Provider::Http(provider) => {
|
||||||
// there is a "watch_pending_transactions" function, but a lot of public nodes do not support the necessary rpc endpoints
|
// there is a "watch_pending_transactions" function, but a lot of public nodes do not support the necessary rpc endpoints
|
||||||
// TODO: what should this interval be? probably automatically set to some fraction of block time
|
// TODO: maybe subscribe to self.head_block?
|
||||||
// TODO: maybe it would be better to have one interval for all of the http providers, but this works for now
|
// TODO: this keeps a read lock guard open on provider_state forever. is that okay for an http client?
|
||||||
// TODO: if there are some websocket providers, maybe have a longer interval and a channel that tells the https to update when a websocket gets a new head? if they are slow this wouldn't work well though
|
futures::future::pending::<()>().await;
|
||||||
let mut interval = interval(Duration::from_secs(60));
|
|
||||||
interval.set_missed_tick_behavior(MissedTickBehavior::Delay);
|
|
||||||
|
|
||||||
loop {
|
|
||||||
// TODO: actually do something here
|
|
||||||
/*
|
|
||||||
match self.try_request_handle().await {
|
|
||||||
Ok(active_request_handle) => {
|
|
||||||
// TODO: check the filter
|
|
||||||
todo!("actually send a request");
|
|
||||||
}
|
|
||||||
Err(e) => {
|
|
||||||
warn!("Failed getting latest block from {}: {:?}", self, e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
*/
|
|
||||||
|
|
||||||
// wait for the interval
|
|
||||||
// TODO: if error or rate limit, increase interval?
|
|
||||||
interval.tick().await;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
Web3Provider::Ws(provider) => {
|
Web3Provider::Ws(provider) => {
|
||||||
// TODO: maybe the subscribe_pending_txs function should be on the active_request_handle
|
// TODO: maybe the subscribe_pending_txs function should be on the active_request_handle
|
||||||
@ -1084,46 +1089,48 @@ impl fmt::Debug for Web3Provider {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Hash for Web3Connection {
|
impl Hash for Web3Rpc {
|
||||||
fn hash<H: Hasher>(&self, state: &mut H) {
|
fn hash<H: Hasher>(&self, state: &mut H) {
|
||||||
// TODO: is this enough?
|
// TODO: is this enough?
|
||||||
self.name.hash(state);
|
self.name.hash(state);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Eq for Web3Connection {}
|
impl Eq for Web3Rpc {}
|
||||||
|
|
||||||
impl Ord for Web3Connection {
|
impl Ord for Web3Rpc {
|
||||||
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
|
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
|
||||||
self.name.cmp(&other.name)
|
self.name.cmp(&other.name)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl PartialOrd for Web3Connection {
|
impl PartialOrd for Web3Rpc {
|
||||||
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
|
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
|
||||||
Some(self.cmp(other))
|
Some(self.cmp(other))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl PartialEq for Web3Connection {
|
impl PartialEq for Web3Rpc {
|
||||||
fn eq(&self, other: &Self) -> bool {
|
fn eq(&self, other: &Self) -> bool {
|
||||||
self.name == other.name
|
self.name == other.name
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Serialize for Web3Connection {
|
impl Serialize for Web3Rpc {
|
||||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||||
where
|
where
|
||||||
S: Serializer,
|
S: Serializer,
|
||||||
{
|
{
|
||||||
// 3 is the number of fields in the struct.
|
// 3 is the number of fields in the struct.
|
||||||
let mut state = serializer.serialize_struct("Web3Connection", 8)?;
|
let mut state = serializer.serialize_struct("Web3Rpc", 9)?;
|
||||||
|
|
||||||
// the url is excluded because it likely includes private information. just show the name that we use in keys
|
// the url is excluded because it likely includes private information. just show the name that we use in keys
|
||||||
state.serialize_field("name", &self.name)?;
|
state.serialize_field("name", &self.name)?;
|
||||||
// a longer name for display to users
|
// a longer name for display to users
|
||||||
state.serialize_field("display_name", &self.display_name)?;
|
state.serialize_field("display_name", &self.display_name)?;
|
||||||
|
|
||||||
|
state.serialize_field("backup", &self.backup)?;
|
||||||
|
|
||||||
match self.block_data_limit.load(atomic::Ordering::Relaxed) {
|
match self.block_data_limit.load(atomic::Ordering::Relaxed) {
|
||||||
u64::MAX => {
|
u64::MAX => {
|
||||||
state.serialize_field("block_data_limit", &None::<()>)?;
|
state.serialize_field("block_data_limit", &None::<()>)?;
|
||||||
@ -1157,9 +1164,9 @@ impl Serialize for Web3Connection {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
-impl fmt::Debug for Web3Connection {
+impl fmt::Debug for Web3Rpc {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        let mut f = f.debug_struct("Web3Connection");
+        let mut f = f.debug_struct("Web3Rpc");
 
         f.field("name", &self.name);
 
@@ -1174,7 +1181,7 @@ impl fmt::Debug for Web3Connection {
     }
 }
 
-impl fmt::Display for Web3Connection {
+impl fmt::Display for Web3Rpc {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         // TODO: filter basic auth and api keys
         write!(f, "{}", &self.name)
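
Note on the hunk above: `f.debug_struct(...)` returns a builder, which is why the formatter can bind it to a `mut` local and attach fields one at a time (the elided lines of this hunk add more fields). A compact sketch of that builder pattern with a hypothetical struct; the real impl's closing call is in the elided lines, and `finish_non_exhaustive` here is just one reasonable choice:

    use std::fmt;

    struct Rpc {
        name: String,
        block_data_limit: u64,
    }

    impl fmt::Debug for Rpc {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            let mut f = f.debug_struct("Rpc");

            f.field("name", &self.name);

            // fields can be attached conditionally before finishing
            if self.block_data_limit != u64::MAX {
                f.field("block_data_limit", &self.block_data_limit);
            }

            f.finish_non_exhaustive()
        }
    }

    fn main() {
        let rpc = Rpc { name: "a".into(), block_data_limit: 64 };
        // prints: Rpc { name: "a", block_data_limit: 64, .. }
        println!("{rpc:?}");
    }
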
@@ -1207,27 +1214,16 @@ mod tests {
         let head_block = SavedBlock::new(random_block);
         let block_data_limit = u64::MAX;
 
-        let metrics = OpenRequestHandleMetrics::default();
-
-        let x = Web3Connection {
+        let x = Web3Rpc {
             name: "name".to_string(),
-            db_conn: None,
-            display_name: None,
             url: "ws://example.com".to_string(),
-            http_client: None,
-            active_requests: 0.into(),
-            frontend_requests: 0.into(),
-            internal_requests: 0.into(),
-            provider_state: AsyncRwLock::new(ProviderState::None),
-            hard_limit: None,
-            hard_limit_until: None,
             soft_limit: 1_000,
             automatic_block_limit: false,
             backup: false,
             block_data_limit: block_data_limit.into(),
             tier: 0,
             head_block: RwLock::new(Some(head_block.clone())),
-            open_request_handle_metrics: Arc::new(metrics),
+            ..Default::default()
         };
 
         assert!(x.has_block_data(&0.into()));
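
Note on the hunk above: `..Default::default()` is struct update syntax; every field not written out explicitly is taken from `Web3Rpc::default()`, which is what lets the test drop the long tail of `None` and zero fields (and implies the struct now has a `Default` impl). A small sketch with a hypothetical struct:

    #[derive(Debug, Default)]
    struct Rpc {
        name: String,
        url: String,
        soft_limit: u32,
        backup: bool,
        tier: u64,
    }

    fn main() {
        // only the fields that matter to the test are spelled out;
        // everything else comes from Rpc::default()
        let x = Rpc {
            name: "name".to_string(),
            url: "ws://example.com".to_string(),
            soft_limit: 1_000,
            ..Default::default()
        };

        assert!(!x.backup);
        assert_eq!(x.tier, 0);
        println!("{x:?}");
    }
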
@@ -1255,28 +1251,16 @@ mod tests {
 
         let block_data_limit = 64;
 
-        let metrics = OpenRequestHandleMetrics::default();
 
         // TODO: this is getting long. have a `impl Default`
-        let x = Web3Connection {
+        let x = Web3Rpc {
             name: "name".to_string(),
-            db_conn: None,
-            display_name: None,
-            url: "ws://example.com".to_string(),
-            http_client: None,
-            active_requests: 0.into(),
-            frontend_requests: 0.into(),
-            internal_requests: 0.into(),
-            provider_state: AsyncRwLock::new(ProviderState::None),
-            hard_limit: None,
-            hard_limit_until: None,
             soft_limit: 1_000,
             automatic_block_limit: false,
             backup: false,
             block_data_limit: block_data_limit.into(),
             tier: 0,
             head_block: RwLock::new(Some(head_block.clone())),
-            open_request_handle_metrics: Arc::new(metrics),
+            ..Default::default()
         };
 
         assert!(!x.has_block_data(&0.into()));
@@ -1313,7 +1297,7 @@ mod tests {
 
         let metrics = OpenRequestHandleMetrics::default();
 
-        let x = Web3Connection {
+        let x = Web3Rpc {
             name: "name".to_string(),
             db_conn: None,
             display_name: None,
@@ -1330,7 +1314,6 @@ mod tests {
             block_data_limit: block_data_limit.into(),
             tier: 0,
             head_block: RwLock::new(Some(head_block.clone())),
-            open_request_handle_metrics: Arc::new(metrics),
         };
 
         assert!(!x.has_block_data(&0.into()));
@@ -1,7 +1,6 @@
-use super::connection::Web3Connection;
+use super::one::Web3Rpc;
 use super::provider::Web3Provider;
 use crate::frontend::authorization::{Authorization, AuthorizationType};
-use crate::metered::{JsonRpcErrorCount, ProviderErrorCount};
 use anyhow::Context;
 use chrono::Utc;
 use entities::revert_log;
@@ -9,14 +8,10 @@ use entities::sea_orm_active_enums::Method;
 use ethers::providers::{HttpClientError, ProviderError, WsClientError};
 use ethers::types::{Address, Bytes};
 use log::{debug, error, trace, warn, Level};
-use metered::metered;
-use metered::HitCount;
-use metered::ResponseTime;
-use metered::Throughput;
 use migration::sea_orm::{self, ActiveEnum, ActiveModelTrait};
 use serde_json::json;
 use std::fmt;
-use std::sync::atomic::{self, AtomicBool, Ordering};
+use std::sync::atomic;
 use std::sync::Arc;
 use thread_fast_rng::rand::Rng;
 use tokio::time::{sleep, Duration, Instant};
@@ -35,11 +30,8 @@ pub enum OpenRequestResult {
 #[derive(Debug)]
 pub struct OpenRequestHandle {
     authorization: Arc<Authorization>,
-    conn: Arc<Web3Connection>,
-    // TODO: this is the same metrics on the conn. use a reference?
-    metrics: Arc<OpenRequestHandleMetrics>,
+    conn: Arc<Web3Rpc>,
     provider: Arc<Web3Provider>,
-    used: AtomicBool,
 }
 
 /// Depending on the context, RPC errors can require different handling.
@@ -129,14 +121,11 @@ impl Authorization {
     }
 }
 
-#[metered(registry = OpenRequestHandleMetrics, visibility = pub)]
 impl OpenRequestHandle {
-    pub async fn new(authorization: Arc<Authorization>, conn: Arc<Web3Connection>) -> Self {
+    pub async fn new(authorization: Arc<Authorization>, conn: Arc<Web3Rpc>) -> Self {
         // TODO: take request_id as an argument?
         // TODO: attach a unique id to this? customer requests have one, but not internal queries
         // TODO: what ordering?!
-        // TODO: should we be using metered, or not? i think not because we want stats for each handle
-        // TODO: these should maybe be sent to an influxdb instance?
         conn.active_requests.fetch_add(1, atomic::Ordering::Relaxed);
 
         let mut provider = None;
@@ -184,15 +173,10 @@ impl OpenRequestHandle {
             }
         }
 
-        let metrics = conn.open_request_handle_metrics.clone();
-        let used = false.into();
 
         Self {
             authorization,
             conn,
-            metrics,
             provider,
-            used,
         }
     }
 
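
Note on the hunk above: the handle still bumps the connection's `active_requests` gauge when it is created. `Ordering::Relaxed` is typically sufficient for a pure counter like this, since nothing else appears to synchronize through it (the kept `// TODO: what ordering?!` suggests the authors flagged the same question). A minimal sketch of that pattern:

    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::Arc;

    struct Conn {
        active_requests: AtomicUsize,
    }

    fn open_handle(conn: &Arc<Conn>) {
        // Relaxed: we only need the increment itself to be atomic,
        // not to order any surrounding reads or writes
        conn.active_requests.fetch_add(1, Ordering::Relaxed);
    }

    fn main() {
        let conn = Arc::new(Conn {
            active_requests: AtomicUsize::new(0),
        });

        open_handle(&conn);

        assert_eq!(conn.active_requests.load(Ordering::Relaxed), 1);
    }
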
@@ -201,17 +185,14 @@ impl OpenRequestHandle {
     }
 
     #[inline]
-    pub fn clone_connection(&self) -> Arc<Web3Connection> {
+    pub fn clone_connection(&self) -> Arc<Web3Rpc> {
         self.conn.clone()
     }
 
     /// Send a web3 request
     /// By having the request method here, we ensure that the rate limiter was called and connection counts were properly incremented
-    /// TODO: we no longer take self because metered doesn't like that
-    /// TODO: ErrorCount includes too many types of errors, such as transaction reverts
-    #[measure([JsonRpcErrorCount, HitCount, ProviderErrorCount, ResponseTime, Throughput])]
     pub async fn request<P, R>(
-        &self,
+        self,
         method: &str,
         params: &P,
         revert_handler: RequestRevertHandler,
@@ -221,20 +202,11 @@ impl OpenRequestHandle {
         P: Clone + fmt::Debug + serde::Serialize + Send + Sync + 'static,
         R: serde::Serialize + serde::de::DeserializeOwned + fmt::Debug,
     {
-        // ensure this function only runs once
-        if self.used.swap(true, Ordering::Release) {
-            unimplemented!("a request handle should only be used once");
-        }
 
         // TODO: use tracing spans
-        // TODO: requests from customers have request ids, but we should add
-        // TODO: including params in this is way too verbose
-        // the authorization field is already on a parent span
+        // TODO: including params in this log is way too verbose
         // trace!(rpc=%self.conn, %method, "request");
 
-        // trace!("got provider for {:?}", self);
+        // TODO: replace ethers-rs providers with our own that supports streaming the responses
 
-        // TODO: really sucks that we have to clone here
         let response = match &*self.provider {
             Web3Provider::Mock => unimplemented!(),
             Web3Provider::Http(provider) => provider.request(method, params).await,
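
Note on the hunk above: with the `metered` macros gone (the deleted TODO says they were why `request` could not take `self`), the method can now consume the handle, and the move replaces the old runtime guard (`used: AtomicBool` plus the `swap`/`unimplemented!` check) with a compile-time one: a moved handle simply cannot be called twice. A sketch of the idea with a hypothetical handle type; it assumes tokio for the async runtime:

    struct OpenRequestHandle {
        rpc_name: String,
    }

    impl OpenRequestHandle {
        // consuming `self` makes "use once" a compile-time property:
        // calling request a second time is error[E0382], not a panic
        async fn request(self, method: &str) -> String {
            format!("sent {method} to {}", self.rpc_name)
        }
    }

    #[tokio::main]
    async fn main() {
        let handle = OpenRequestHandle {
            rpc_name: "example".into(),
        };

        let response = handle.request("eth_blockNumber").await;
        println!("{response}");

        // handle.request("eth_chainId").await; // error[E0382]: use of moved value
    }
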
@@ -1,25 +1,25 @@
 use super::blockchain::{ArcBlock, SavedBlock};
-use super::connection::Web3Connection;
-use super::connections::Web3Connections;
+use super::many::Web3Rpcs;
+use super::one::Web3Rpc;
 use ethers::prelude::{H256, U64};
 use serde::Serialize;
 use std::fmt;
 use std::sync::Arc;
 
-/// A collection of Web3Connections that are on the same block.
+/// A collection of Web3Rpcs that are on the same block.
 /// Serialize is so we can print it on our debug endpoint
 #[derive(Clone, Default, Serialize)]
-pub struct ConsensusConnections {
+pub struct ConsensusWeb3Rpcs {
     // TODO: store ArcBlock instead?
     pub(super) head_block: Option<SavedBlock>,
     // TODO: this should be able to serialize, but it isn't
     #[serde(skip_serializing)]
-    pub(super) conns: Vec<Arc<Web3Connection>>,
+    pub(super) conns: Vec<Arc<Web3Rpc>>,
     pub(super) num_checked_conns: usize,
     pub(super) includes_backups: bool,
 }
 
-impl ConsensusConnections {
+impl ConsensusWeb3Rpcs {
     pub fn num_conns(&self) -> usize {
         self.conns.len()
     }
@@ -31,7 +31,7 @@ impl ConsensusConnections {
     // TODO: sum_hard_limit?
 }
 
-impl fmt::Debug for ConsensusConnections {
+impl fmt::Debug for ConsensusWeb3Rpcs {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         // TODO: the default formatter takes forever to write. this is too quiet though
         // TODO: print the actual conns?
@@ -42,7 +42,7 @@ impl fmt::Debug for ConsensusConnections {
     }
 }
 
-impl Web3Connections {
+impl Web3Rpcs {
     pub fn head_block(&self) -> Option<ArcBlock> {
         self.watch_consensus_head_receiver
             .as_ref()
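
Note on the hunk above: `#[serde(skip_serializing)]` is what lets the struct keep `#[derive(Clone, Default, Serialize)]` even though `conns` holds values without a usable Serialize impl (the TODO right above it flags exactly that); the field is simply left out of the output. A small sketch with illustrative types, assuming `serde` and `serde_json`:

    use serde::Serialize;

    #[derive(Serialize)]
    struct Consensus {
        head_block: Option<u64>,
        // stands in for Vec<Arc<Web3Rpc>>, which can't be serialized
        #[serde(skip_serializing)]
        conns: Vec<String>,
        num_checked_conns: usize,
    }

    fn main() {
        let c = Consensus {
            head_block: Some(17),
            conns: vec!["a".into()],
            num_checked_conns: 1,
        };

        // prints: {"head_block":17,"num_checked_conns":1} -- conns is omitted
        println!("{}", serde_json::to_string(&c).unwrap());
    }
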
@@ -1,8 +1,8 @@
 use crate::frontend::authorization::Authorization;
 
+use super::many::Web3Rpcs;
 ///! Load balanced communication with a group of web3 providers
-use super::connection::Web3Connection;
-use super::connections::Web3Connections;
+use super::one::Web3Rpc;
 use super::request::OpenRequestResult;
 use ethers::prelude::{ProviderError, Transaction, TxHash};
 use log::{debug, trace, Level};
@@ -17,11 +17,11 @@ pub enum TxStatus {
     Orphaned(Transaction),
 }
 
-impl Web3Connections {
+impl Web3Rpcs {
     async fn query_transaction_status(
         &self,
         authorization: &Arc<Authorization>,
-        rpc: Arc<Web3Connection>,
+        rpc: Arc<Web3Rpc>,
         pending_tx_id: TxHash,
     ) -> Result<Option<TxStatus>, ProviderError> {
         // TODO: there is a race here on geth. sometimes the rpc isn't yet ready to serve the transaction (even though they told us about it!)
@@ -66,7 +66,7 @@ impl Web3Connections {
     pub(super) async fn process_incoming_tx_id(
         self: Arc<Self>,
         authorization: Arc<Authorization>,
-        rpc: Arc<Web3Connection>,
+        rpc: Arc<Web3Rpc>,
         pending_tx_id: TxHash,
         pending_tx_sender: broadcast::Sender<TxStatus>,
     ) -> anyhow::Result<()> {