Merge branch 'upstream-main' into 19-admin-imitate

Commit cc41e54cbf by yenicelik, 2023-02-10 17:12:22 +00:00
38 changed files with 1283 additions and 918 deletions

@ -6,5 +6,6 @@ perf.data.old
/data/
/docker-compose*
/Dockerfile
/Jenkinsfile
/redis-cell-server/
/target

Cargo.lock: 562 changed lines (generated file; diff suppressed because it is too large)

@ -11,5 +11,9 @@ members = [
[profile.release]
# `debug = true` so that sentry can give us line numbers
debug = true
[profile.faster_release]
inherits = "release"
# spend longer compiling for a slightly faster binary
codegen-units = 1
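The custom profile above is opt-in; as a rough sketch (the `./web3_proxy` path is assumed from the Dockerfile below), it would be selected explicitly at build or install time:
```
# build the workspace with the slower-to-compile, slightly faster profile
cargo build --profile faster_release

# or install a member binary with it, as the Dockerfile below does
cargo install --locked --profile faster_release --path ./web3_proxy
```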

@ -1,21 +1,52 @@
FROM rust:1-bullseye as builder
#
# cargo-nextest
# We only pay the installation cost once,
# it will be cached from the second build onwards
#
FROM rust:1-bullseye AS builder
WORKDIR /app
ENV CARGO_TERM_COLOR always
# a next-generation test runner for Rust projects.
# TODO: more mount type cache?
RUN --mount=type=cache,target=/usr/local/cargo/registry \
cargo install cargo-nextest
# foundry is needed to run tests
ENV PATH /root/.foundry/bin:$PATH
RUN curl -L https://foundry.paradigm.xyz | bash && foundryup
WORKDIR /usr/src/web3_proxy
# copy the application
COPY . .
# test the application with cargo-nextest
RUN --mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/usr/src/web3_proxy/target \
cargo test &&\
cargo install --locked --no-default-features --root /opt/bin --path ./web3_proxy
--mount=type=cache,target=/app/target \
cargo nextest run
FROM debian:bullseye-slim
# build the application
# using a "release" profile (which install does) is **very** important
RUN --mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/app/target \
cargo install --locked --no-default-features --profile faster_release --root /opt/bin --path ./web3_proxy
COPY --from=builder /opt/bin/* /usr/local/bin/
#
# We do not need the Rust toolchain to run the binary!
#
FROM debian:bullseye-slim AS runtime
# Create the llama user so the container does not run as root
RUN mkdir /llama \
&& adduser --home /llama --shell /sbin/nologin --gecos '' --no-create-home --disabled-password --uid 1001 llama \
&& chown -R llama /llama
USER llama
ENTRYPOINT ["web3_proxy_cli"]
CMD [ "--config", "/web3-proxy.toml", "proxyd" ]
# TODO: lower log level when done with prototyping
ENV RUST_LOG "warn,web3_proxy=debug,web3_proxy_cli=debug"
COPY --from=builder /opt/bin/* /usr/local/bin/
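A hedged sketch of building and running this image locally (the tag name, mounted config, and port are assumptions; the config path comes from the CMD above and port 8544 from the README's default listen address):
```
# BuildKit is required for the `RUN --mount=type=cache` lines above
DOCKER_BUILDKIT=1 docker build -t web3-proxy .

# mount a config where the default CMD expects it and expose the proxy port
docker run --rm -p 8544:8544 \
    -v "$(pwd)/config/minimal.toml:/web3-proxy.toml:ro" \
    web3-proxy
```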

Jenkinsfile: new vendored file, 174 lines

@ -0,0 +1,174 @@
def buildAndPush() {
// env.BRANCH_NAME is set to the git branch name by default
// env.REGISTRY is the repository url for this pipeline
// env.GIT_SHORT is the git short hash of the currently checked out repo
// env.LATEST_BRANCH is the branch name that gets tagged latest
// env.ARCH is the system architecture. some apps can be generic (amd64, arm64),
// but apps that compile for specific hardware (like web3-proxy) will need more specific tags (amd64_epyc2, arm64_graviton2, intel_xeon3, etc.)
// TODO: check that this system actually matches the given arch
sh '''#!/bin/bash
set -eux -o pipefail
[ -n "$GIT_SHORT" ]
[ -n "$GIT_SHORT" ]
[ -n "$REGISTRY" ]
[ -n "$ARCH" ]
# a deterministic mtime on .git keeps the layer cache working for Dockerfiles that do 'ADD . .' or similar
# without this, the build process always thinks the directory has changes
git restore-mtime
touch -t "$(git show -s --date=format:'%Y%m%d%H%M.%S' --format=%cd HEAD)" .git
function buildAndPush {
image=$1
buildcache=$2
buildctl build \
--frontend=dockerfile.v0 \
--local context=. \
--local dockerfile=. \
--output "type=image,name=${image},push=true" \
--export-cache type=s3,region=us-east-2,bucket=llamarpc-buildctl-cache,name=${buildcache} \
--import-cache type=s3,region=us-east-2,bucket=llamarpc-buildctl-cache,name=${buildcache} \
;
}
BUILDCACHE="${REGISTRY}:buildcache_${ARCH}"
# build and push a docker image tagged with the short git commit
buildAndPush "${REGISTRY}:git_${GIT_SHORT}_${ARCH}" "${BUILDCACHE}"
# push an image tagged with the branch
# since buildAndPush just ran above, this should be very quick
# TODO: maybe replace slashes in the name with dashes or underscores
buildAndPush "${REGISTRY}:branch_${BRANCH_NAME}_${ARCH}" "${BUILDCACHE}"
if [ "${BRANCH_NAME}" = "${LATEST_BRANCH}" ]; then
buildAndPush "${REGISTRY}:latest_${ARCH}" "${BUILDCACHE}"
fi
'''
}
pipeline {
agent any
options {
ansiColor('xterm')
}
environment {
// AWS_ECR_URL needs to be set in Jenkins' config.
// AWS_ECR_URL could really be any docker registry. we just use ECR so that we don't have to manage it
REGISTRY="${AWS_ECR_URL}/web3-proxy"
// branch that should get tagged with "latest_$arch" (stable, main, master, etc.)
LATEST_BRANCH="main"
// non-buildkit builds are officially deprecated
// buildkit is much faster and handles caching much better than the default build process.
DOCKER_BUILDKIT=1
GIT_SHORT="${GIT_COMMIT.substring(0,8)}"
}
stages {
stage('build and push') {
parallel {
stage('build and push amd64_epyc2 image') {
agent {
label 'amd64_epyc2'
}
environment {
ARCH="amd64_epyc2"
}
steps {
script {
buildAndPush()
}
}
}
stage('build and push amd64_epyc3 image') {
agent {
label 'amd64_epyc3'
}
environment {
ARCH="amd64_epyc3"
}
steps {
script {
buildAndPush()
}
}
}
stage('Build and push arm64_graviton1 image') {
agent {
label 'arm64_graviton1'
}
environment {
ARCH="arm64_graviton1"
}
steps {
script {
buildAndPush()
}
}
}
stage('Build and push arm64_graviton2 image') {
agent {
label 'arm64_graviton2'
}
environment {
ARCH="arm64_graviton2"
}
steps {
script {
buildAndPush()
}
}
}
stage('Build and push intel_xeon3 image') {
agent {
label 'intel_xeon3'
}
environment {
ARCH="intel_xeon3"
}
steps {
script {
buildAndPush()
}
}
}
}
}
stage('create (experimental) manifest') {
agent any
steps {
script {
sh '''#!/bin/bash
set -eux -o pipefail
[ -n "$BRANCH_NAME" ]
[ -n "$GIT_SHORT" ]
[ -n "$LATEST_BRANCH" ]
[ -n "$REGISTRY" ]
function manifest {
repo=$1
docker manifest create "${repo}" --amend "${repo}_arm64_graviton2" --amend "${repo}_amd64_epyc2" --amend "${repo}_intel_xeon3"
docker manifest push --purge "${repo}"
}
manifest "${REGISTRY}:git_${GIT_SHORT}"
manifest "${REGISTRY}:branch_${BRANCH_NAME}"
if [ "${BRANCH_NAME}" = "${LATEST_BRANCH}" ]; then
manifest "${REGISTRY}:latest"
fi
'''
}
}
}
}
}
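For orientation, a sketch of the tags this pipeline would produce for a hypothetical commit `abc12345` on `main` (the registry value is whatever `AWS_ECR_URL` resolves to):
```
# per-arch images pushed by the parallel build stages
${AWS_ECR_URL}/web3-proxy:git_abc12345_amd64_epyc2
${AWS_ECR_URL}/web3-proxy:branch_main_amd64_epyc2
${AWS_ECR_URL}/web3-proxy:latest_amd64_epyc2        # only when BRANCH_NAME == LATEST_BRANCH

# manifest lists created by the final stage, combining several arch tags
${AWS_ECR_URL}/web3-proxy:git_abc12345
${AWS_ECR_URL}/web3-proxy:branch_main
${AWS_ECR_URL}/web3-proxy:latest
```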

@ -37,7 +37,7 @@ Options:
Start the server with the defaults (listen on `http://localhost:8544` and use `./config/development.toml`, which uses the database and cache running under docker and proxies to a bunch of public nodes):
```
cargo run --release -- daemon
cargo run --release -- proxyd
```
## Common commands

TODO.md: 21 changed lines

@ -243,8 +243,8 @@ These are roughly in order of completion
- [x] cache the status page for a second
- [x] request accounting for websockets
- [x] database merge scripts
- [x] test that sets up a Web3Connection and asks "has_block" for old and new blocks
- [x] test that sets up Web3Connections with 2 nodes. one behind by several blocks. and see what the "next" server shows as
- [x] test that sets up a Web3Rpc and asks "has_block" for old and new blocks
- [x] test that sets up Web3Rpcs with 2 nodes. one behind by several blocks. and see what the "next" server shows as
- [x] ethspam on bsc and polygon gives 1/4 errors. fix whatever is causing this
- bugfix! we were using the whole connection list instead of just the synced connection list when picking servers. oops!
- [x] actually block unauthenticated requests instead of emitting warning of "allowing without auth during development!"
@ -289,7 +289,7 @@ These are not yet ordered. There might be duplicates. We might not actually need
- we were caching too aggressively
- [x] BUG! if sending transactions gets "INTERNAL_ERROR: existing tx with same hash", create a success message
- we just want to be sure that the server has our tx and in this case, it does.
- ERROR http_request:request:try_send_all_upstream_servers: web3_proxy::rpcs::request: bad response! err=JsonRpcClientError(JsonRpcError(JsonRpcError { code: -32000, message: "INTERNAL_ERROR: existing tx with same hash", data: None })) method=eth_sendRawTransaction rpc=local_erigon_alpha_archive id=01GF4HV03Y4ZNKQV8DW5NDQ5CG method=POST authorized_request=User(Some(SqlxMySqlPoolConnection), AuthorizedKey { ip: 10.11.12.15, origin: None, user_key_id: 4, log_revert_chance: 0.0000 }) self=Web3Connections { conns: {"local_erigon_alpha_archive_ws": Web3Connection { name: "local_erigon_alpha_archive_ws", blocks: "all", .. }, "local_geth_ws": Web3Connection { name: "local_geth_ws", blocks: 64, .. }, "local_erigon_alpha_archive": Web3Connection { name: "local_erigon_alpha_archive", blocks: "all", .. }}, .. } authorized_request=Some(User(Some(SqlxMySqlPoolConnection), AuthorizedKey { ip: 10.11.12.15, origin: None, user_key_id: 4, log_revert_chance: 0.0000 })) request=JsonRpcRequest { id: RawValue(39), method: "eth_sendRawTransaction", .. } request_metadata=Some(RequestMetadata { datetime: 2022-10-11T22:14:57.406829095Z, period_seconds: 60, request_bytes: 633, backend_requests: 0, no_servers: 0, error_response: false, response_bytes: 0, response_millis: 0 }) block_needed=None
- ERROR http_request:request:try_send_all_upstream_servers: web3_proxy::rpcs::request: bad response! err=JsonRpcClientError(JsonRpcError(JsonRpcError { code: -32000, message: "INTERNAL_ERROR: existing tx with same hash", data: None })) method=eth_sendRawTransaction rpc=local_erigon_alpha_archive id=01GF4HV03Y4ZNKQV8DW5NDQ5CG method=POST authorized_request=User(Some(SqlxMySqlPoolConnection), AuthorizedKey { ip: 10.11.12.15, origin: None, user_key_id: 4, log_revert_chance: 0.0000 }) self=Web3Rpcs { conns: {"local_erigon_alpha_archive_ws": Web3Rpc { name: "local_erigon_alpha_archive_ws", blocks: "all", .. }, "local_geth_ws": Web3Rpc { name: "local_geth_ws", blocks: 64, .. }, "local_erigon_alpha_archive": Web3Rpc { name: "local_erigon_alpha_archive", blocks: "all", .. }}, .. } authorized_request=Some(User(Some(SqlxMySqlPoolConnection), AuthorizedKey { ip: 10.11.12.15, origin: None, user_key_id: 4, log_revert_chance: 0.0000 })) request=JsonRpcRequest { id: RawValue(39), method: "eth_sendRawTransaction", .. } request_metadata=Some(RequestMetadata { datetime: 2022-10-11T22:14:57.406829095Z, period_seconds: 60, request_bytes: 633, backend_requests: 0, no_servers: 0, error_response: false, response_bytes: 0, response_millis: 0 }) block_needed=None
- [x] serde collect unknown fields in config instead of crash
- [x] upgrade user tier by address
- [x] all_backend_connections skips syncing servers
@ -324,6 +324,12 @@ These are not yet ordered. There might be duplicates. We might not actually need
- [x] improve waiting for sync when rate limited
- [x] improve pager duty errors for smarter deduping
- [x] add create_key cli command
- [x] short lived cache on /health
- [x] cache /status for longer
- [x] sort connections during eth_sendRawTransaction
- [x] block all admin_ rpc commands
- [x] remove the "metered" crate now that we save aggregate queries?
- [x] add archive depth to app config
- [-] proxy mode for benchmarking all backends
- [-] proxy mode for sending to multiple backends
- [-] let users choose a % of reverts to log (or maybe x/second). someone like curve logging all reverts will be a BIG database very quickly
@ -375,7 +381,6 @@ These are not yet ordered. There might be duplicates. We might not actually need
- [ ] cli commands to search users by key
- [ ] cli flag to set prometheus port
- [ ] flamegraphs show 25% of the time to be in moka-housekeeper. tune that
- [ ] remove the "metered" crate now that we save aggregate queries?
- [ ] remove/change the "active_requests" counter? maybe only once we have dynamic soft limits?
- [ ] refactor so configs can change while running
- this will probably be a rather large change, but is necessary when we have autoscaling
@ -551,10 +556,10 @@ in another repo: event subscriber
- [ ] weird flapping fork could have more useful logs. like, howd we get to 1/1/4 and fork. geth changed its mind 3 times?
- should we change our code to follow the same consensus rules as geth? our first seen still seems like a reasonable choice
- other chains might change all sorts of things about their fork choice rules
2022-07-22T23:52:18.593956Z WARN block_receiver: web3_proxy::connections: chain is forked! 1 possible heads. 1/1/4 rpcs have 0xa906…5bc1 rpc=Web3Connection { url: "ws://127.0.0.1:8546", data: 64, .. } new_block_num=15195517
2022-07-22T23:52:18.983441Z WARN block_receiver: web3_proxy::connections: chain is forked! 1 possible heads. 1/1/4 rpcs have 0x70e8…48e0 rpc=Web3Connection { url: "ws://127.0.0.1:8546", data: 64, .. } new_block_num=15195517
2022-07-22T23:52:19.350720Z WARN block_receiver: web3_proxy::connections: chain is forked! 2 possible heads. 1/2/4 rpcs have 0x70e8…48e0 rpc=Web3Connection { url: "ws://127.0.0.1:8549", data: "archive", .. } new_block_num=15195517
2022-07-22T23:52:26.041140Z WARN block_receiver: web3_proxy::connections: chain is forked! 2 possible heads. 2/4/4 rpcs have 0x70e8…48e0 rpc=Web3Connection { url: "http://127.0.0.1:8549", data: "archive", .. } new_block_num=15195517
2022-07-22T23:52:18.593956Z WARN block_receiver: web3_proxy::connections: chain is forked! 1 possible heads. 1/1/4 rpcs have 0xa906…5bc1 rpc=Web3Rpc { url: "ws://127.0.0.1:8546", data: 64, .. } new_block_num=15195517
2022-07-22T23:52:18.983441Z WARN block_receiver: web3_proxy::connections: chain is forked! 1 possible heads. 1/1/4 rpcs have 0x70e8…48e0 rpc=Web3Rpc { url: "ws://127.0.0.1:8546", data: 64, .. } new_block_num=15195517
2022-07-22T23:52:19.350720Z WARN block_receiver: web3_proxy::connections: chain is forked! 2 possible heads. 1/2/4 rpcs have 0x70e8…48e0 rpc=Web3Rpc { url: "ws://127.0.0.1:8549", data: "archive", .. } new_block_num=15195517
2022-07-22T23:52:26.041140Z WARN block_receiver: web3_proxy::connections: chain is forked! 2 possible heads. 2/4/4 rpcs have 0x70e8…48e0 rpc=Web3Rpc { url: "http://127.0.0.1:8549", data: "archive", .. } new_block_num=15195517
- [ ] threshold should check actual available request limits (if any) instead of just the soft limit
- [ ] foreign key on_update and on_delete
- [ ] database creation timestamps

config/minimal.toml: new file, 32 lines

@ -0,0 +1,32 @@
[app]
chain_id = 1
# no database
# no influxdb
# no redis
# no sentry
# no public limits means anon gets full access
# no thundering herd protection
min_sum_soft_limit = 1
min_synced_rpcs = 1
# 1GB of cache
response_cache_max_bytes = 1_000_000_000
[balanced_rpcs]
[balanced_rpcs.llama_public_wss]
# TODO: what should we do if all rpcs are disabled? warn and wait for a config change?
disabled = false
display_name = "LlamaNodes WSS"
url = "wss://eth.llamarpc.com/"
soft_limit = 1_000
tier = 0
[balanced_rpcs.llama_public_https]
disabled = false
display_name = "LlamaNodes HTTPS"
url = "https://eth.llamarpc.com/"
soft_limit = 1_000
tier = 0
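As a sketch, this minimal config can be passed straight to the daemon (flag order taken from the Dockerfile CMD; the `proxyd` subcommand matches the README change below):
```
cargo run --release -- --config ./config/minimal.toml proxyd
```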

@ -7,8 +7,8 @@ edition = "2021"
[dependencies]
redis-rate-limiter = { path = "../redis-rate-limiter" }
anyhow = "1.0.68"
anyhow = "1.0.69"
hashbrown = "0.13.2"
log = "0.4.17"
moka = { version = "0.9.6", default-features = false, features = ["future"] }
tokio = "1.24.2"
moka = { version = "0.9.7", default-features = false, features = ["future"] }
tokio = "1.25.0"

@ -10,8 +10,8 @@ path = "src/mod.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
sea-orm = "0.10.7"
sea-orm = "0.11.0"
serde = "1.0.152"
uuid = "1.2.2"
uuid = "1.3.0"
ethers = "1.0.2"
ulid = "1.0.0"

@ -9,10 +9,10 @@ name = "migration"
path = "src/lib.rs"
[dependencies]
tokio = { version = "1.24.2", features = ["full", "tracing"] }
tokio = { version = "1.25.0", features = ["full", "tracing"] }
[dependencies.sea-orm-migration]
version = "0.10.7"
version = "0.11.0"
features = [
# Enable at least one `ASYNC_RUNTIME` and `DATABASE_DRIVER` feature if you want to run migration via CLI.
# View the list of supported features at https://www.sea-ql.org/SeaORM/docs/install-and-config/database-and-async-runtime.

@ -5,6 +5,6 @@ authors = ["Bryan Stitt <bryan@stitthappens.com>"]
edition = "2021"
[dependencies]
anyhow = "1.0.68"
anyhow = "1.0.69"
deadpool-redis = { version = "0.11.1", features = ["rt_tokio_1", "serde"] }
tokio = "1.24.2"
tokio = "1.25.0"

@ -25,10 +25,10 @@ thread-fast-rng = { path = "../thread-fast-rng" }
# TODO: import chrono from sea-orm so we always have the same version
# TODO: make sure this time version matches siwe. PR to put this in their prelude
anyhow = { version = "1.0.68", features = ["backtrace"] }
anyhow = { version = "1.0.69", features = ["backtrace"] }
argh = "0.1.10"
axum = { version = "0.6.4", features = ["headers", "ws"] }
axum-client-ip = "0.3.1"
axum-client-ip = "0.4.0"
axum-macros = "0.3.2"
chrono = "0.4.23"
counter = "0.5.7"
@ -38,7 +38,7 @@ env_logger = "0.10.0"
ethers = { version = "1.0.2", default-features = false, features = ["rustls", "ws"] }
fdlimit = "0.2.1"
flume = "0.10.14"
futures = { version = "0.3.25", features = ["thread-pool"] }
futures = { version = "0.3.26", features = ["thread-pool"] }
gethostname = "0.4.1"
glob = "0.3.1"
handlebars = "4.3.6"
@ -48,28 +48,28 @@ http = "0.2.8"
ipnet = "2.7.1"
itertools = "0.10.5"
log = "0.4.17"
metered = { version = "0.9.0", features = ["serialize"] }
moka = { version = "0.9.6", default-features = false, features = ["future"] }
notify = "5.0.0"
moka = { version = "0.9.7", default-features = false, features = ["future"] }
notify = "5.1.0"
num = "0.4.0"
num-traits = "0.2.15"
pagerduty-rs = { version = "0.1.6", default-features = false, features = ["async", "rustls", "sync"] }
parking_lot = { version = "0.12.1", features = ["arc_lock"] }
prettytable = "*"
proctitle = "0.1.1"
regex = "1.7.1"
reqwest = { version = "0.11.14", default-features = false, features = ["json", "tokio-rustls"] }
rustc-hash = "1.1.0"
sentry = { version = "0.29.2", default-features = false, features = ["backtrace", "contexts", "panic", "anyhow", "reqwest", "rustls", "log", "sentry-log"] }
sentry = { version = "0.29.3", default-features = false, features = ["backtrace", "contexts", "panic", "anyhow", "reqwest", "rustls", "log", "sentry-log"] }
serde = { version = "1.0.152", features = [] }
serde_json = { version = "1.0.91", default-features = false, features = ["alloc", "raw_value"] }
serde_prometheus = "0.1.6"
serde_json = { version = "1.0.93", default-features = false, features = ["alloc", "raw_value"] }
serde_prometheus = "0.2.0"
siwe = "0.5.0"
time = "0.3.17"
tokio = { version = "1.24.2", features = ["full"] }
tokio = { version = "1.25.0", features = ["full"] }
tokio-stream = { version = "0.1.11", features = ["sync"] }
toml = "0.6.0"
toml = "0.7.2"
tower = "0.4.13"
tower-http = { version = "0.3.5", features = ["cors", "sensitive-headers"] }
ulid = { version = "1.0.0", features = ["serde"] }
url = "2.3.1"
uuid = "1.2.2"
uuid = "1.3.0"

@ -1,32 +0,0 @@
use metered::{metered, HitCount, Throughput};
use serde::Serialize;
use thread_fast_rng::{rand::Rng, thread_fast_rng};
#[derive(Default, Debug, Serialize)]
pub struct Biz {
metrics: BizMetrics,
}
#[metered(registry = BizMetrics)]
impl Biz {
#[measure([HitCount, Throughput])]
pub fn biz(&self) {
let delay = std::time::Duration::from_millis(thread_fast_rng().gen::<u64>() % 200);
std::thread::sleep(delay);
}
}
fn main() {
let buz = Biz::default();
for _ in 0..100 {
buz.biz();
}
let mut globals = std::collections::HashMap::new();
globals.insert("service", "web3_proxy_prometheus_example");
let serialized = serde_prometheus::to_string(&buz.metrics, Some("example"), globals).unwrap();
println!("{}", serialized);
}

@ -11,9 +11,8 @@ use crate::jsonrpc::{
JsonRpcForwardedResponse, JsonRpcForwardedResponseEnum, JsonRpcRequest, JsonRpcRequestEnum,
};
use crate::rpcs::blockchain::{ArcBlock, SavedBlock};
use crate::rpcs::connection::Web3Connection;
use crate::rpcs::connections::Web3Connections;
use crate::rpcs::request::OpenRequestHandleMetrics;
use crate::rpcs::many::Web3Rpcs;
use crate::rpcs::one::Web3Rpc;
use crate::rpcs::transactions::TxStatus;
use crate::user_token::UserBearerToken;
use anyhow::Context;
@ -32,7 +31,6 @@ use futures::stream::{FuturesUnordered, StreamExt};
use hashbrown::{HashMap, HashSet};
use ipnet::IpNet;
use log::{debug, error, info, trace, warn, Level};
use metered::{metered, ErrorCount, HitCount, ResponseTime, Throughput};
use migration::sea_orm::{
self, ConnectionTrait, Database, DatabaseConnection, EntityTrait, PaginatorTrait,
};
@ -71,7 +69,9 @@ pub static REQUEST_PERIOD: u64 = 60;
#[derive(From)]
struct ResponseCacheKey {
// if none, this is cached until evicted
block: Option<SavedBlock>,
from_block: Option<SavedBlock>,
// to_block is only set when ranges of blocks are requested (like with eth_getLogs)
to_block: Option<SavedBlock>,
method: String,
// TODO: better type for this
params: Option<serde_json::Value>,
@ -96,7 +96,22 @@ impl PartialEq for ResponseCacheKey {
return false;
}
match (self.block.as_ref(), other.block.as_ref()) {
match (self.from_block.as_ref(), other.from_block.as_ref()) {
(None, None) => {}
(None, Some(_)) => {
return false;
}
(Some(_), None) => {
return false;
}
(Some(s), Some(o)) => {
if s != o {
return false;
}
}
}
match (self.to_block.as_ref(), other.to_block.as_ref()) {
(None, None) => {}
(None, Some(_)) => {
return false;
@ -123,7 +138,8 @@ impl Eq for ResponseCacheKey {}
impl Hash for ResponseCacheKey {
fn hash<H: Hasher>(&self, state: &mut H) {
self.block.as_ref().map(|x| x.hash()).hash(state);
self.from_block.as_ref().map(|x| x.hash()).hash(state);
self.to_block.as_ref().map(|x| x.hash()).hash(state);
self.method.hash(state);
self.params.as_ref().map(|x| x.to_string()).hash(state);
self.cache_errors.hash(state)
@ -182,9 +198,9 @@ impl DatabaseReplica {
// TODO: i'm sure this is more arcs than necessary, but spawning futures makes references hard
pub struct Web3ProxyApp {
/// Send requests to the best server available
pub balanced_rpcs: Arc<Web3Connections>,
pub balanced_rpcs: Arc<Web3Rpcs>,
/// Send private requests (like eth_sendRawTransaction) to all these servers
pub private_rpcs: Option<Arc<Web3Connections>>,
pub private_rpcs: Option<Arc<Web3Rpcs>>,
response_cache: ResponseCache,
// don't drop this or the sender will stop working
// TODO: broadcast channel instead?
@ -193,9 +209,6 @@ pub struct Web3ProxyApp {
pub config: AppConfig,
pub db_conn: Option<sea_orm::DatabaseConnection>,
pub db_replica: Option<DatabaseReplica>,
/// prometheus metrics
app_metrics: Arc<Web3ProxyAppMetrics>,
open_request_handle_metrics: Arc<OpenRequestHandleMetrics>,
/// store pending transactions that we've seen so that we don't send duplicates to subscribers
pub pending_transactions: Cache<TxHash, TxStatus, hashbrown::hash_map::DefaultHashBuilder>,
pub frontend_ip_rate_limiter: Option<DeferredRateLimiter<IpAddr>>,
@ -288,7 +301,7 @@ pub async fn migrate_db(
);
loop {
if Migrator::get_pending_migrations(&db_conn).await?.is_empty() {
if Migrator::get_pending_migrations(db_conn).await?.is_empty() {
info!("no migrations to apply");
return Ok(());
}
@ -314,10 +327,10 @@ pub async fn migrate_db(
break;
}
let migration_result = Migrator::up(&db_conn, None).await;
let migration_result = Migrator::up(db_conn, None).await;
// drop the distributed lock
drop_migration_lock(&db_conn).await?;
drop_migration_lock(db_conn).await?;
// return if migrations erred
migration_result
@ -347,7 +360,6 @@ pub struct Web3ProxyAppSpawn {
pub background_handles: FuturesUnordered<AnyhowJoinHandle<()>>,
}
#[metered(registry = Web3ProxyAppMetrics, registry_expr = self.app_metrics, visibility = pub)]
impl Web3ProxyApp {
/// The main entrypoint.
pub async fn spawn(
@ -377,10 +389,6 @@ impl Web3ProxyApp {
);
}
// setup metrics
let app_metrics = Default::default();
let open_request_handle_metrics: Arc<OpenRequestHandleMetrics> = Default::default();
let mut db_conn = None::<DatabaseConnection>;
let mut db_replica = None::<DatabaseReplica>;
@ -564,7 +572,7 @@ impl Web3ProxyApp {
.build_with_hasher(hashbrown::hash_map::DefaultHashBuilder::default());
// connect to the load balanced rpcs
let (balanced_rpcs, balanced_handle) = Web3Connections::spawn(
let (balanced_rpcs, balanced_handle) = Web3Rpcs::spawn(
top_config.app.chain_id,
db_conn.clone(),
balanced_rpcs,
@ -576,7 +584,6 @@ impl Web3ProxyApp {
top_config.app.min_synced_rpcs,
Some(pending_tx_sender.clone()),
pending_transactions.clone(),
open_request_handle_metrics.clone(),
)
.await
.context("spawning balanced rpcs")?;
@ -591,7 +598,7 @@ impl Web3ProxyApp {
warn!("No private relays configured. Any transactions will be broadcast to the public mempool!");
None
} else {
let (private_rpcs, private_handle) = Web3Connections::spawn(
let (private_rpcs, private_handle) = Web3Rpcs::spawn(
top_config.app.chain_id,
db_conn.clone(),
private_rpcs,
@ -607,7 +614,6 @@ impl Web3ProxyApp {
// TODO: subscribe to pending transactions on the private rpcs? they seem to have low rate limits
None,
pending_transactions.clone(),
open_request_handle_metrics.clone(),
)
.await
.context("spawning private_rpcs")?;
@ -663,14 +669,12 @@ impl Web3ProxyApp {
));
}
// keep 1GB of blocks in the cache
// responses can be very different in sizes, so this definitely needs a weigher
// TODO: max_capacity from config
// responses can be very different in sizes, so this is a cache with a max capacity and a weigher
// TODO: don't allow any response to be bigger than X% of the cache
let response_cache = Cache::builder()
.max_capacity(1024 * 1024 * 1024)
.max_capacity(top_config.app.response_cache_max_bytes)
.weigher(|k: &ResponseCacheKey, v| {
// TODO: is this good?
// TODO: is this good enough?
if let Ok(v) = serde_json::to_string(v) {
let weight = k.weight() + v.len();
@ -718,8 +722,6 @@ impl Web3ProxyApp {
db_conn,
db_replica,
vredis_pool,
app_metrics,
open_request_handle_metrics,
rpc_secret_key_cache,
bearer_token_semaphores,
ip_semaphores,
@ -893,9 +895,7 @@ impl Web3ProxyApp {
// "user_cache_size": app.rpc_secret_key_cache.weighted_size(),
#[derive(Serialize)]
struct CombinedMetrics<'a> {
app: &'a Web3ProxyAppMetrics,
backend_rpc: &'a OpenRequestHandleMetrics,
struct CombinedMetrics {
recent_ip_counts: RecentCounts,
recent_user_id_counts: RecentCounts,
recent_tx_counts: RecentCounts,
@ -903,14 +903,13 @@ impl Web3ProxyApp {
}
let metrics = CombinedMetrics {
app: &self.app_metrics,
backend_rpc: &self.open_request_handle_metrics,
recent_ip_counts,
recent_user_id_counts,
recent_tx_counts,
user_count,
};
// TODO: i don't like this library. it doesn't include HELP or TYPE lines and so our prometheus server fails to parse it
serde_prometheus::to_string(&metrics, Some("web3_proxy"), globals)
.expect("prometheus metrics should always serialize")
}
@ -921,8 +920,7 @@ impl Web3ProxyApp {
authorization: Arc<Authorization>,
request: JsonRpcRequestEnum,
proxy_mode: ProxyMode,
) -> Result<(JsonRpcForwardedResponseEnum, Vec<Arc<Web3Connection>>), FrontendErrorResponse>
{
) -> Result<(JsonRpcForwardedResponseEnum, Vec<Arc<Web3Rpc>>), FrontendErrorResponse> {
// trace!(?request, "proxy_web3_rpc");
// even though we have timeouts on the requests to our backend providers,
@ -961,7 +959,7 @@ impl Web3ProxyApp {
authorization: &Arc<Authorization>,
requests: Vec<JsonRpcRequest>,
proxy_mode: ProxyMode,
) -> anyhow::Result<(Vec<JsonRpcForwardedResponse>, Vec<Arc<Web3Connection>>)> {
) -> Result<(Vec<JsonRpcForwardedResponse>, Vec<Arc<Web3Rpc>>), FrontendErrorResponse> {
// TODO: we should probably change ethers-rs to support this directly. they pushed this off to v2 though
let num_requests = requests.len();
@ -978,7 +976,7 @@ impl Web3ProxyApp {
// TODO: i'm sure this could be done better with iterators
// TODO: stream the response?
let mut collected: Vec<JsonRpcForwardedResponse> = Vec::with_capacity(num_requests);
let mut collected_rpcs: HashSet<Arc<Web3Connection>> = HashSet::new();
let mut collected_rpcs: HashSet<Arc<Web3Rpc>> = HashSet::new();
for response in responses {
// TODO: any way to attach the tried rpcs to the error? it is likely helpful
let (response, rpcs) = response?;
@ -1013,13 +1011,13 @@ impl Web3ProxyApp {
}
}
#[measure([ErrorCount, HitCount, ResponseTime, Throughput])]
// #[measure([ErrorCount, HitCount, ResponseTime, Throughput])]
async fn proxy_cached_request(
self: &Arc<Self>,
authorization: &Arc<Authorization>,
mut request: JsonRpcRequest,
proxy_mode: ProxyMode,
) -> anyhow::Result<(JsonRpcForwardedResponse, Vec<Arc<Web3Connection>>)> {
) -> Result<(JsonRpcForwardedResponse, Vec<Arc<Web3Rpc>>), FrontendErrorResponse> {
// trace!("Received request: {:?}", request);
let request_metadata = Arc::new(RequestMetadata::new(REQUEST_PERIOD, request.num_bytes())?);
@ -1033,13 +1031,7 @@ impl Web3ProxyApp {
// TODO: don't clone?
let partial_response: serde_json::Value = match request_method.as_ref() {
// lots of commands are blocked
method @ ("admin_addPeer"
| "admin_datadir"
| "admin_startRPC"
| "admin_startWS"
| "admin_stopRPC"
| "admin_stopWS"
| "db_getHex"
method @ ("db_getHex"
| "db_getString"
| "db_putHex"
| "db_putString"
@ -1114,6 +1106,7 @@ impl Web3ProxyApp {
| "eth_newBlockFilter"
| "eth_newFilter"
| "eth_newPendingTransactionFilter"
| "eth_pollSubscriptions"
| "eth_uninstallFilter") => {
// TODO: unsupported command stat
// TODO: what error code?
@ -1138,9 +1131,10 @@ impl Web3ProxyApp {
}
None => {
// TODO: what does geth do if this happens?
return Err(anyhow::anyhow!(
"no servers synced. unknown eth_blockNumber"
));
// TODO: i think we want a 502 so that haproxy retries on another server
return Err(
anyhow::anyhow!("no servers synced. unknown eth_blockNumber").into(),
);
}
}
}
@ -1211,7 +1205,7 @@ impl Web3ProxyApp {
ProxyMode::Fastest(0) => None,
// TODO: how many balanced rpcs should we send to? configurable? percentage of total?
// TODO: what if we do 2 per tier? we want to blast the third party rpcs
// TODO: maybe having the third party rpcs in their own Web3Connections would be good for this
// TODO: maybe having the third party rpcs in their own Web3Rpcs would be good for this
ProxyMode::Fastest(x) => Some(x * 4),
ProxyMode::Versus => None,
};
@ -1221,6 +1215,7 @@ impl Web3ProxyApp {
// if we are sending the transaction privately, no matter the proxy_mode, we send to ALL private rpcs
(private_rpcs, None)
} else {
// TODO: send to balanced_rpcs AND private_rpcs
(&self.balanced_rpcs, default_num)
}
} else {
@ -1236,6 +1231,7 @@ impl Web3ProxyApp {
None,
Level::Trace,
num,
true,
)
.await?;
@ -1376,12 +1372,17 @@ impl Web3ProxyApp {
));
}
// TODO: don't return with ? here. send a jsonrpc invalid request
let param = Bytes::from_str(
params[0]
.as_str()
.context("parsing params 0 into str then bytes")?,
)?;
)
.map_err(|x| {
trace!("bad request: {:?}", x);
FrontendErrorResponse::BadRequest(
"param 0 could not be read as H256".to_string(),
)
})?;
let hash = H256::from(keccak256(param));
@ -1413,6 +1414,11 @@ impl Web3ProxyApp {
}
// anything else gets sent to backend rpcs and cached
method => {
if method.starts_with("admin_") {
// TODO: emit a stat? will probably just be noise
return Err(FrontendErrorResponse::AccessDenied);
}
// emit stats
// TODO: if no servers synced, wait for them to be synced? probably better to error and let haproxy retry another server
@ -1434,7 +1440,8 @@ impl Web3ProxyApp {
.await?
{
BlockNeeded::CacheSuccessForever => Some(ResponseCacheKey {
block: None,
from_block: None,
to_block: None,
method: method.to_string(),
params: request.params.clone(),
cache_errors: false,
@ -1444,12 +1451,12 @@ impl Web3ProxyApp {
block_num,
cache_errors,
} => {
let (request_block_hash, archive_needed) = self
let (request_block_hash, block_depth) = self
.balanced_rpcs
.block_hash(authorization, &block_num)
.await?;
if archive_needed {
if block_depth < self.config.archive_depth {
request_metadata
.archive_request
.store(true, atomic::Ordering::Relaxed);
@ -1461,7 +1468,48 @@ impl Web3ProxyApp {
.await?;
Some(ResponseCacheKey {
block: Some(SavedBlock::new(request_block)),
from_block: Some(SavedBlock::new(request_block)),
to_block: None,
method: method.to_string(),
// TODO: hash here?
params: request.params.clone(),
cache_errors,
})
}
BlockNeeded::CacheRange {
from_block_num,
to_block_num,
cache_errors,
} => {
let (from_block_hash, block_depth) = self
.balanced_rpcs
.block_hash(authorization, &from_block_num)
.await?;
if block_depth < self.config.archive_depth {
request_metadata
.archive_request
.store(true, atomic::Ordering::Relaxed);
}
let from_block = self
.balanced_rpcs
.block(authorization, &from_block_hash, None)
.await?;
let (to_block_hash, _) = self
.balanced_rpcs
.block_hash(authorization, &to_block_num)
.await?;
let to_block = self
.balanced_rpcs
.block(authorization, &to_block_hash, None)
.await?;
Some(ResponseCacheKey {
from_block: Some(SavedBlock::new(from_block)),
to_block: Some(SavedBlock::new(to_block)),
method: method.to_string(),
// TODO: hash here?
params: request.params.clone(),
@ -1476,14 +1524,11 @@ impl Web3ProxyApp {
let authorization = authorization.clone();
if let Some(cache_key) = cache_key {
let request_block_number = cache_key.block.as_ref().map(|x| x.number());
let from_block_num = cache_key.from_block.as_ref().map(|x| x.number());
self.response_cache
.try_get_with(cache_key, async move {
// TODO: retry some failures automatically!
// TODO: try private_rpcs if all the balanced_rpcs fail!
// TODO: put the hash here instead of the block number? its in the request already.
let mut response = self
.balanced_rpcs
.try_proxy_connection(
@ -1491,7 +1536,7 @@ impl Web3ProxyApp {
&authorization,
request,
Some(&request_metadata),
request_block_number.as_ref(),
from_block_num.as_ref(),
)
.await?;
@ -1499,6 +1544,8 @@ impl Web3ProxyApp {
response.id = Default::default();
// TODO: only cache the inner response
// TODO: how are we going to stream this?
// TODO: check response size. if its very large, return it in a custom Error type that bypasses caching
Ok::<_, anyhow::Error>(response)
})
.await
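A rough illustration of why the cache key now carries both endpoints (field names are from the struct above; the block values are made up): an `eth_getLogs` request over a range is keyed on both ends, so wide and narrow ranges no longer collide on a single `block` field.
```rust
// sketch only; mirrors the CacheRange branch above. block_a / block_b are made-up blocks.
let key = ResponseCacheKey {
    from_block: Some(SavedBlock::new(block_a)), // e.g. fromBlock 16_000_000
    to_block: Some(SavedBlock::new(block_b)),   // e.g. toBlock 16_000_100
    method: "eth_getLogs".to_string(),
    params: request.params.clone(),
    cache_errors: true,
};
// a second request with a different toBlock compares unequal and hashes differently,
// so it gets its own cache entry instead of reusing this one
```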

@ -155,7 +155,7 @@ mod tests {
use std::env;
use web3_proxy::{
config::{AppConfig, Web3ConnectionConfig},
config::{AppConfig, Web3RpcConfig},
rpcs::blockchain::ArcBlock,
};
@ -196,7 +196,7 @@ mod tests {
min_sum_soft_limit: 1,
min_synced_rpcs: 1,
public_requests_per_period: Some(1_000_000),
response_cache_max_bytes: 10_usize.pow(7),
response_cache_max_bytes: 10_u64.pow(7),
redirect_public_url: Some("example.com/".to_string()),
redirect_rpc_key_url: Some("example.com/{{rpc_key_id}}".to_string()),
..Default::default()
@ -204,7 +204,7 @@ mod tests {
balanced_rpcs: HashMap::from([
(
"anvil".to_string(),
Web3ConnectionConfig {
Web3RpcConfig {
disabled: false,
display_name: None,
url: anvil.endpoint(),
@ -219,7 +219,7 @@ mod tests {
),
(
"anvil_ws".to_string(),
Web3ConnectionConfig {
Web3RpcConfig {
disabled: false,
display_name: None,
url: anvil.ws_endpoint(),

@ -11,6 +11,7 @@ mod daemon;
mod drop_migration_lock;
mod list_user_tier;
mod pagerduty;
mod popularity_contest;
mod rpc_accounting;
mod sentryd;
mod transfer_key;
@ -80,6 +81,7 @@ enum SubCommand {
CreateUser(create_user::CreateUserSubCommand),
DropMigrationLock(drop_migration_lock::DropMigrationLockSubCommand),
Pagerduty(pagerduty::PagerdutySubCommand),
PopularityContest(popularity_contest::PopularityContestSubCommand),
Proxyd(daemon::ProxydSubCommand),
RpcAccounting(rpc_accounting::RpcAccountingSubCommand),
Sentryd(sentryd::SentrydSubCommand),
@ -372,6 +374,7 @@ fn main() -> anyhow::Result<()> {
x.main(pagerduty_async, top_config).await
}
SubCommand::PopularityContest(x) => x.main().await,
SubCommand::Sentryd(x) => {
if cli_config.sentry_url.is_none() {
warn!("sentry_url is not set! Logs will only show in this console");

@ -0,0 +1,135 @@
use std::collections::BTreeMap;
// show what nodes are used most often
use argh::FromArgs;
use log::trace;
use prettytable::{row, Table};
#[derive(FromArgs, PartialEq, Debug)]
/// Show which backend rpcs are used most often.
#[argh(subcommand, name = "popularity_contest")]
pub struct PopularityContestSubCommand {
#[argh(positional)]
/// the web3-proxy url
/// TODO: query multiple and add them together
rpc: String,
}
#[derive(Debug)]
struct BackendRpcData<'a> {
name: &'a str,
// tier: u64,
// backup: bool,
// block_data_limit: u64,
requests: u64,
}
impl PopularityContestSubCommand {
pub async fn main(self) -> anyhow::Result<()> {
let x: serde_json::Value = reqwest::get(format!("{}/status", self.rpc))
.await?
.json()
.await?;
let conns = x
.as_object()
.unwrap()
.get("balanced_rpcs")
.unwrap()
.as_object()
.unwrap()
.get("conns")
.unwrap()
.as_array()
.unwrap();
let mut by_tier = BTreeMap::<u64, Vec<_>>::new();
let mut tier_requests = BTreeMap::<u64, u64>::new();
let mut total_requests = 0;
for conn in conns {
let conn = conn.as_object().unwrap();
let name = conn
.get("display_name")
.unwrap_or_else(|| conn.get("name").unwrap())
.as_str()
.unwrap();
if name.ends_with("http") {
continue;
}
let tier = conn.get("tier").unwrap().as_u64().unwrap();
// let backup = conn.get("backup").unwrap().as_bool().unwrap();
// let block_data_limit = conn
// .get("block_data_limit")
// .unwrap()
// .as_u64()
// .unwrap_or(u64::MAX);
let requests = conn.get("total_requests").unwrap().as_u64().unwrap();
let rpc_data = BackendRpcData {
name,
// tier,
// backup,
// block_data_limit,
requests,
};
total_requests += rpc_data.requests;
*tier_requests.entry(tier).or_default() += rpc_data.requests;
by_tier.entry(tier).or_default().push(rpc_data);
}
trace!("tier_requests: {:#?}", tier_requests);
trace!("by_tier: {:#?}", by_tier);
let mut table = Table::new();
table.add_row(row![
"name",
"tier",
"rpc_requests",
"tier_request_pct",
"total_pct"
]);
let total_requests = total_requests as f32;
for (tier, rpcs) in by_tier.iter() {
let t = (*tier_requests.get(tier).unwrap()) as f32;
for rpc in rpcs.iter() {
let tier_request_pct = if t == 0.0 {
0.0
} else {
(rpc.requests as f32) / t * 100.0
};
let total_request_pct = if total_requests == 0.0 {
0.0
} else {
(rpc.requests as f32) / total_requests * 100.0
};
table.add_row(row![
rpc.name,
tier,
rpc.requests,
tier_request_pct,
total_request_pct
]);
}
}
table.printstd();
Ok(())
}
}
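A hedged usage example (the URL is an assumption based on the README's default listen address; the subcommand fetches `<rpc>/status` and tallies `total_requests` per backend):
```
cargo run --release -- popularity_contest http://localhost:8544
```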

@ -4,38 +4,36 @@ use ethers::{
prelude::{BlockNumber, U64},
types::H256,
};
use log::{trace, warn};
use log::warn;
use serde_json::json;
use std::sync::Arc;
use crate::{frontend::authorization::Authorization, rpcs::connections::Web3Connections};
use crate::{frontend::authorization::Authorization, rpcs::many::Web3Rpcs};
#[allow(non_snake_case)]
pub fn block_num_to_U64(block_num: BlockNumber, latest_block: U64) -> U64 {
pub fn block_num_to_U64(block_num: BlockNumber, latest_block: U64) -> (U64, bool) {
match block_num {
BlockNumber::Earliest => {
// modified is false because we want the backend to see "pending"
U64::zero()
}
BlockNumber::Earliest => (U64::zero(), false),
BlockNumber::Finalized => {
warn!("finalized block requested! not yet implemented!");
latest_block - 10
(latest_block - 10, false)
}
BlockNumber::Latest => {
// change "latest" to a number
latest_block
(latest_block, true)
}
BlockNumber::Number(x) => {
// we already have a number
x
(x, false)
}
BlockNumber::Pending => {
// modified is false because we want the backend to see "pending"
// TODO: think more about how to handle Pending
latest_block
(latest_block, false)
}
BlockNumber::Safe => {
warn!("finalized block requested! not yet implemented!");
latest_block - 3
(latest_block - 3, false)
}
}
}
@ -47,7 +45,7 @@ pub async fn clean_block_number(
params: &mut serde_json::Value,
block_param_id: usize,
latest_block: U64,
rpcs: &Web3Connections,
rpcs: &Web3Rpcs,
) -> anyhow::Result<U64> {
match params.as_array_mut() {
None => {
@ -58,7 +56,7 @@ pub async fn clean_block_number(
None => {
if params.len() == block_param_id {
// add the latest block number to the end of the params
params.push(serde_json::to_value(latest_block)?);
params.push(json!(latest_block));
} else {
// don't modify the request. only cache with current block
// TODO: more useful log that include the
@ -69,37 +67,41 @@ pub async fn clean_block_number(
Ok(latest_block)
}
Some(x) => {
let start = x.clone();
// convert the json value to a BlockNumber
let block_num = if let Some(obj) = x.as_object_mut() {
let (block_num, change) = if let Some(obj) = x.as_object_mut() {
// it might be a Map like `{"blockHash": String("0xa5626dc20d3a0a209b1de85521717a3e859698de8ce98bca1b16822b7501f74b")}`
if let Some(block_hash) = obj.remove("blockHash") {
let block_hash: H256 =
serde_json::from_value(block_hash).context("decoding blockHash")?;
let block = rpcs.block(authorization, &block_hash, None).await?;
let block = rpcs
.block(authorization, &block_hash, None)
.await
.context("fetching block number from hash")?;
block
.number
.expect("blocks here should always have numbers")
// TODO: set change to true? I think not; we should probably use hashes for everything.
(
block
.number
.expect("blocks here should always have numbers"),
false,
)
} else {
return Err(anyhow::anyhow!("blockHash missing"));
}
} else {
// it might be a string like "latest" or a block number
// TODO: "BlockNumber" needs a better name
let block_number = serde_json::from_value::<BlockNumber>(x.take())?;
// TODO: use take instead of clone
let block_number = serde_json::from_value::<BlockNumber>(x.clone())
.context("checking params for BlockNumber")?;
block_num_to_U64(block_number, latest_block)
};
// if we changed "latest" to a number, update the params to match
*x = serde_json::to_value(block_num)?;
// TODO: only do this if trace logging is enabled
if x.as_u64() != start.as_u64() {
trace!("changed {} to {}", start, x);
if change {
*x = json!(block_num);
}
Ok(block_num)
@ -112,7 +114,15 @@ pub async fn clean_block_number(
pub enum BlockNeeded {
CacheSuccessForever,
CacheNever,
Cache { block_num: U64, cache_errors: bool },
Cache {
block_num: U64,
cache_errors: bool,
},
CacheRange {
from_block_num: U64,
to_block_num: U64,
cache_errors: bool,
},
}
pub async fn block_needed(
@ -120,21 +130,22 @@ pub async fn block_needed(
method: &str,
params: Option<&mut serde_json::Value>,
head_block_num: U64,
rpcs: &Web3Connections,
rpcs: &Web3Rpcs,
) -> anyhow::Result<BlockNeeded> {
// if no params, no block is needed
let params = if let Some(params) = params {
// grab the params so we can inspect and potentially modify them
params
} else {
// if no params, no block is needed
// TODO: check all the methods with no params, some might not be cacheable
// caching for one block should always be okay
// caching with the head block /should/ always be okay
return Ok(BlockNeeded::Cache {
block_num: head_block_num,
cache_errors: true,
});
};
// get the index for the BlockNumber or return None to say no block is needed.
// get the index for the BlockNumber
// The BlockNumber is usually the last element.
// TODO: double check these. i think some of the getBlock stuff will never need archive
let block_param_id = match method {
@ -168,39 +179,44 @@ pub async fn block_needed(
.as_object_mut()
.ok_or_else(|| anyhow::anyhow!("invalid format"))?;
if let Some(x) = obj.get_mut("fromBlock") {
let block_num: BlockNumber = serde_json::from_value(x.take())?;
let block_num = block_num_to_U64(block_num, head_block_num);
*x = json!(block_num);
// TODO: maybe don't return. instead check toBlock too?
// TODO: if there is a very wide fromBlock and toBlock, we need to check that our rpcs have both!
return Ok(BlockNeeded::Cache {
block_num,
cache_errors: false,
});
}
if let Some(x) = obj.get_mut("toBlock") {
let block_num: BlockNumber = serde_json::from_value(x.take())?;
let block_num = block_num_to_U64(block_num, head_block_num);
*x = json!(block_num);
return Ok(BlockNeeded::Cache {
block_num,
cache_errors: false,
});
}
if obj.contains_key("blockHash") {
1
} else {
return Ok(BlockNeeded::Cache {
block_num: head_block_num,
let from_block_num = if let Some(x) = obj.get_mut("fromBlock") {
// TODO: use .take instead of clone
let block_num: BlockNumber = serde_json::from_value(x.clone())?;
let (block_num, change) = block_num_to_U64(block_num, head_block_num);
if change {
*x = json!(block_num);
}
block_num
} else {
let (block_num, _) = block_num_to_U64(BlockNumber::Earliest, head_block_num);
block_num
};
let to_block_num = if let Some(x) = obj.get_mut("toBlock") {
// TODO: use .take instead of clone
let block_num: BlockNumber = serde_json::from_value(x.clone())?;
let (block_num, change) = block_num_to_U64(block_num, head_block_num);
if change {
*x = json!(block_num);
}
block_num
} else {
head_block_num
};
return Ok(BlockNeeded::CacheRange {
from_block_num: from_block_num,
to_block_num: to_block_num,
cache_errors: true,
});
}
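A minimal sketch of how the new `(number, changed)` return value is meant to be used, mirroring `clean_block_number` above: the request parameter is only rewritten when a symbolic tag such as `"latest"` was resolved to a concrete number.
```rust
// sketch only; assumes the signatures shown in this diff
let (block_num, changed) = block_num_to_U64(BlockNumber::Latest, head_block_num);
if changed {
    // "latest" was turned into a concrete number, so update the outgoing request
    *x = json!(block_num);
}
```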

@ -1,6 +1,5 @@
use crate::rpcs::blockchain::BlockHashesCache;
use crate::rpcs::connection::Web3Connection;
use crate::rpcs::request::OpenRequestHandleMetrics;
use crate::rpcs::one::Web3Rpc;
use crate::{app::AnyhowJoinHandle, rpcs::blockchain::ArcBlock};
use argh::FromArgs;
use ethers::prelude::TxHash;
@ -12,8 +11,8 @@ use serde::Deserialize;
use std::sync::Arc;
use tokio::sync::broadcast;
pub type BlockAndRpc = (Option<ArcBlock>, Arc<Web3Connection>);
pub type TxHashAndRpc = (TxHash, Arc<Web3Connection>);
pub type BlockAndRpc = (Option<ArcBlock>, Arc<Web3Rpc>);
pub type TxHashAndRpc = (TxHash, Arc<Web3Rpc>);
#[derive(Debug, FromArgs)]
/// Web3_proxy is a fast caching and load balancing proxy for web3 (Ethereum or similar) JsonRPC servers.
@ -42,15 +41,15 @@ pub struct CliConfig {
#[derive(Clone, Debug, Deserialize)]
pub struct TopConfig {
pub app: AppConfig,
pub balanced_rpcs: HashMap<String, Web3ConnectionConfig>,
pub balanced_rpcs: HashMap<String, Web3RpcConfig>,
// TODO: instead of an option, give it a default
pub private_rpcs: Option<HashMap<String, Web3ConnectionConfig>>,
pub private_rpcs: Option<HashMap<String, Web3RpcConfig>>,
/// unknown config options get put here
#[serde(flatten, default = "HashMap::default")]
pub extra: HashMap<String, serde_json::Value>,
}
/// shared configuration between Web3Connections
/// shared configuration between Web3Rpcs
// TODO: no String, only &str
#[derive(Clone, Debug, Default, Deserialize)]
pub struct AppConfig {
@ -59,6 +58,10 @@ pub struct AppConfig {
#[serde(default = "default_allowed_origin_requests_per_period")]
pub allowed_origin_requests_per_period: HashMap<String, u64>,
/// erigon defaults to pruning beyond 90,000 blocks
#[serde(default = "default_archive_depth")]
pub archive_depth: u64,
/// EVM chain id. 1 for ETH
/// TODO: better type for chain_id? max of `u64::MAX / 2 - 36` <https://github.com/ethereum/EIPs/issues/2294>
pub chain_id: u64,
@ -135,7 +138,7 @@ pub struct AppConfig {
/// RPC responses are cached locally
#[serde(default = "default_response_cache_max_bytes")]
pub response_cache_max_bytes: usize,
pub response_cache_max_bytes: u64,
/// the stats page url for an anonymous user.
pub redirect_public_url: Option<String>,
@ -159,6 +162,10 @@ pub struct AppConfig {
pub extra: HashMap<String, serde_json::Value>,
}
fn default_archive_depth() -> u64 {
90_000
}
fn default_allowed_origin_requests_per_period() -> HashMap<String, u64> {
HashMap::new()
}
@ -183,15 +190,15 @@ fn default_login_rate_limit_per_period() -> u64 {
10
}
fn default_response_cache_max_bytes() -> usize {
fn default_response_cache_max_bytes() -> u64 {
// TODO: default to some percentage of the system?
// 100 megabytes
10_usize.pow(8)
10u64.pow(8)
}
/// Configuration for a backend web3 RPC server
#[derive(Clone, Debug, Deserialize)]
pub struct Web3ConnectionConfig {
pub struct Web3RpcConfig {
/// simple way to disable a connection without deleting the row
#[serde(default)]
pub disabled: bool,
@ -223,9 +230,9 @@ fn default_tier() -> u64 {
0
}
impl Web3ConnectionConfig {
/// Create a Web3Connection from config
/// TODO: move this into Web3Connection? (just need to make things pub(crate))
impl Web3RpcConfig {
/// Create a Web3Rpc from config
/// TODO: move this into Web3Rpc? (just need to make things pub(crate))
#[allow(clippy::too_many_arguments)]
pub async fn spawn(
self,
@ -238,13 +245,9 @@ impl Web3ConnectionConfig {
block_map: BlockHashesCache,
block_sender: Option<flume::Sender<BlockAndRpc>>,
tx_id_sender: Option<flume::Sender<TxHashAndRpc>>,
open_request_handle_metrics: Arc<OpenRequestHandleMetrics>,
) -> anyhow::Result<(Arc<Web3Connection>, AnyhowJoinHandle<()>)> {
) -> anyhow::Result<(Arc<Web3Rpc>, AnyhowJoinHandle<()>)> {
if !self.extra.is_empty() {
warn!(
"unknown Web3ConnectionConfig fields!: {:?}",
self.extra.keys()
);
warn!("unknown Web3RpcConfig fields!: {:?}", self.extra.keys());
}
let hard_limit = match (self.hard_limit, redis_pool) {
@ -266,7 +269,7 @@ impl Web3ConnectionConfig {
let backup = self.backup.unwrap_or(false);
Web3Connection::spawn(
Web3Rpc::spawn(
name,
self.display_name,
chain_id,
@ -283,7 +286,6 @@ impl Web3ConnectionConfig {
tx_id_sender,
true,
self.tier,
open_request_handle_metrics,
)
.await
}

@ -2,7 +2,7 @@
use super::errors::FrontendErrorResponse;
use crate::app::{AuthorizationChecks, Web3ProxyApp, APP_USER_AGENT};
use crate::rpcs::connection::Web3Connection;
use crate::rpcs::one::Web3Rpc;
use crate::user_token::UserBearerToken;
use anyhow::Context;
use axum::headers::authorization::Bearer;
@ -80,7 +80,7 @@ pub struct RequestMetadata {
// TODO: "archive" isn't really a boolean.
pub archive_request: AtomicBool,
/// if this is empty, there was a cache_hit
pub backend_requests: Mutex<Vec<Arc<Web3Connection>>>,
pub backend_requests: Mutex<Vec<Arc<Web3Rpc>>>,
pub no_servers: AtomicU64,
pub error_response: AtomicBool,
pub response_bytes: AtomicU64,

@ -11,7 +11,7 @@ use axum::{
use derive_more::From;
use http::header::InvalidHeaderValue;
use ipnet::AddrParseError;
use log::{trace, warn};
use log::{debug, trace, warn};
use migration::sea_orm::DbErr;
use redis_rate_limiter::redis::RedisError;
use reqwest::header::ToStrError;
@ -25,6 +25,7 @@ pub type FrontendResult = Result<Response, FrontendErrorResponse>;
pub enum FrontendErrorResponse {
AccessDenied,
Anyhow(anyhow::Error),
BadRequest(String),
SemaphoreAcquireError(AcquireError),
Database(DbErr),
HeadersError(headers::Error),
@ -71,18 +72,17 @@ impl FrontendErrorResponse {
),
)
}
// Self::(err) => {
// warn!("boxed err={:?}", err);
// (
// StatusCode::INTERNAL_SERVER_ERROR,
// JsonRpcForwardedResponse::from_str(
// // TODO: make this better. maybe include the error type?
// "boxed error!",
// Some(StatusCode::INTERNAL_SERVER_ERROR.as_u16().into()),
// None,
// ),
// )
// }
Self::BadRequest(err) => {
debug!("BAD_REQUEST: {}", err);
(
StatusCode::BAD_REQUEST,
JsonRpcForwardedResponse::from_str(
&format!("bad request: {}", err),
Some(StatusCode::BAD_REQUEST.as_u16().into()),
None,
),
)
}
Self::Database(err) => {
warn!("database err={:?}", err);
(

@ -1,4 +1,6 @@
//! `frontend` contains HTTP and websocket endpoints for use by users and admins.
//!
//! Important reading about axum extractors: https://docs.rs/axum/latest/axum/extract/index.html#the-order-of-extractors
pub mod admin;
pub mod authorization;
@ -31,6 +33,7 @@ pub enum FrontendResponseCaches {
// TODO: what should this cache's value be?
pub type FrontendResponseCache =
Cache<FrontendResponseCaches, Arc<serde_json::Value>, hashbrown::hash_map::DefaultHashBuilder>;
pub type FrontendHealthCache = Cache<(), bool, hashbrown::hash_map::DefaultHashBuilder>;
/// Start the frontend server.
pub async fn serve(port: u16, proxy_app: Arc<Web3ProxyApp>) -> anyhow::Result<()> {
@ -38,7 +41,11 @@ pub async fn serve(port: u16, proxy_app: Arc<Web3ProxyApp>) -> anyhow::Result<()
// TODO: a moka cache is probably way overkill for this.
// no need for max items. only expire because of time to live
let response_cache: FrontendResponseCache = Cache::builder()
.time_to_live(Duration::from_secs(1))
.time_to_live(Duration::from_secs(2))
.build_with_hasher(hashbrown::hash_map::DefaultHashBuilder::default());
let health_cache: FrontendHealthCache = Cache::builder()
.time_to_live(Duration::from_millis(100))
.build_with_hasher(hashbrown::hash_map::DefaultHashBuilder::default());
// TODO: read config for if fastest/versus should be available publicly. default off
@ -182,6 +189,7 @@ pub async fn serve(port: u16, proxy_app: Arc<Web3ProxyApp>) -> anyhow::Result<()
.layer(Extension(proxy_app.clone()))
// frontend caches
.layer(Extension(response_cache))
.layer(Extension(health_cache))
// 404 for any unknown routes
.fallback(errors::handler_404);
@ -199,7 +207,6 @@ pub async fn serve(port: u16, proxy_app: Arc<Web3ProxyApp>) -> anyhow::Result<()
- axum::extract::ConnectInfo (if not behind proxy)
*/
let service = app.into_make_service_with_connect_info::<SocketAddr>();
// let service = app.into_make_service();
// `axum::Server` is a re-export of `hyper::Server`
axum::Server::bind(&addr)

@ -8,7 +8,7 @@ use axum::extract::Path;
use axum::headers::{Origin, Referer, UserAgent};
use axum::TypedHeader;
use axum::{response::IntoResponse, Extension, Json};
use axum_client_ip::ClientIp;
use axum_client_ip::InsecureClientIp;
use axum_macros::debug_handler;
use itertools::Itertools;
use std::sync::Arc;
@ -19,7 +19,7 @@ use std::sync::Arc;
#[debug_handler]
pub async fn proxy_web3_rpc(
Extension(app): Extension<Arc<Web3ProxyApp>>,
ip: ClientIp,
ip: InsecureClientIp,
origin: Option<TypedHeader<Origin>>,
Json(payload): Json<JsonRpcRequestEnum>,
) -> FrontendResult {
@ -29,7 +29,7 @@ pub async fn proxy_web3_rpc(
#[debug_handler]
pub async fn fastest_proxy_web3_rpc(
Extension(app): Extension<Arc<Web3ProxyApp>>,
ip: ClientIp,
ip: InsecureClientIp,
origin: Option<TypedHeader<Origin>>,
Json(payload): Json<JsonRpcRequestEnum>,
) -> FrontendResult {
@ -41,7 +41,7 @@ pub async fn fastest_proxy_web3_rpc(
#[debug_handler]
pub async fn versus_proxy_web3_rpc(
Extension(app): Extension<Arc<Web3ProxyApp>>,
ip: ClientIp,
ip: InsecureClientIp,
origin: Option<TypedHeader<Origin>>,
Json(payload): Json<JsonRpcRequestEnum>,
) -> FrontendResult {
@ -50,7 +50,7 @@ pub async fn versus_proxy_web3_rpc(
async fn _proxy_web3_rpc(
app: Arc<Web3ProxyApp>,
ClientIp(ip): ClientIp,
InsecureClientIp(ip): InsecureClientIp,
origin: Option<TypedHeader<Origin>>,
payload: JsonRpcRequestEnum,
proxy_mode: ProxyMode,
@ -91,7 +91,7 @@ async fn _proxy_web3_rpc(
#[debug_handler]
pub async fn proxy_web3_rpc_with_key(
Extension(app): Extension<Arc<Web3ProxyApp>>,
ip: ClientIp,
ip: InsecureClientIp,
origin: Option<TypedHeader<Origin>>,
referer: Option<TypedHeader<Referer>>,
user_agent: Option<TypedHeader<UserAgent>>,
@ -114,7 +114,7 @@ pub async fn proxy_web3_rpc_with_key(
#[debug_handler]
pub async fn fastest_proxy_web3_rpc_with_key(
Extension(app): Extension<Arc<Web3ProxyApp>>,
ip: ClientIp,
ip: InsecureClientIp,
origin: Option<TypedHeader<Origin>>,
referer: Option<TypedHeader<Referer>>,
user_agent: Option<TypedHeader<UserAgent>>,
@ -137,7 +137,7 @@ pub async fn fastest_proxy_web3_rpc_with_key(
#[debug_handler]
pub async fn versus_proxy_web3_rpc_with_key(
Extension(app): Extension<Arc<Web3ProxyApp>>,
ip: ClientIp,
ip: InsecureClientIp,
origin: Option<TypedHeader<Origin>>,
referer: Option<TypedHeader<Referer>>,
user_agent: Option<TypedHeader<UserAgent>>,
@ -160,7 +160,7 @@ pub async fn versus_proxy_web3_rpc_with_key(
#[allow(clippy::too_many_arguments)]
async fn _proxy_web3_rpc_with_key(
app: Arc<Web3ProxyApp>,
ClientIp(ip): ClientIp,
InsecureClientIp(ip): InsecureClientIp,
origin: Option<TypedHeader<Origin>>,
referer: Option<TypedHeader<Referer>>,
user_agent: Option<TypedHeader<UserAgent>>,

@ -17,7 +17,7 @@ use axum::{
response::{IntoResponse, Redirect},
Extension, TypedHeader,
};
use axum_client_ip::ClientIp;
use axum_client_ip::InsecureClientIp;
use axum_macros::debug_handler;
use futures::SinkExt;
use futures::{
@ -49,7 +49,7 @@ pub enum ProxyMode {
#[debug_handler]
pub async fn websocket_handler(
Extension(app): Extension<Arc<Web3ProxyApp>>,
ip: ClientIp,
ip: InsecureClientIp,
origin: Option<TypedHeader<Origin>>,
ws_upgrade: Option<WebSocketUpgrade>,
) -> FrontendResult {
@ -61,7 +61,7 @@ pub async fn websocket_handler(
#[debug_handler]
pub async fn fastest_websocket_handler(
Extension(app): Extension<Arc<Web3ProxyApp>>,
ip: ClientIp,
ip: InsecureClientIp,
origin: Option<TypedHeader<Origin>>,
ws_upgrade: Option<WebSocketUpgrade>,
) -> FrontendResult {
@ -75,7 +75,7 @@ pub async fn fastest_websocket_handler(
#[debug_handler]
pub async fn versus_websocket_handler(
Extension(app): Extension<Arc<Web3ProxyApp>>,
ip: ClientIp,
ip: InsecureClientIp,
origin: Option<TypedHeader<Origin>>,
ws_upgrade: Option<WebSocketUpgrade>,
) -> FrontendResult {
@ -86,7 +86,7 @@ pub async fn versus_websocket_handler(
async fn _websocket_handler(
proxy_mode: ProxyMode,
app: Arc<Web3ProxyApp>,
ClientIp(ip): ClientIp,
InsecureClientIp(ip): InsecureClientIp,
origin: Option<TypedHeader<Origin>>,
ws_upgrade: Option<WebSocketUpgrade>,
) -> FrontendResult {
@ -121,7 +121,7 @@ async fn _websocket_handler(
#[debug_handler]
pub async fn websocket_handler_with_key(
Extension(app): Extension<Arc<Web3ProxyApp>>,
ip: ClientIp,
ip: InsecureClientIp,
Path(rpc_key): Path<String>,
origin: Option<TypedHeader<Origin>>,
referer: Option<TypedHeader<Referer>>,
@ -144,7 +144,7 @@ pub async fn websocket_handler_with_key(
#[debug_handler]
pub async fn fastest_websocket_handler_with_key(
Extension(app): Extension<Arc<Web3ProxyApp>>,
ip: ClientIp,
ip: InsecureClientIp,
Path(rpc_key): Path<String>,
origin: Option<TypedHeader<Origin>>,
referer: Option<TypedHeader<Referer>>,
@ -168,7 +168,7 @@ pub async fn fastest_websocket_handler_with_key(
#[debug_handler]
pub async fn versus_websocket_handler_with_key(
Extension(app): Extension<Arc<Web3ProxyApp>>,
ip: ClientIp,
ip: InsecureClientIp,
Path(rpc_key): Path<String>,
origin: Option<TypedHeader<Origin>>,
referer: Option<TypedHeader<Referer>>,
@ -192,7 +192,7 @@ pub async fn versus_websocket_handler_with_key(
async fn _websocket_handler_with_key(
proxy_mode: ProxyMode,
app: Arc<Web3ProxyApp>,
ClientIp(ip): ClientIp,
InsecureClientIp(ip): InsecureClientIp,
rpc_key: String,
origin: Option<TypedHeader<Origin>>,
referer: Option<TypedHeader<Referer>>,

@ -3,7 +3,7 @@
//! For ease of development, users can currently access these endpoints.
//! They will eventually move to another port.
use super::{FrontendResponseCache, FrontendResponseCaches};
use super::{FrontendHealthCache, FrontendResponseCache, FrontendResponseCaches};
use crate::app::{Web3ProxyApp, APP_USER_AGENT};
use axum::{http::StatusCode, response::IntoResponse, Extension, Json};
use axum_macros::debug_handler;
@ -12,9 +12,15 @@ use std::sync::Arc;
/// Health check page for load balancers to use.
#[debug_handler]
pub async fn health(Extension(app): Extension<Arc<Web3ProxyApp>>) -> impl IntoResponse {
// TODO: add a check that we aren't shutting down
if app.balanced_rpcs.synced() {
pub async fn health(
Extension(app): Extension<Arc<Web3ProxyApp>>,
Extension(health_cache): Extension<FrontendHealthCache>,
) -> impl IntoResponse {
let synced = health_cache
.get_with((), async { app.balanced_rpcs.synced() })
.await;
if synced {
(StatusCode::OK, "OK")
} else {
(StatusCode::SERVICE_UNAVAILABLE, ":(")
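// Illustrative sketch, not part of this diff: FrontendHealthCache (defined elsewhere in
// this crate) is presumably a tiny TTL cache keyed by `()`, so frequent load-balancer
// probes only call `balanced_rpcs.synced()` once per interval. Assuming moka, which the
// other caches in this codebase use, it could be built roughly like this (the 100ms TTL
// is a placeholder, not a value taken from this commit):
type ExampleHealthCache = moka::future::Cache<(), bool>;

fn example_health_cache() -> ExampleHealthCache {
    moka::future::Cache::builder()
        .time_to_live(std::time::Duration::from_millis(100))
        .build()
}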

@ -17,7 +17,7 @@ use axum::{
response::IntoResponse,
Extension, Json, TypedHeader,
};
use axum_client_ip::ClientIp;
use axum_client_ip::InsecureClientIp;
use axum_macros::debug_handler;
use chrono::{TimeZone, Utc};
use entities::sea_orm_active_enums::{LogLevel, Role};
@ -65,7 +65,7 @@ use crate::{PostLogin, PostLoginQuery};
#[debug_handler]
pub async fn user_login_get(
Extension(app): Extension<Arc<Web3ProxyApp>>,
ClientIp(ip): ClientIp,
InsecureClientIp(ip): InsecureClientIp,
// TODO: what does axum's error handling look like if the path fails to parse?
Path(mut params): Path<HashMap<String, String>>,
) -> FrontendResult {
@ -165,7 +165,7 @@ pub async fn user_login_get(
#[debug_handler]
pub async fn user_login_post(
Extension(app): Extension<Arc<Web3ProxyApp>>,
ClientIp(ip): ClientIp,
InsecureClientIp(ip): InsecureClientIp,
Query(query): Query<PostLoginQuery>,
Json(payload): Json<PostLogin>,
) -> FrontendResult {

@ -5,7 +5,6 @@ pub mod block_number;
pub mod config;
pub mod frontend;
pub mod jsonrpc;
pub mod metered;
pub mod metrics_frontend;
pub mod pagerduty;
pub mod rpcs;

@ -1,12 +1,6 @@
//! A module providing the `JsonRpcErrorCount` metric.
use ethers::providers::ProviderError;
use metered::metric::{Advice, Enter, OnResult};
use metered::{
atomic::AtomicInt,
clear::Clear,
metric::{Counter, Metric},
};
use serde::Serialize;
use std::ops::Deref;

@ -1,12 +1,6 @@
//! A module providing the `JsonRpcErrorCount` metric.
use ethers::providers::ProviderError;
use metered::metric::{Advice, Enter, OnResult};
use metered::{
atomic::AtomicInt,
clear::Clear,
metric::{Counter, Metric},
};
use serde::Serialize;
use std::ops::Deref;

@ -23,13 +23,14 @@ pub async fn serve(app: Arc<Web3ProxyApp>, port: u16) -> anyhow::Result<()> {
// TODO: into_make_service is enough if we always run behind a proxy. make into_make_service_with_connect_info optional?
/*
It sequentially looks for an IP in:
InsecureClientIp sequentially looks for an IP in:
- x-forwarded-for header (de-facto standard)
- x-real-ip header
- forwarded header (new standard)
- axum::extract::ConnectInfo (if not behind proxy)
So we probably won't need into_make_service_with_connect_info, but it shouldn't hurt
Since we run behind haproxy, x-forwarded-for will be set.
We probably won't need into_make_service_with_connect_info, but it shouldn't hurt.
*/
let service = app.into_make_service_with_connect_info::<SocketAddr>();
// let service = app.into_make_service();
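// Illustrative sketch, not part of this diff: the lookup order described in the comment
// above, written out as plain Rust. This only models what the axum-client-ip extractor
// does; `pick_client_ip` and its parameters are hypothetical names.
fn pick_client_ip(
    x_forwarded_for: Option<&str>,
    x_real_ip: Option<&str>,
    forwarded: Option<&str>,
    connect_info: Option<std::net::IpAddr>,
) -> Option<std::net::IpAddr> {
    // x-forwarded-for can be a comma-separated list; the left-most entry is the client
    x_forwarded_for
        .and_then(|v| v.split(',').next())
        .or(x_real_ip)
        // the newer `forwarded` header carries `for=...` pairs; only the simple case is handled here
        .or_else(|| forwarded.and_then(|v| v.split(';').find_map(|p| p.trim().strip_prefix("for="))))
        .and_then(|s| s.trim().parse().ok())
        .or(connect_info)
}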

@ -1,10 +1,10 @@
///! Keep track of the blockchain as seen by a Web3Connections.
use super::connection::Web3Connection;
use super::connections::Web3Connections;
use super::many::Web3Rpcs;
///! Keep track of the blockchain as seen by a Web3Rpcs.
use super::one::Web3Rpc;
use super::transactions::TxStatus;
use crate::frontend::authorization::Authorization;
use crate::{
config::BlockAndRpc, jsonrpc::JsonRpcRequest, rpcs::synced_connections::ConsensusConnections,
config::BlockAndRpc, jsonrpc::JsonRpcRequest, rpcs::synced_connections::ConsensusWeb3Rpcs,
};
use anyhow::Context;
use derive_more::From;
@ -92,7 +92,7 @@ impl Display for SavedBlock {
}
}
impl Web3Connections {
impl Web3Rpcs {
/// add a block to our mappings and track the heaviest chain
pub async fn save_block(
&self,
@ -135,7 +135,7 @@ impl Web3Connections {
&self,
authorization: &Arc<Authorization>,
hash: &H256,
rpc: Option<&Arc<Web3Connection>>,
rpc: Option<&Arc<Web3Rpc>>,
) -> anyhow::Result<ArcBlock> {
// first, try to get the hash from our cache
// the cache is set last, so if it's here, it's everywhere
@ -190,12 +190,12 @@ impl Web3Connections {
&self,
authorization: &Arc<Authorization>,
num: &U64,
) -> anyhow::Result<(H256, bool)> {
let (block, is_archive_block) = self.cannonical_block(authorization, num).await?;
) -> anyhow::Result<(H256, u64)> {
let (block, block_depth) = self.cannonical_block(authorization, num).await?;
let hash = block.hash.expect("Saved blocks should always have hashes");
Ok((hash, is_archive_block))
Ok((hash, block_depth))
}
/// Get the heaviest chain's block from cache or backend rpc
@ -204,7 +204,7 @@ impl Web3Connections {
&self,
authorization: &Arc<Authorization>,
num: &U64,
) -> anyhow::Result<(ArcBlock, bool)> {
) -> anyhow::Result<(ArcBlock, u64)> {
// we only have blocks by hash now
// maybe save them during save_block in a blocks_by_number Cache<U64, Vec<ArcBlock>>
// if there are multiple, use petgraph to find the one on the main chain (and remove the others if they have enough confirmations)
@ -233,8 +233,11 @@ impl Web3Connections {
let head_block_num =
head_block_num.expect("we should only get here if we have a head block");
// TODO: geth does 64, erigon does 90k. sometimes we run a mix
let archive_needed = num < &(head_block_num - U64::from(64));
let block_depth = if num >= &head_block_num {
0
} else {
(head_block_num - num).as_u64()
};
// try to get the hash from our cache
// deref to not keep the lock open
@ -243,7 +246,7 @@ impl Web3Connections {
// TODO: pass authorization through here?
let block = self.block(authorization, &block_hash, None).await?;
return Ok((block, archive_needed));
return Ok((block, block_depth));
}
// block number not in cache. we need to ask an rpc for it
@ -269,7 +272,7 @@ impl Web3Connections {
// the block was fetched using eth_getBlockByNumber, so it should have all fields and be on the heaviest chain
let block = self.save_block(block, true).await?;
Ok((block, archive_needed))
Ok((block, block_depth))
}
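// Illustrative sketch, not part of this diff: callers that previously received
// `archive_needed: bool` can now derive it from `block_depth`, choosing whatever cutoff
// matches their node mix (the TODO above notes geth keeps ~64 recent blocks while erigon
// keeps ~90k). The 64 below is only that example value, not a constant from this commit.
fn example_archive_needed(block_depth: u64) -> bool {
    block_depth > 64
}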
pub(super) async fn process_incoming_blocks(
@ -285,30 +288,33 @@ impl Web3Connections {
// TODO: this will grow unbounded. prune old heads on this at the same time we prune the graph?
let mut connection_heads = ConsensusFinder::default();
while let Ok((new_block, rpc)) = block_receiver.recv_async().await {
let new_block = new_block.map(Into::into);
loop {
match block_receiver.recv_async().await {
Ok((new_block, rpc)) => {
let new_block = new_block.map(Into::into);
let rpc_name = rpc.name.clone();
let rpc_name = rpc.name.clone();
if let Err(err) = self
.process_block_from_rpc(
authorization,
&mut connection_heads,
new_block,
rpc,
&head_block_sender,
&pending_tx_sender,
)
.await
{
warn!("unable to process block from rpc {}: {:?}", rpc_name, err);
if let Err(err) = self
.process_block_from_rpc(
authorization,
&mut connection_heads,
new_block,
rpc,
&head_block_sender,
&pending_tx_sender,
)
.await
{
warn!("unable to process block from rpc {}: {:?}", rpc_name, err);
}
}
Err(err) => {
warn!("block_receiver exited! {:#?}", err);
return Err(err.into());
}
}
}
// TODO: if there was an error, should we return it instead of an Ok?
warn!("block_receiver exited!");
Ok(())
}
/// `connection_heads` is a mapping of rpc_names to head block hashes.
@ -319,7 +325,7 @@ impl Web3Connections {
authorization: &Arc<Authorization>,
consensus_finder: &mut ConsensusFinder,
rpc_head_block: Option<SavedBlock>,
rpc: Arc<Web3Connection>,
rpc: Arc<Web3Rpc>,
head_block_sender: &watch::Sender<ArcBlock>,
pending_tx_sender: &Option<broadcast::Sender<TxStatus>>,
) -> anyhow::Result<()> {
@ -388,6 +394,7 @@ impl Web3Connections {
// multiple blocks with the same fork!
if consensus_saved_block.hash() == old_head_block.hash() {
// no change in hash. no need to use head_block_sender
// TODO: trace level if rpc is backup
debug!(
"con {}{}/{}/{}/{} con={} rpc={}@{}",
includes_backups_str,
@ -546,11 +553,11 @@ impl ConnectionsGroup {
Self::new(true)
}
fn remove(&mut self, rpc: &Web3Connection) -> Option<H256> {
fn remove(&mut self, rpc: &Web3Rpc) -> Option<H256> {
self.rpc_name_to_hash.remove(rpc.name.as_str())
}
fn insert(&mut self, rpc: &Web3Connection, block_hash: H256) -> Option<H256> {
fn insert(&mut self, rpc: &Web3Rpc, block_hash: H256) -> Option<H256> {
self.rpc_name_to_hash.insert(rpc.name.clone(), block_hash)
}
@ -560,7 +567,7 @@ impl ConnectionsGroup {
rpc_name: &str,
hash: &H256,
authorization: &Arc<Authorization>,
web3_connections: &Web3Connections,
web3_rpcs: &Web3Rpcs,
) -> anyhow::Result<ArcBlock> {
// // TODO: why does this happen?!?! seems to only happen with uncled blocks
// // TODO: maybe we should do try_get_with?
@ -571,16 +578,17 @@ impl ConnectionsGroup {
// );
// this option should almost always be populated. if the connection reconnects at a bad time it might not be available though
let rpc = web3_connections.conns.get(rpc_name);
// TODO: if this is None, I think we should error.
let rpc = web3_rpcs.conns.get(rpc_name);
web3_connections.block(authorization, hash, rpc).await
web3_rpcs.block(authorization, hash, rpc).await
}
// TODO: do this during insert/remove?
pub(self) async fn highest_block(
&self,
authorization: &Arc<Authorization>,
web3_connections: &Web3Connections,
web3_rpcs: &Web3Rpcs,
) -> Option<ArcBlock> {
let mut checked_heads = HashSet::with_capacity(self.rpc_name_to_hash.len());
let mut highest_block = None::<ArcBlock>;
@ -592,7 +600,7 @@ impl ConnectionsGroup {
}
let rpc_block = match self
.get_block_from_rpc(rpc_name, rpc_head_hash, authorization, web3_connections)
.get_block_from_rpc(rpc_name, rpc_head_hash, authorization, web3_rpcs)
.await
{
Ok(x) => x,
@ -627,9 +635,9 @@ impl ConnectionsGroup {
pub(self) async fn consensus_head_connections(
&self,
authorization: &Arc<Authorization>,
web3_connections: &Web3Connections,
) -> anyhow::Result<ConsensusConnections> {
let mut maybe_head_block = match self.highest_block(authorization, web3_connections).await {
web3_rpcs: &Web3Rpcs,
) -> anyhow::Result<ConsensusWeb3Rpcs> {
let mut maybe_head_block = match self.highest_block(authorization, web3_rpcs).await {
None => return Err(anyhow::anyhow!("No blocks known")),
Some(x) => x,
};
@ -663,27 +671,25 @@ impl ConnectionsGroup {
continue;
}
if let Some(rpc) = web3_connections.conns.get(rpc_name.as_str()) {
if let Some(rpc) = web3_rpcs.conns.get(rpc_name.as_str()) {
highest_rpcs.insert(rpc_name);
highest_rpcs_sum_soft_limit += rpc.soft_limit;
} else {
// i don't think this is an error. i think it's just if a reconnect is currently happening
warn!("connection missing: {}", rpc_name);
debug!("web3_rpcs.conns: {:#?}", web3_rpcs.conns);
}
}
if highest_rpcs_sum_soft_limit >= web3_connections.min_sum_soft_limit
&& highest_rpcs.len() >= web3_connections.min_head_rpcs
if highest_rpcs_sum_soft_limit >= web3_rpcs.min_sum_soft_limit
&& highest_rpcs.len() >= web3_rpcs.min_head_rpcs
{
// we have enough servers with enough requests
break;
}
// not enough rpcs yet. check the parent block
if let Some(parent_block) = web3_connections
.block_hashes
.get(&maybe_head_block.parent_hash)
{
if let Some(parent_block) = web3_rpcs.block_hashes.get(&maybe_head_block.parent_hash) {
// trace!(
// child=%maybe_head_hash, parent=%parent_block.hash.unwrap(), "avoiding thundering herd",
// );
@ -691,25 +697,25 @@ impl ConnectionsGroup {
maybe_head_block = parent_block;
continue;
} else {
if num_known < web3_connections.min_head_rpcs {
if num_known < web3_rpcs.min_head_rpcs {
return Err(anyhow::anyhow!(
"not enough rpcs connected: {}/{}/{}",
highest_rpcs.len(),
num_known,
web3_connections.min_head_rpcs,
web3_rpcs.min_head_rpcs,
));
} else {
let soft_limit_percent = (highest_rpcs_sum_soft_limit as f32
/ web3_connections.min_sum_soft_limit as f32)
/ web3_rpcs.min_sum_soft_limit as f32)
* 100.0;
return Err(anyhow::anyhow!(
"ran out of parents to check. rpcs {}/{}/{}. soft limit: {:.2}% ({}/{})",
highest_rpcs.len(),
num_known,
web3_connections.min_head_rpcs,
web3_rpcs.min_head_rpcs,
highest_rpcs_sum_soft_limit,
web3_connections.min_sum_soft_limit,
web3_rpcs.min_sum_soft_limit,
soft_limit_percent,
));
}
@ -719,29 +725,28 @@ impl ConnectionsGroup {
// TODO: if consensus_head_rpcs.is_empty, try another method of finding the head block. will need to change the return Err above into breaks.
// we've done all the searching for the heaviest block that we can
if highest_rpcs.len() < web3_connections.min_head_rpcs
|| highest_rpcs_sum_soft_limit < web3_connections.min_sum_soft_limit
if highest_rpcs.len() < web3_rpcs.min_head_rpcs
|| highest_rpcs_sum_soft_limit < web3_rpcs.min_sum_soft_limit
{
// if we get here, not enough servers are synced. return an error
let soft_limit_percent = (highest_rpcs_sum_soft_limit as f32
/ web3_connections.min_sum_soft_limit as f32)
* 100.0;
let soft_limit_percent =
(highest_rpcs_sum_soft_limit as f32 / web3_rpcs.min_sum_soft_limit as f32) * 100.0;
return Err(anyhow::anyhow!(
"Not enough resources. rpcs {}/{}/{}. soft limit: {:.2}% ({}/{})",
highest_rpcs.len(),
num_known,
web3_connections.min_head_rpcs,
web3_rpcs.min_head_rpcs,
highest_rpcs_sum_soft_limit,
web3_connections.min_sum_soft_limit,
web3_rpcs.min_sum_soft_limit,
soft_limit_percent,
));
}
// success! this block has enough soft limit and nodes on it (or on later blocks)
let conns: Vec<Arc<Web3Connection>> = highest_rpcs
let conns: Vec<Arc<Web3Rpc>> = highest_rpcs
.into_iter()
.filter_map(|conn_name| web3_connections.conns.get(conn_name).cloned())
.filter_map(|conn_name| web3_rpcs.conns.get(conn_name).cloned())
.collect();
// TODO: DEBUG only check
@ -754,7 +759,7 @@ impl ConnectionsGroup {
let consensus_head_block: SavedBlock = maybe_head_block.into();
Ok(ConsensusConnections {
Ok(ConsensusWeb3Rpcs {
head_block: Some(consensus_head_block),
conns,
num_checked_conns: self.rpc_name_to_hash.len(),
@ -781,7 +786,7 @@ impl Default for ConsensusFinder {
}
impl ConsensusFinder {
fn remove(&mut self, rpc: &Web3Connection) -> Option<H256> {
fn remove(&mut self, rpc: &Web3Rpc) -> Option<H256> {
// TODO: should we have multiple backup tiers? (remote datacenters vs third party)
if !rpc.backup {
self.main.remove(rpc);
@ -789,7 +794,7 @@ impl ConsensusFinder {
self.all.remove(rpc)
}
fn insert(&mut self, rpc: &Web3Connection, new_hash: H256) -> Option<H256> {
fn insert(&mut self, rpc: &Web3Rpc, new_hash: H256) -> Option<H256> {
// TODO: should we have multiple backup tiers? (remote datacenters vs third party)
if !rpc.backup {
self.main.insert(rpc, new_hash);
@ -801,9 +806,9 @@ impl ConsensusFinder {
async fn update_rpc(
&mut self,
rpc_head_block: Option<SavedBlock>,
rpc: Arc<Web3Connection>,
rpc: Arc<Web3Rpc>,
// we need this so we can save the block to caches. i don't like it though. maybe we should use a lazy_static Cache wrapper that has a "save_block" method? i generally dislike globals but i also dislike all the types having to pass each other around
web3_connections: &Web3Connections,
web3_connections: &Web3Rpcs,
) -> anyhow::Result<bool> {
// add the rpc's block to connection_heads, or remove the rpc from connection_heads
let changed = match rpc_head_block {
@ -848,15 +853,15 @@ impl ConsensusFinder {
async fn best_consensus_connections(
&mut self,
authorization: &Arc<Authorization>,
web3_connections: &Web3Connections,
) -> ConsensusConnections {
web3_connections: &Web3Rpcs,
) -> ConsensusWeb3Rpcs {
let highest_block_num = match self
.all
.highest_block(authorization, web3_connections)
.await
{
None => {
return ConsensusConnections::default();
return ConsensusWeb3Rpcs::default();
}
Some(x) => x.number.expect("blocks here should always have a number"),
};
@ -897,7 +902,7 @@ impl ConsensusFinder {
if self.all.rpc_name_to_hash.len() < web3_connections.min_head_rpcs {
debug!("No consensus head yet: {}", err);
}
return ConsensusConnections::default();
return ConsensusWeb3Rpcs::default();
}
Ok(x) => x,
};
@ -920,7 +925,7 @@ impl ConsensusFinder {
} else {
// TODO: i don't think we need this error. and i doubt we'll ever even get here
error!("NO CONSENSUS HEAD!");
ConsensusConnections::default()
ConsensusWeb3Rpcs::default()
}
}
}

@ -1,12 +1,10 @@
///! Load balanced communication with a group of web3 providers
///! Load balanced communication with a group of web3 rpc providers
use super::blockchain::{ArcBlock, BlockHashesCache};
use super::connection::Web3Connection;
use super::request::{
OpenRequestHandle, OpenRequestHandleMetrics, OpenRequestResult, RequestRevertHandler,
};
use super::synced_connections::ConsensusConnections;
use super::one::Web3Rpc;
use super::request::{OpenRequestHandle, OpenRequestResult, RequestRevertHandler};
use super::synced_connections::ConsensusWeb3Rpcs;
use crate::app::{flatten_handle, AnyhowJoinHandle};
use crate::config::{BlockAndRpc, TxHashAndRpc, Web3ConnectionConfig};
use crate::config::{BlockAndRpc, TxHashAndRpc, Web3RpcConfig};
use crate::frontend::authorization::{Authorization, RequestMetadata};
use crate::frontend::rpc_proxy_ws::ProxyMode;
use crate::jsonrpc::{JsonRpcForwardedResponse, JsonRpcRequest};
@ -14,7 +12,7 @@ use crate::rpcs::transactions::TxStatus;
use counter::Counter;
use derive_more::From;
use ethers::prelude::{ProviderError, TxHash, H256, U64};
use futures::future::{join_all, try_join_all};
use futures::future::try_join_all;
use futures::stream::FuturesUnordered;
use futures::StreamExt;
use hashbrown::{HashMap, HashSet};
@ -36,11 +34,11 @@ use tokio::time::{interval, sleep, sleep_until, Duration, Instant, MissedTickBeh
/// A collection of web3 connections. Sends requests either to the current best server or to all servers.
#[derive(From)]
pub struct Web3Connections {
pub struct Web3Rpcs {
/// any requests will be forwarded to one (or more) of these connections
pub(crate) conns: HashMap<String, Arc<Web3Connection>>,
pub(crate) conns: HashMap<String, Arc<Web3Rpc>>,
/// all providers with the same consensus head block. won't update if there is no `self.watch_consensus_head_sender`
pub(super) watch_consensus_connections_sender: watch::Sender<Arc<ConsensusConnections>>,
pub(super) watch_consensus_connections_sender: watch::Sender<Arc<ConsensusWeb3Rpcs>>,
/// this head receiver makes it easy to wait until there is a new block
pub(super) watch_consensus_head_receiver: Option<watch::Receiver<ArcBlock>>,
pub(super) pending_transactions:
@ -54,13 +52,13 @@ pub struct Web3Connections {
pub(super) min_sum_soft_limit: u32,
}
impl Web3Connections {
impl Web3Rpcs {
/// Spawn durable connections to multiple Web3 providers.
#[allow(clippy::too_many_arguments)]
pub async fn spawn(
chain_id: u64,
db_conn: Option<DatabaseConnection>,
server_configs: HashMap<String, Web3ConnectionConfig>,
server_configs: HashMap<String, Web3RpcConfig>,
http_client: Option<reqwest::Client>,
redis_pool: Option<redis_rate_limiter::RedisPool>,
block_map: BlockHashesCache,
@ -69,7 +67,6 @@ impl Web3Connections {
min_head_rpcs: usize,
pending_tx_sender: Option<broadcast::Sender<TxStatus>>,
pending_transactions: Cache<TxHash, TxStatus, hashbrown::hash_map::DefaultHashBuilder>,
open_request_handle_metrics: Arc<OpenRequestHandleMetrics>,
) -> anyhow::Result<(Arc<Self>, AnyhowJoinHandle<()>)> {
let (pending_tx_id_sender, pending_tx_id_receiver) = flume::unbounded();
let (block_sender, block_receiver) = flume::unbounded::<BlockAndRpc>();
@ -92,12 +89,10 @@ impl Web3Connections {
};
let http_interval_sender = if http_client.is_some() {
let (sender, receiver) = broadcast::channel(1);
drop(receiver);
let (sender, _) = broadcast::channel(1);
// TODO: what interval? follow a websocket also? maybe by watching synced connections with a timeout. will need debounce
let mut interval = interval(Duration::from_millis(expected_block_time_ms));
let mut interval = interval(Duration::from_millis(expected_block_time_ms / 2));
interval.set_missed_tick_behavior(MissedTickBehavior::Delay);
let sender = Arc::new(sender);
@ -107,13 +102,14 @@ impl Web3Connections {
async move {
loop {
// TODO: every time a head_block arrives (with a small delay for known slow servers), or on the interval.
interval.tick().await;
// // trace!("http interval ready");
// trace!("http interval ready");
// errors are okay. they mean that all receivers have been dropped
let _ = sender.send(());
if let Err(_) = sender.send(()) {
// errors are okay. they mean that all receivers have been dropped, or the rpcs just haven't started yet
trace!("no http receivers");
};
}
}
};
@ -128,11 +124,11 @@ impl Web3Connections {
// turn configs into connections (in parallel)
// TODO: move this into a helper function. then we can use it when configs change (will need a remove function too)
// TODO: futures unordered?
let spawn_handles: Vec<_> = server_configs
let mut spawn_handles: FuturesUnordered<_> = server_configs
.into_iter()
.filter_map(|(server_name, server_config)| {
if server_config.disabled {
info!("{} is disabled", server_name);
return None;
}
@ -149,7 +145,8 @@ impl Web3Connections {
let pending_tx_id_sender = Some(pending_tx_id_sender.clone());
let block_map = block_map.clone();
let open_request_handle_metrics = open_request_handle_metrics.clone();
debug!("spawning {}", server_name);
let handle = tokio::spawn(async move {
server_config
@ -163,7 +160,6 @@ impl Web3Connections {
block_map,
block_sender,
pending_tx_id_sender,
open_request_handle_metrics,
)
.await
});
@ -177,19 +173,20 @@ impl Web3Connections {
let mut handles = vec![];
// TODO: futures unordered?
for x in join_all(spawn_handles).await {
// TODO: how should we handle errors here? one rpc being down shouldn't cause the program to exit
while let Some(x) = spawn_handles.next().await {
match x {
Ok(Ok((connection, handle))) => {
// web3 connection worked
connections.insert(connection.name.clone(), connection);
handles.push(handle);
}
Ok(Err(err)) => {
// if we got an error here, it is not retryable
// if we got an error here, the app can continue on
// TODO: include context about which connection failed
error!("Unable to create connection. err={:?}", err);
}
Err(err) => {
// something actually bad happened. exit with an error
return Err(err.into());
}
}
@ -229,7 +226,6 @@ impl Web3Connections {
let connections = connections.clone();
tokio::spawn(async move {
// TODO: try_join_all with the other handles here
connections
.subscribe(
authorization,
@ -245,13 +241,13 @@ impl Web3Connections {
Ok((connections, handle))
}
pub fn get(&self, conn_name: &str) -> Option<&Arc<Web3Connection>> {
pub fn get(&self, conn_name: &str) -> Option<&Arc<Web3Rpc>> {
self.conns.get(conn_name)
}
/// subscribe to blocks and transactions from all the backend rpcs.
/// blocks are processed by all the `Web3Connection`s and then sent to the `block_receiver`
/// transaction ids from all the `Web3Connection`s are deduplicated and forwarded to `pending_tx_sender`
/// blocks are processed by all the `Web3Rpc`s and then sent to the `block_receiver`
/// transaction ids from all the `Web3Rpc`s are deduplicated and forwarded to `pending_tx_sender`
async fn subscribe(
self: Arc<Self>,
authorization: Arc<Authorization>,
@ -327,7 +323,6 @@ impl Web3Connections {
}
info!("subscriptions over: {:?}", self);
Ok(())
}
@ -415,7 +410,7 @@ impl Web3Connections {
&self,
authorization: &Arc<Authorization>,
request_metadata: Option<&Arc<RequestMetadata>>,
skip: &[Arc<Web3Connection>],
skip: &[Arc<Web3Rpc>],
min_block_needed: Option<&U64>,
) -> anyhow::Result<OpenRequestResult> {
if let Ok(without_backups) = self
@ -450,13 +445,10 @@ impl Web3Connections {
allow_backups: bool,
authorization: &Arc<Authorization>,
request_metadata: Option<&Arc<RequestMetadata>>,
skip: &[Arc<Web3Connection>],
skip: &[Arc<Web3Rpc>],
min_block_needed: Option<&U64>,
) -> anyhow::Result<OpenRequestResult> {
let usable_rpcs_by_head_num_and_weight: BTreeMap<
(Option<U64>, u64),
Vec<Arc<Web3Connection>>,
> = {
let usable_rpcs_by_head_num_and_weight: BTreeMap<(Option<U64>, u64), Vec<Arc<Web3Rpc>>> = {
let synced_connections = self.watch_consensus_connections_sender.borrow().clone();
let head_block_num = if let Some(head_block) = synced_connections.head_block.as_ref() {
@ -647,12 +639,15 @@ impl Web3Connections {
authorization: &Arc<Authorization>,
block_needed: Option<&U64>,
max_count: Option<usize>,
always_include_backups: bool,
) -> Result<Vec<OpenRequestHandle>, Option<Instant>> {
if let Ok(without_backups) = self
._all_connections(false, authorization, block_needed, max_count)
.await
{
return Ok(without_backups);
if !always_include_backups {
if let Ok(without_backups) = self
._all_connections(false, authorization, block_needed, max_count)
.await
{
return Ok(without_backups);
}
}
self._all_connections(true, authorization, block_needed, max_count)
@ -678,17 +673,21 @@ impl Web3Connections {
let mut tried = HashSet::new();
let conns_to_try = itertools::chain(
// TODO: sort by tier
self.watch_consensus_connections_sender
.borrow()
.conns
.clone(),
// TODO: sort by tier
self.conns.values().cloned(),
);
let mut synced_conns = self
.watch_consensus_connections_sender
.borrow()
.conns
.clone();
for connection in conns_to_try {
// synced connections are all on the same block. sort them by tier with higher soft limits first
synced_conns.sort_by_cached_key(|x| (x.tier, u32::MAX - x.soft_limit));
// if there aren't enough synced connections, include more connections
let mut all_conns: Vec<_> = self.conns.values().cloned().collect();
sort_connections_by_sync_status(&mut all_conns);
for connection in itertools::chain(synced_conns, all_conns) {
if max_count == 0 {
break;
}
@ -760,13 +759,8 @@ impl Web3Connections {
loop {
let num_skipped = skip_rpcs.len();
if num_skipped > 0 {
// trace!("skip_rpcs: {:?}", skip_rpcs);
// TODO: is self.conns still right now that we split main and backup servers?
if num_skipped == self.conns.len() {
break;
}
if num_skipped == self.conns.len() {
break;
}
match self
@ -1017,10 +1011,16 @@ impl Web3Connections {
block_needed: Option<&U64>,
error_level: Level,
max_count: Option<usize>,
always_include_backups: bool,
) -> anyhow::Result<JsonRpcForwardedResponse> {
loop {
match self
.all_connections(authorization, block_needed, max_count)
.all_connections(
authorization,
block_needed,
max_count,
always_include_backups,
)
.await
{
Ok(active_request_handles) => {
@ -1117,23 +1117,23 @@ impl Web3Connections {
}
}
impl fmt::Debug for Web3Connections {
impl fmt::Debug for Web3Rpcs {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// TODO: the default formatter takes forever to write. this is too quiet though
f.debug_struct("Web3Connections")
f.debug_struct("Web3Rpcs")
.field("conns", &self.conns)
.finish_non_exhaustive()
}
}
impl Serialize for Web3Connections {
impl Serialize for Web3Rpcs {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut state = serializer.serialize_struct("Web3Connections", 6)?;
let mut state = serializer.serialize_struct("Web3Rpcs", 6)?;
let conns: Vec<&Web3Connection> = self.conns.values().map(|x| x.as_ref()).collect();
let conns: Vec<&Web3Rpc> = self.conns.values().map(|x| x.as_ref()).collect();
state.serialize_field("conns", &conns)?;
{
@ -1152,13 +1152,29 @@ impl Serialize for Web3Connections {
}
}
/// sort by block number (descending) and tier (ascending)
fn sort_connections_by_sync_status(rpcs: &mut Vec<Arc<Web3Rpc>>) {
rpcs.sort_by_cached_key(|x| {
let reversed_head_block = u64::MAX
- x.head_block
.read()
.as_ref()
.map(|x| x.number().as_u64())
.unwrap_or(0);
let tier = x.tier;
(reversed_head_block, tier)
});
}
mod tests {
// TODO: why is this allow needed? does tokio::test get in the way somehow?
#![allow(unused_imports)]
use super::*;
use crate::rpcs::{
blockchain::{ConsensusFinder, SavedBlock},
connection::ProviderState,
one::ProviderState,
provider::Web3Provider,
};
use ethers::types::{Block, U256};
@ -1167,6 +1183,80 @@ mod tests {
use std::time::{SystemTime, UNIX_EPOCH};
use tokio::sync::RwLock as AsyncRwLock;
#[tokio::test]
async fn test_sort_connections_by_sync_status() {
let block_0 = Block {
number: Some(0.into()),
hash: Some(H256::random()),
..Default::default()
};
let block_1 = Block {
number: Some(1.into()),
hash: Some(H256::random()),
parent_hash: block_0.hash.unwrap(),
..Default::default()
};
let block_2 = Block {
number: Some(2.into()),
hash: Some(H256::random()),
parent_hash: block_1.hash.unwrap(),
..Default::default()
};
let blocks: Vec<_> = [block_0, block_1, block_2]
.into_iter()
.map(|x| SavedBlock::new(Arc::new(x)))
.collect();
let mut rpcs = [
Web3Rpc {
name: "a".to_string(),
tier: 0,
head_block: RwLock::new(None),
..Default::default()
},
Web3Rpc {
name: "b".to_string(),
tier: 0,
head_block: RwLock::new(blocks.get(1).cloned()),
..Default::default()
},
Web3Rpc {
name: "c".to_string(),
tier: 0,
head_block: RwLock::new(blocks.get(2).cloned()),
..Default::default()
},
Web3Rpc {
name: "d".to_string(),
tier: 1,
head_block: RwLock::new(None),
..Default::default()
},
Web3Rpc {
name: "e".to_string(),
tier: 1,
head_block: RwLock::new(blocks.get(1).cloned()),
..Default::default()
},
Web3Rpc {
name: "f".to_string(),
tier: 1,
head_block: RwLock::new(blocks.get(2).cloned()),
..Default::default()
},
]
.into_iter()
.map(Arc::new)
.collect();
sort_connections_by_sync_status(&mut rpcs);
let names_in_sort_order: Vec<_> = rpcs.iter().map(|x| x.name.as_str()).collect();
assert_eq!(names_in_sort_order, ["c", "f", "b", "e", "a", "d"]);
}
#[tokio::test]
async fn test_server_selection_by_height() {
// TODO: do this better. can test_env_logger and tokio test be stacked?
@ -1206,50 +1296,32 @@ mod tests {
let block_data_limit = u64::MAX;
let head_rpc = Web3Connection {
let head_rpc = Web3Rpc {
name: "synced".to_string(),
db_conn: None,
display_name: None,
url: "ws://example.com/synced".to_string(),
http_client: None,
active_requests: 0.into(),
frontend_requests: 0.into(),
internal_requests: 0.into(),
provider_state: AsyncRwLock::new(ProviderState::Connected(Arc::new(
Web3Provider::Mock,
))),
hard_limit: None,
hard_limit_until: None,
soft_limit: 1_000,
automatic_block_limit: true,
automatic_block_limit: false,
backup: false,
block_data_limit: block_data_limit.into(),
tier: 0,
head_block: RwLock::new(Some(head_block.clone())),
open_request_handle_metrics: Arc::new(Default::default()),
..Default::default()
};
let lagged_rpc = Web3Connection {
let lagged_rpc = Web3Rpc {
name: "lagged".to_string(),
db_conn: None,
display_name: None,
url: "ws://example.com/lagged".to_string(),
http_client: None,
active_requests: 0.into(),
frontend_requests: 0.into(),
internal_requests: 0.into(),
provider_state: AsyncRwLock::new(ProviderState::Connected(Arc::new(
Web3Provider::Mock,
))),
hard_limit: None,
hard_limit_until: None,
soft_limit: 1_000,
automatic_block_limit: false,
backup: false,
block_data_limit: block_data_limit.into(),
tier: 0,
head_block: RwLock::new(Some(lagged_block.clone())),
open_request_handle_metrics: Arc::new(Default::default()),
..Default::default()
};
assert!(head_rpc.has_block_data(&lagged_block.number()));
@ -1268,8 +1340,8 @@ mod tests {
let (watch_consensus_connections_sender, _) = watch::channel(Default::default());
// TODO: make a Web3Connections::new
let conns = Web3Connections {
// TODO: make a Web3Rpcs::new
let conns = Web3Rpcs {
conns,
watch_consensus_head_receiver: None,
watch_consensus_connections_sender,
@ -1319,10 +1391,10 @@ mod tests {
// no head block because the rpcs haven't communicated through their channels
assert!(conns.head_block_hash().is_none());
// all_backend_connections gives everything regardless of sync status
// all_backend_connections gives all non-backup servers regardless of sync status
assert_eq!(
conns
.all_connections(&authorization, None, None)
.all_connections(&authorization, None, None, false)
.await
.unwrap()
.len(),
@ -1439,50 +1511,32 @@ mod tests {
let head_block: SavedBlock = Arc::new(head_block).into();
let pruned_rpc = Web3Connection {
let pruned_rpc = Web3Rpc {
name: "pruned".to_string(),
db_conn: None,
display_name: None,
url: "ws://example.com/pruned".to_string(),
http_client: None,
active_requests: 0.into(),
frontend_requests: 0.into(),
internal_requests: 0.into(),
provider_state: AsyncRwLock::new(ProviderState::Connected(Arc::new(
Web3Provider::Mock,
))),
hard_limit: None,
hard_limit_until: None,
soft_limit: 3_000,
automatic_block_limit: false,
backup: false,
block_data_limit: 64.into(),
tier: 1,
head_block: RwLock::new(Some(head_block.clone())),
open_request_handle_metrics: Arc::new(Default::default()),
..Default::default()
};
let archive_rpc = Web3Connection {
let archive_rpc = Web3Rpc {
name: "archive".to_string(),
db_conn: None,
display_name: None,
url: "ws://example.com/archive".to_string(),
http_client: None,
active_requests: 0.into(),
frontend_requests: 0.into(),
internal_requests: 0.into(),
provider_state: AsyncRwLock::new(ProviderState::Connected(Arc::new(
Web3Provider::Mock,
))),
hard_limit: None,
hard_limit_until: None,
soft_limit: 1_000,
automatic_block_limit: false,
backup: false,
block_data_limit: u64::MAX.into(),
tier: 2,
head_block: RwLock::new(Some(head_block.clone())),
open_request_handle_metrics: Arc::new(Default::default()),
..Default::default()
};
assert!(pruned_rpc.has_block_data(&head_block.number()));
@ -1500,8 +1554,8 @@ mod tests {
let (watch_consensus_connections_sender, _) = watch::channel(Default::default());
// TODO: make a Web3Connections::new
let conns = Web3Connections {
// TODO: make a Web3Rpcs::new
let conns = Web3Rpcs {
conns,
watch_consensus_head_receiver: None,
watch_consensus_connections_sender,

@ -1,7 +1,7 @@
// TODO: all pub, or export useful things here instead?
pub mod blockchain;
pub mod connection;
pub mod connections;
pub mod many;
pub mod one;
pub mod provider;
pub mod request;
pub mod synced_connections;

@ -1,7 +1,7 @@
///! Rate-limited communication with a web3 provider.
use super::blockchain::{ArcBlock, BlockHashesCache, SavedBlock};
use super::provider::Web3Provider;
use super::request::{OpenRequestHandle, OpenRequestHandleMetrics, OpenRequestResult};
use super::request::{OpenRequestHandle, OpenRequestResult};
use crate::app::{flatten_handle, AnyhowJoinHandle};
use crate::config::BlockAndRpc;
use crate::frontend::authorization::Authorization;
@ -10,6 +10,7 @@ use ethers::prelude::{Bytes, Middleware, ProviderError, TxHash, H256, U64};
use ethers::types::U256;
use futures::future::try_join_all;
use futures::StreamExt;
use hdrhistogram::Histogram;
use log::{debug, error, info, trace, warn, Level};
use migration::sea_orm::DatabaseConnection;
use parking_lot::RwLock;
@ -25,7 +26,7 @@ use std::{cmp::Ordering, sync::Arc};
use thread_fast_rng::rand::Rng;
use thread_fast_rng::thread_fast_rng;
use tokio::sync::{broadcast, oneshot, watch, RwLock as AsyncRwLock};
use tokio::time::{interval, sleep, sleep_until, timeout, Duration, Instant, MissedTickBehavior};
use tokio::time::{sleep, sleep_until, timeout, Duration, Instant};
// TODO: maybe provider state should have the block data limit in it. but it is inside an async lock and we can't Serialize then
#[derive(Clone, Debug)]
@ -35,6 +36,12 @@ pub enum ProviderState {
Connected(Arc<Web3Provider>),
}
impl Default for ProviderState {
fn default() -> Self {
Self::None
}
}
impl ProviderState {
pub async fn provider(&self, allow_not_ready: bool) -> Option<&Arc<Web3Provider>> {
match self {
@ -58,8 +65,31 @@ impl ProviderState {
}
}
pub struct Web3RpcLatencies {
/// Track how far behind the fastest node we are
new_head: Histogram<u64>,
/// exponentially weighted moving average of how far behind the fastest node we are
new_head_ewma: u32,
/// Track how long an rpc call takes on average
request: Histogram<u64>,
/// exponentially weighted moving average of how long an rpc call takes
request_ewma: u32,
}
impl Default for Web3RpcLatencies {
fn default() -> Self {
Self {
new_head: Histogram::new(3).unwrap(),
new_head_ewma: 0,
request: Histogram::new(3).unwrap(),
request_ewma: 0,
}
}
}
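// Illustrative sketch, not part of this diff: the `*_ewma` fields suggest updates of the
// usual exponentially weighted moving average form. The diff does not include the update
// logic yet, so the alpha value and integer rounding here are hypothetical.
fn example_update_ewma(previous: u32, sample: u32, alpha: f64) -> u32 {
    // new = alpha * sample + (1 - alpha) * previous
    (alpha * f64::from(sample) + (1.0 - alpha) * f64::from(previous)).round() as u32
}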
/// An active connection to a Web3 RPC server like geth or erigon.
pub struct Web3Connection {
#[derive(Default)]
pub struct Web3Rpc {
pub name: String,
pub display_name: Option<String>,
pub db_conn: Option<DatabaseConnection>,
@ -91,12 +121,13 @@ pub struct Web3Connection {
pub(super) block_data_limit: AtomicU64,
/// Lower tiers are higher priority when sending requests
pub(super) tier: u64,
/// TODO: should this be an AsyncRwLock?
/// TODO: change this to a watch channel so that http providers can subscribe and take action on change
pub(super) head_block: RwLock<Option<SavedBlock>>,
pub(super) open_request_handle_metrics: Arc<OpenRequestHandleMetrics>,
/// Track how fast this RPC is
pub(super) latency: Web3RpcLatencies,
}
impl Web3Connection {
impl Web3Rpc {
/// Connect to a web3 rpc
// TODO: have this take a builder (which will have channels attached). or maybe just take the config and give the config public fields
#[allow(clippy::too_many_arguments)]
@ -120,8 +151,7 @@ impl Web3Connection {
tx_id_sender: Option<flume::Sender<(TxHash, Arc<Self>)>>,
reconnect: bool,
tier: u64,
open_request_handle_metrics: Arc<OpenRequestHandleMetrics>,
) -> anyhow::Result<(Arc<Web3Connection>, AnyhowJoinHandle<()>)> {
) -> anyhow::Result<(Arc<Web3Rpc>, AnyhowJoinHandle<()>)> {
let hard_limit = hard_limit.map(|(hard_rate_limit, redis_pool)| {
// TODO: is cache size 1 okay? i think we need
RedisRateLimiter::new(
@ -154,19 +184,14 @@ impl Web3Connection {
display_name,
http_client,
url: url_str,
active_requests: 0.into(),
frontend_requests: 0.into(),
internal_requests: 0.into(),
provider_state: AsyncRwLock::new(ProviderState::None),
hard_limit,
hard_limit_until,
soft_limit,
automatic_block_limit,
backup,
block_data_limit,
head_block: RwLock::new(Default::default()),
tier,
open_request_handle_metrics,
..Default::default()
};
let new_connection = Arc::new(new_connection);
@ -506,7 +531,7 @@ impl Web3Connection {
// we previously sent a None. return early
return Ok(());
}
warn!("{} is not synced!", self);
warn!("clearing head block on {}!", self);
*head_block = None;
}
@ -885,34 +910,14 @@ impl Web3Connection {
.clone()
{
trace!("watching pending transactions on {}", self);
// TODO: does this keep the lock open for too long?
match provider.as_ref() {
Web3Provider::Mock => unimplemented!(),
Web3Provider::Http(provider) => {
// there is a "watch_pending_transactions" function, but a lot of public nodes do not support the necessary rpc endpoints
// TODO: what should this interval be? probably automatically set to some fraction of block time
// TODO: maybe it would be better to have one interval for all of the http providers, but this works for now
// TODO: if there are some websocket providers, maybe have a longer interval and a channel that tells the https to update when a websocket gets a new head? if they are slow this wouldn't work well though
let mut interval = interval(Duration::from_secs(60));
interval.set_missed_tick_behavior(MissedTickBehavior::Delay);
loop {
// TODO: actually do something here
/*
match self.try_request_handle().await {
Ok(active_request_handle) => {
// TODO: check the filter
todo!("actually send a request");
}
Err(e) => {
warn!("Failed getting latest block from {}: {:?}", self, e);
}
}
*/
// wait for the interval
// TODO: if error or rate limit, increase interval?
interval.tick().await;
}
// TODO: maybe subscribe to self.head_block?
// TODO: this keeps a read lock guard open on provider_state forever. is that okay for an http client?
futures::future::pending::<()>().await;
}
Web3Provider::Ws(provider) => {
// TODO: maybe the subscribe_pending_txs function should be on the active_request_handle
@ -1084,46 +1089,48 @@ impl fmt::Debug for Web3Provider {
}
}
impl Hash for Web3Connection {
impl Hash for Web3Rpc {
fn hash<H: Hasher>(&self, state: &mut H) {
// TODO: is this enough?
self.name.hash(state);
}
}
impl Eq for Web3Connection {}
impl Eq for Web3Rpc {}
impl Ord for Web3Connection {
impl Ord for Web3Rpc {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.name.cmp(&other.name)
}
}
impl PartialOrd for Web3Connection {
impl PartialOrd for Web3Rpc {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for Web3Connection {
impl PartialEq for Web3Rpc {
fn eq(&self, other: &Self) -> bool {
self.name == other.name
}
}
impl Serialize for Web3Connection {
impl Serialize for Web3Rpc {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
// the second argument is the number of fields in the struct.
let mut state = serializer.serialize_struct("Web3Connection", 8)?;
let mut state = serializer.serialize_struct("Web3Rpc", 9)?;
// the url is excluded because it likely includes private information. just show the name that we use in keys
state.serialize_field("name", &self.name)?;
// a longer name for display to users
state.serialize_field("display_name", &self.display_name)?;
state.serialize_field("backup", &self.backup)?;
match self.block_data_limit.load(atomic::Ordering::Relaxed) {
u64::MAX => {
state.serialize_field("block_data_limit", &None::<()>)?;
@ -1157,9 +1164,9 @@ impl Serialize for Web3Connection {
}
}
impl fmt::Debug for Web3Connection {
impl fmt::Debug for Web3Rpc {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut f = f.debug_struct("Web3Connection");
let mut f = f.debug_struct("Web3Rpc");
f.field("name", &self.name);
@ -1174,7 +1181,7 @@ impl fmt::Debug for Web3Connection {
}
}
impl fmt::Display for Web3Connection {
impl fmt::Display for Web3Rpc {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// TODO: filter basic auth and api keys
write!(f, "{}", &self.name)
@ -1207,27 +1214,16 @@ mod tests {
let head_block = SavedBlock::new(random_block);
let block_data_limit = u64::MAX;
let metrics = OpenRequestHandleMetrics::default();
let x = Web3Connection {
let x = Web3Rpc {
name: "name".to_string(),
db_conn: None,
display_name: None,
url: "ws://example.com".to_string(),
http_client: None,
active_requests: 0.into(),
frontend_requests: 0.into(),
internal_requests: 0.into(),
provider_state: AsyncRwLock::new(ProviderState::None),
hard_limit: None,
hard_limit_until: None,
soft_limit: 1_000,
automatic_block_limit: false,
backup: false,
block_data_limit: block_data_limit.into(),
tier: 0,
head_block: RwLock::new(Some(head_block.clone())),
open_request_handle_metrics: Arc::new(metrics),
..Default::default()
};
assert!(x.has_block_data(&0.into()));
@ -1255,28 +1251,16 @@ mod tests {
let block_data_limit = 64;
let metrics = OpenRequestHandleMetrics::default();
// TODO: this is getting long. have a `impl Default`
let x = Web3Connection {
let x = Web3Rpc {
name: "name".to_string(),
db_conn: None,
display_name: None,
url: "ws://example.com".to_string(),
http_client: None,
active_requests: 0.into(),
frontend_requests: 0.into(),
internal_requests: 0.into(),
provider_state: AsyncRwLock::new(ProviderState::None),
hard_limit: None,
hard_limit_until: None,
soft_limit: 1_000,
automatic_block_limit: false,
backup: false,
block_data_limit: block_data_limit.into(),
tier: 0,
head_block: RwLock::new(Some(head_block.clone())),
open_request_handle_metrics: Arc::new(metrics),
..Default::default()
};
assert!(!x.has_block_data(&0.into()));
@ -1313,7 +1297,7 @@ mod tests {
let metrics = OpenRequestHandleMetrics::default();
let x = Web3Connection {
let x = Web3Rpc {
name: "name".to_string(),
db_conn: None,
display_name: None,
@ -1330,7 +1314,6 @@ mod tests {
block_data_limit: block_data_limit.into(),
tier: 0,
head_block: RwLock::new(Some(head_block.clone())),
open_request_handle_metrics: Arc::new(metrics),
};
assert!(!x.has_block_data(&0.into()));

@ -1,7 +1,6 @@
use super::connection::Web3Connection;
use super::one::Web3Rpc;
use super::provider::Web3Provider;
use crate::frontend::authorization::{Authorization, AuthorizationType};
use crate::metered::{JsonRpcErrorCount, ProviderErrorCount};
use anyhow::Context;
use chrono::Utc;
use entities::revert_log;
@ -9,14 +8,10 @@ use entities::sea_orm_active_enums::Method;
use ethers::providers::{HttpClientError, ProviderError, WsClientError};
use ethers::types::{Address, Bytes};
use log::{debug, error, trace, warn, Level};
use metered::metered;
use metered::HitCount;
use metered::ResponseTime;
use metered::Throughput;
use migration::sea_orm::{self, ActiveEnum, ActiveModelTrait};
use serde_json::json;
use std::fmt;
use std::sync::atomic::{self, AtomicBool, Ordering};
use std::sync::atomic;
use std::sync::Arc;
use thread_fast_rng::rand::Rng;
use tokio::time::{sleep, Duration, Instant};
@ -35,11 +30,8 @@ pub enum OpenRequestResult {
#[derive(Debug)]
pub struct OpenRequestHandle {
authorization: Arc<Authorization>,
conn: Arc<Web3Connection>,
// TODO: this is the same metrics on the conn. use a reference?
metrics: Arc<OpenRequestHandleMetrics>,
conn: Arc<Web3Rpc>,
provider: Arc<Web3Provider>,
used: AtomicBool,
}
/// Depending on the context, RPC errors can require different handling.
@ -129,14 +121,11 @@ impl Authorization {
}
}
#[metered(registry = OpenRequestHandleMetrics, visibility = pub)]
impl OpenRequestHandle {
pub async fn new(authorization: Arc<Authorization>, conn: Arc<Web3Connection>) -> Self {
pub async fn new(authorization: Arc<Authorization>, conn: Arc<Web3Rpc>) -> Self {
// TODO: take request_id as an argument?
// TODO: attach a unique id to this? customer requests have one, but not internal queries
// TODO: what ordering?!
// TODO: should we be using metered, or not? i think not because we want stats for each handle
// TODO: these should maybe be sent to an influxdb instance?
conn.active_requests.fetch_add(1, atomic::Ordering::Relaxed);
let mut provider = None;
@ -184,15 +173,10 @@ impl OpenRequestHandle {
}
}
let metrics = conn.open_request_handle_metrics.clone();
let used = false.into();
Self {
authorization,
conn,
metrics,
provider,
used,
}
}
@ -201,17 +185,14 @@ impl OpenRequestHandle {
}
#[inline]
pub fn clone_connection(&self) -> Arc<Web3Connection> {
pub fn clone_connection(&self) -> Arc<Web3Rpc> {
self.conn.clone()
}
/// Send a web3 request
/// By having the request method here, we ensure that the rate limiter was called and connection counts were properly incremented
/// TODO: we no longer take self because metered doesn't like that
/// TODO: ErrorCount includes too many types of errors, such as transaction reverts
#[measure([JsonRpcErrorCount, HitCount, ProviderErrorCount, ResponseTime, Throughput])]
pub async fn request<P, R>(
&self,
self,
method: &str,
params: &P,
revert_handler: RequestRevertHandler,
@ -221,20 +202,11 @@ impl OpenRequestHandle {
P: Clone + fmt::Debug + serde::Serialize + Send + Sync + 'static,
R: serde::Serialize + serde::de::DeserializeOwned + fmt::Debug,
{
// ensure this function only runs once
if self.used.swap(true, Ordering::Release) {
unimplemented!("a request handle should only be used once");
}
// TODO: use tracing spans
// TODO: requests from customers have request ids, but we should add
// TODO: including params in this is way too verbose
// the authorization field is already on a parent span
// TODO: including params in this log is way too verbose
// trace!(rpc=%self.conn, %method, "request");
// trace!("got provider for {:?}", self);
// TODO: really sucks that we have to clone here
// TODO: replace ethers-rs providers with our own that supports streaming the responses
let response = match &*self.provider {
Web3Provider::Mock => unimplemented!(),
Web3Provider::Http(provider) => provider.request(method, params).await,

@ -1,25 +1,25 @@
use super::blockchain::{ArcBlock, SavedBlock};
use super::connection::Web3Connection;
use super::connections::Web3Connections;
use super::many::Web3Rpcs;
use super::one::Web3Rpc;
use ethers::prelude::{H256, U64};
use serde::Serialize;
use std::fmt;
use std::sync::Arc;
/// A collection of Web3Connections that are on the same block.
/// A collection of Web3Rpcs that are on the same block.
/// Serialize is so we can print it on our debug endpoint
#[derive(Clone, Default, Serialize)]
pub struct ConsensusConnections {
pub struct ConsensusWeb3Rpcs {
// TODO: store ArcBlock instead?
pub(super) head_block: Option<SavedBlock>,
// TODO: this should be able to serialize, but it isn't
#[serde(skip_serializing)]
pub(super) conns: Vec<Arc<Web3Connection>>,
pub(super) conns: Vec<Arc<Web3Rpc>>,
pub(super) num_checked_conns: usize,
pub(super) includes_backups: bool,
}
impl ConsensusConnections {
impl ConsensusWeb3Rpcs {
pub fn num_conns(&self) -> usize {
self.conns.len()
}
@ -31,7 +31,7 @@ impl ConsensusConnections {
// TODO: sum_hard_limit?
}
impl fmt::Debug for ConsensusConnections {
impl fmt::Debug for ConsensusWeb3Rpcs {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// TODO: the default formatter takes forever to write. this is too quiet though
// TODO: print the actual conns?
@ -42,7 +42,7 @@ impl fmt::Debug for ConsensusConnections {
}
}
impl Web3Connections {
impl Web3Rpcs {
pub fn head_block(&self) -> Option<ArcBlock> {
self.watch_consensus_head_receiver
.as_ref()

@ -1,8 +1,8 @@
use crate::frontend::authorization::Authorization;
use super::many::Web3Rpcs;
///! Load balanced communication with a group of web3 providers
use super::connection::Web3Connection;
use super::connections::Web3Connections;
use super::one::Web3Rpc;
use super::request::OpenRequestResult;
use ethers::prelude::{ProviderError, Transaction, TxHash};
use log::{debug, trace, Level};
@ -17,11 +17,11 @@ pub enum TxStatus {
Orphaned(Transaction),
}
impl Web3Connections {
impl Web3Rpcs {
async fn query_transaction_status(
&self,
authorization: &Arc<Authorization>,
rpc: Arc<Web3Connection>,
rpc: Arc<Web3Rpc>,
pending_tx_id: TxHash,
) -> Result<Option<TxStatus>, ProviderError> {
// TODO: there is a race here on geth. sometimes the rpc isn't yet ready to serve the transaction (even though they told us about it!)
@ -66,7 +66,7 @@ impl Web3Connections {
pub(super) async fn process_incoming_tx_id(
self: Arc<Self>,
authorization: Arc<Authorization>,
rpc: Arc<Web3Connection>,
rpc: Arc<Web3Rpc>,
pending_tx_id: TxHash,
pending_tx_sender: broadcast::Sender<TxStatus>,
) -> anyhow::Result<()> {