cargo upgrade and shorten variable names

also begin adding a latency tracker for rpc stats
This commit is contained in:
Bryan Stitt 2023-02-06 09:55:27 -08:00
parent 266c410f15
commit 0f280ce483
24 changed files with 304 additions and 327 deletions

137
Cargo.lock generated
View File

@ -98,9 +98,9 @@ dependencies = [
[[package]] [[package]]
name = "anyhow" name = "anyhow"
version = "1.0.68" version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2cb2f989d18dd141ab8ae82f64d1a8cdd37e0840f73a406896cf5e99502fab61" checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800"
dependencies = [ dependencies = [
"backtrace", "backtrace",
] ]
@ -230,6 +230,15 @@ dependencies = [
"num-traits", "num-traits",
] ]
[[package]]
name = "atomic-polyfill"
version = "0.1.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3ff7eb3f316534d83a8a2c3d1674ace8a5a71198eba31e2e2b597833f699b28"
dependencies = [
"critical-section",
]
[[package]] [[package]]
name = "atty" name = "atty"
version = "0.2.14" version = "0.2.14"
@ -310,12 +319,13 @@ dependencies = [
[[package]] [[package]]
name = "axum-client-ip" name = "axum-client-ip"
version = "0.3.1" version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ddfb5a3ddd6367075d50629546fb46710584016ae7704cd03b6d41cb5be82e5a" checksum = "0d719fabd6813392bbc10e1fe67f2977fad52791a836e51236f7e02f2482e017"
dependencies = [ dependencies = [
"axum", "axum",
"forwarded-header-value", "forwarded-header-value",
"serde",
] ]
[[package]] [[package]]
@ -1009,6 +1019,12 @@ dependencies = [
"cfg-if", "cfg-if",
] ]
[[package]]
name = "critical-section"
version = "1.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6548a0ad5d2549e111e1f6a11a6c2e2d00ce6a3dafe22948d67c2b443f775e52"
[[package]] [[package]]
name = "crossbeam-channel" name = "crossbeam-channel"
version = "0.5.6" version = "0.5.6"
@ -1216,7 +1232,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef552e6f588e446098f6ba40d89ac146c8c7b64aade83c051ee00bb5d2bc18d" checksum = "bef552e6f588e446098f6ba40d89ac146c8c7b64aade83c051ee00bb5d2bc18d"
dependencies = [ dependencies = [
"serde", "serde",
"uuid 1.2.2", "uuid 1.3.0",
] ]
[[package]] [[package]]
@ -1333,12 +1349,6 @@ dependencies = [
"winapi", "winapi",
] ]
[[package]]
name = "doc-comment"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10"
[[package]] [[package]]
name = "dotenv" name = "dotenv"
version = "0.15.0" version = "0.15.0"
@ -1433,7 +1443,7 @@ dependencies = [
"sea-orm", "sea-orm",
"serde", "serde",
"ulid", "ulid",
"uuid 1.2.2", "uuid 1.3.0",
] ]
[[package]] [[package]]
@ -1944,12 +1954,6 @@ dependencies = [
"libc", "libc",
] ]
[[package]]
name = "ftoa"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ca45aac12b6c561b6289bc68957cb1db3dccf870e1951d590202de5e24f1dd35"
[[package]] [[package]]
name = "funty" name = "funty"
version = "2.0.0" version = "2.0.0"
@ -2195,6 +2199,15 @@ dependencies = [
"thiserror", "thiserror",
] ]
[[package]]
name = "hash32"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0c35f58762feb77d74ebe43bdbc3210f09be9fe6742234d573bacc26ed92b67"
dependencies = [
"byteorder",
]
[[package]] [[package]]
name = "hashbrown" name = "hashbrown"
version = "0.11.2" version = "0.11.2"
@ -2280,6 +2293,19 @@ dependencies = [
"http", "http",
] ]
[[package]]
name = "heapless"
version = "0.7.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "db04bc24a18b9ea980628ecf00e6c0264f3c1426dac36c00cb49b6fbad8b0743"
dependencies = [
"atomic-polyfill",
"hash32",
"rustc_version",
"spin 0.9.4",
"stable_deref_trait",
]
[[package]] [[package]]
name = "heck" name = "heck"
version = "0.3.3" version = "0.3.3"
@ -2520,7 +2546,6 @@ checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399"
dependencies = [ dependencies = [
"autocfg", "autocfg",
"hashbrown 0.12.3", "hashbrown 0.12.3",
"serde",
] ]
[[package]] [[package]]
@ -2890,7 +2915,7 @@ dependencies = [
"tagptr", "tagptr",
"thiserror", "thiserror",
"triomphe", "triomphe",
"uuid 1.2.2", "uuid 1.3.0",
] ]
[[package]] [[package]]
@ -4174,7 +4199,7 @@ dependencies = [
"time 0.3.17", "time 0.3.17",
"tracing", "tracing",
"url", "url",
"uuid 1.2.2", "uuid 1.3.0",
] ]
[[package]] [[package]]
@ -4233,7 +4258,7 @@ dependencies = [
"sea-query-derive", "sea-query-derive",
"serde_json", "serde_json",
"time 0.3.17", "time 0.3.17",
"uuid 1.2.2", "uuid 1.3.0",
] ]
[[package]] [[package]]
@ -4248,7 +4273,7 @@ dependencies = [
"serde_json", "serde_json",
"sqlx", "sqlx",
"time 0.3.17", "time 0.3.17",
"uuid 1.2.2", "uuid 1.3.0",
] ]
[[package]] [[package]]
@ -4448,7 +4473,7 @@ dependencies = [
"thiserror", "thiserror",
"time 0.3.17", "time 0.3.17",
"url", "url",
"uuid 1.2.2", "uuid 1.3.0",
] ]
[[package]] [[package]]
@ -4483,9 +4508,9 @@ dependencies = [
[[package]] [[package]]
name = "serde_json" name = "serde_json"
version = "1.0.91" version = "1.0.92"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "877c235533714907a8c2464236f5c4b2a17262ef1bd71f38f35ea592c8da6883" checksum = "7434af0dc1cbd59268aa98b4c22c131c0584d2232f6fb166efb993e2832e896a"
dependencies = [ dependencies = [
"itoa 1.0.5", "itoa 1.0.5",
"ryu", "ryu",
@ -4502,18 +4527,25 @@ dependencies = [
] ]
[[package]] [[package]]
name = "serde_prometheus" name = "serde_plain"
version = "0.1.6" version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "25fcd6131bac47a32328d1ba1ee15a27f8d91ab2e5920dba71dbe93d2648f6b1" checksum = "d6018081315db179d0ce57b1fe4b62a12a0028c9cf9bbef868c9cf477b3c34ae"
dependencies = [ dependencies = [
"ftoa",
"indexmap",
"itoa 0.4.8",
"lazy_static",
"regex",
"serde", "serde",
"snafu", ]
[[package]]
name = "serde_prometheus"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1bfb6048d9e4ebc41f7d1a42c79b04c5b460633be307620a0e34a8f81970ea47"
dependencies = [
"heapless",
"nom",
"serde",
"serde_plain",
"thiserror",
] ]
[[package]] [[package]]
@ -4685,27 +4717,6 @@ version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0"
[[package]]
name = "snafu"
version = "0.6.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eab12d3c261b2308b0d80c26fffb58d17eba81a4be97890101f416b478c79ca7"
dependencies = [
"doc-comment",
"snafu-derive",
]
[[package]]
name = "snafu-derive"
version = "0.6.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1508efa03c362e23817f96cde18abed596a25219a8b2c66e8db33c03543d315b"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]] [[package]]
name = "socket2" name = "socket2"
version = "0.4.7" version = "0.4.7"
@ -4836,7 +4847,7 @@ dependencies = [
"time 0.3.17", "time 0.3.17",
"tokio-stream", "tokio-stream",
"url", "url",
"uuid 1.2.2", "uuid 1.3.0",
"webpki-roots", "webpki-roots",
] ]
@ -4871,6 +4882,12 @@ dependencies = [
"tokio-rustls", "tokio-rustls",
] ]
[[package]]
name = "stable_deref_trait"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
[[package]] [[package]]
name = "static_assertions" name = "static_assertions"
version = "1.1.0" version = "1.1.0"
@ -5631,9 +5648,9 @@ dependencies = [
[[package]] [[package]]
name = "uuid" name = "uuid"
version = "1.2.2" version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "422ee0de9031b5b948b97a8fc04e3aa35230001a722ddd27943e0be31564ce4c" checksum = "1674845326ee10d37ca60470760d4288a6f80f304007d92e5c53bab78c9cfd79"
dependencies = [ dependencies = [
"getrandom", "getrandom",
"serde", "serde",
@ -5837,7 +5854,7 @@ dependencies = [
"tower-http", "tower-http",
"ulid", "ulid",
"url", "url",
"uuid 1.2.2", "uuid 1.3.0",
] ]
[[package]] [[package]]

14
TODO.md
View File

@ -243,8 +243,8 @@ These are roughly in order of completion
- [x] cache the status page for a second - [x] cache the status page for a second
- [x] request accounting for websockets - [x] request accounting for websockets
- [x] database merge scripts - [x] database merge scripts
- [x] test that sets up a Web3Connection and asks "has_block" for old and new blocks - [x] test that sets up a Web3Rpc and asks "has_block" for old and new blocks
- [x] test that sets up Web3Connections with 2 nodes. one behind by several blocks. and see what the "next" server shows as - [x] test that sets up Web3Rpcs with 2 nodes. one behind by several blocks. and see what the "next" server shows as
- [x] ethspam on bsc and polygon gives 1/4 errors. fix whatever is causing this - [x] ethspam on bsc and polygon gives 1/4 errors. fix whatever is causing this
- bugfix! we were using the whole connection list instead of just the synced connection list when picking servers. oops! - bugfix! we were using the whole connection list instead of just the synced connection list when picking servers. oops!
- [x] actually block unauthenticated requests instead of emitting warning of "allowing without auth during development!" - [x] actually block unauthenticated requests instead of emitting warning of "allowing without auth during development!"
@ -289,7 +289,7 @@ These are not yet ordered. There might be duplicates. We might not actually need
- we were caching too aggressively - we were caching too aggressively
- [x] BUG! if sending transactions gets "INTERNAL_ERROR: existing tx with same hash", create a success message - [x] BUG! if sending transactions gets "INTERNAL_ERROR: existing tx with same hash", create a success message
- we just want to be sure that the server has our tx and in this case, it does. - we just want to be sure that the server has our tx and in this case, it does.
- ERROR http_request:request:try_send_all_upstream_servers: web3_proxy::rpcs::request: bad response! err=JsonRpcClientError(JsonRpcError(JsonRpcError { code: -32000, message: "INTERNAL_ERROR: existing tx with same hash", data: None })) method=eth_sendRawTransaction rpc=local_erigon_alpha_archive id=01GF4HV03Y4ZNKQV8DW5NDQ5CG method=POST authorized_request=User(Some(SqlxMySqlPoolConnection), AuthorizedKey { ip: 10.11.12.15, origin: None, user_key_id: 4, log_revert_chance: 0.0000 }) self=Web3Connections { conns: {"local_erigon_alpha_archive_ws": Web3Connection { name: "local_erigon_alpha_archive_ws", blocks: "all", .. }, "local_geth_ws": Web3Connection { name: "local_geth_ws", blocks: 64, .. }, "local_erigon_alpha_archive": Web3Connection { name: "local_erigon_alpha_archive", blocks: "all", .. }}, .. } authorized_request=Some(User(Some(SqlxMySqlPoolConnection), AuthorizedKey { ip: 10.11.12.15, origin: None, user_key_id: 4, log_revert_chance: 0.0000 })) request=JsonRpcRequest { id: RawValue(39), method: "eth_sendRawTransaction", .. } request_metadata=Some(RequestMetadata { datetime: 2022-10-11T22:14:57.406829095Z, period_seconds: 60, request_bytes: 633, backend_requests: 0, no_servers: 0, error_response: false, response_bytes: 0, response_millis: 0 }) block_needed=None - ERROR http_request:request:try_send_all_upstream_servers: web3_proxy::rpcs::request: bad response! err=JsonRpcClientError(JsonRpcError(JsonRpcError { code: -32000, message: "INTERNAL_ERROR: existing tx with same hash", data: None })) method=eth_sendRawTransaction rpc=local_erigon_alpha_archive id=01GF4HV03Y4ZNKQV8DW5NDQ5CG method=POST authorized_request=User(Some(SqlxMySqlPoolConnection), AuthorizedKey { ip: 10.11.12.15, origin: None, user_key_id: 4, log_revert_chance: 0.0000 }) self=Web3Rpcs { conns: {"local_erigon_alpha_archive_ws": Web3Rpc { name: "local_erigon_alpha_archive_ws", blocks: "all", .. }, "local_geth_ws": Web3Rpc { name: "local_geth_ws", blocks: 64, .. 
}, "local_erigon_alpha_archive": Web3Rpc { name: "local_erigon_alpha_archive", blocks: "all", .. }}, .. } authorized_request=Some(User(Some(SqlxMySqlPoolConnection), AuthorizedKey { ip: 10.11.12.15, origin: None, user_key_id: 4, log_revert_chance: 0.0000 })) request=JsonRpcRequest { id: RawValue(39), method: "eth_sendRawTransaction", .. } request_metadata=Some(RequestMetadata { datetime: 2022-10-11T22:14:57.406829095Z, period_seconds: 60, request_bytes: 633, backend_requests: 0, no_servers: 0, error_response: false, response_bytes: 0, response_millis: 0 }) block_needed=None
- [x] serde collect unknown fields in config instead of crash - [x] serde collect unknown fields in config instead of crash
- [x] upgrade user tier by address - [x] upgrade user tier by address
- [x] all_backend_connections skips syncing servers - [x] all_backend_connections skips syncing servers
@ -556,10 +556,10 @@ in another repo: event subscriber
- [ ] weird flapping fork could have more useful logs. like, howd we get to 1/1/4 and fork. geth changed its mind 3 times? - [ ] weird flapping fork could have more useful logs. like, howd we get to 1/1/4 and fork. geth changed its mind 3 times?
- should we change our code to follow the same consensus rules as geth? our first seen still seems like a reasonable choice - should we change our code to follow the same consensus rules as geth? our first seen still seems like a reasonable choice
- other chains might change all sorts of things about their fork choice rules - other chains might change all sorts of things about their fork choice rules
2022-07-22T23:52:18.593956Z WARN block_receiver: web3_proxy::connections: chain is forked! 1 possible heads. 1/1/4 rpcs have 0xa906…5bc1 rpc=Web3Connection { url: "ws://127.0.0.1:8546", data: 64, .. } new_block_num=15195517 2022-07-22T23:52:18.593956Z WARN block_receiver: web3_proxy::connections: chain is forked! 1 possible heads. 1/1/4 rpcs have 0xa906…5bc1 rpc=Web3Rpc { url: "ws://127.0.0.1:8546", data: 64, .. } new_block_num=15195517
2022-07-22T23:52:18.983441Z WARN block_receiver: web3_proxy::connections: chain is forked! 1 possible heads. 1/1/4 rpcs have 0x70e8…48e0 rpc=Web3Connection { url: "ws://127.0.0.1:8546", data: 64, .. } new_block_num=15195517 2022-07-22T23:52:18.983441Z WARN block_receiver: web3_proxy::connections: chain is forked! 1 possible heads. 1/1/4 rpcs have 0x70e8…48e0 rpc=Web3Rpc { url: "ws://127.0.0.1:8546", data: 64, .. } new_block_num=15195517
2022-07-22T23:52:19.350720Z WARN block_receiver: web3_proxy::connections: chain is forked! 2 possible heads. 1/2/4 rpcs have 0x70e8…48e0 rpc=Web3Connection { url: "ws://127.0.0.1:8549", data: "archive", .. } new_block_num=15195517 2022-07-22T23:52:19.350720Z WARN block_receiver: web3_proxy::connections: chain is forked! 2 possible heads. 1/2/4 rpcs have 0x70e8…48e0 rpc=Web3Rpc { url: "ws://127.0.0.1:8549", data: "archive", .. } new_block_num=15195517
2022-07-22T23:52:26.041140Z WARN block_receiver: web3_proxy::connections: chain is forked! 2 possible heads. 2/4/4 rpcs have 0x70e8…48e0 rpc=Web3Connection { url: "http://127.0.0.1:8549", data: "archive", .. } new_block_num=15195517 2022-07-22T23:52:26.041140Z WARN block_receiver: web3_proxy::connections: chain is forked! 2 possible heads. 2/4/4 rpcs have 0x70e8…48e0 rpc=Web3Rpc { url: "http://127.0.0.1:8549", data: "archive", .. } new_block_num=15195517
- [ ] threshold should check actual available request limits (if any) instead of just the soft limit - [ ] threshold should check actual available request limits (if any) instead of just the soft limit
- [ ] foreign key on_update and on_delete - [ ] foreign key on_update and on_delete
- [ ] database creation timestamps - [ ] database creation timestamps

View File

@ -7,7 +7,7 @@ edition = "2021"
[dependencies] [dependencies]
redis-rate-limiter = { path = "../redis-rate-limiter" } redis-rate-limiter = { path = "../redis-rate-limiter" }
anyhow = "1.0.68" anyhow = "1.0.69"
hashbrown = "0.13.2" hashbrown = "0.13.2"
log = "0.4.17" log = "0.4.17"
moka = { version = "0.9.6", default-features = false, features = ["future"] } moka = { version = "0.9.6", default-features = false, features = ["future"] }

View File

@ -12,6 +12,6 @@ path = "src/mod.rs"
[dependencies] [dependencies]
sea-orm = "0.10.7" sea-orm = "0.10.7"
serde = "1.0.152" serde = "1.0.152"
uuid = "1.2.2" uuid = "1.3.0"
ethers = "1.0.2" ethers = "1.0.2"
ulid = "1.0.0" ulid = "1.0.0"

View File

@ -5,6 +5,6 @@ authors = ["Bryan Stitt <bryan@stitthappens.com>"]
edition = "2021" edition = "2021"
[dependencies] [dependencies]
anyhow = "1.0.68" anyhow = "1.0.69"
deadpool-redis = { version = "0.11.1", features = ["rt_tokio_1", "serde"] } deadpool-redis = { version = "0.11.1", features = ["rt_tokio_1", "serde"] }
tokio = "1.25.0" tokio = "1.25.0"

View File

@ -25,10 +25,10 @@ thread-fast-rng = { path = "../thread-fast-rng" }
# TODO: import chrono from sea-orm so we always have the same version # TODO: import chrono from sea-orm so we always have the same version
# TODO: make sure this time version matches siwe. PR to put this in their prelude # TODO: make sure this time version matches siwe. PR to put this in their prelude
anyhow = { version = "1.0.68", features = ["backtrace"] } anyhow = { version = "1.0.69", features = ["backtrace"] }
argh = "0.1.10" argh = "0.1.10"
axum = { version = "0.6.4", features = ["headers", "ws"] } axum = { version = "0.6.4", features = ["headers", "ws"] }
axum-client-ip = "0.3.1" axum-client-ip = "0.4.0"
axum-macros = "0.3.2" axum-macros = "0.3.2"
chrono = "0.4.23" chrono = "0.4.23"
counter = "0.5.7" counter = "0.5.7"
@ -61,8 +61,8 @@ reqwest = { version = "0.11.14", default-features = false, features = ["json", "
rustc-hash = "1.1.0" rustc-hash = "1.1.0"
sentry = { version = "0.29.2", default-features = false, features = ["backtrace", "contexts", "panic", "anyhow", "reqwest", "rustls", "log", "sentry-log"] } sentry = { version = "0.29.2", default-features = false, features = ["backtrace", "contexts", "panic", "anyhow", "reqwest", "rustls", "log", "sentry-log"] }
serde = { version = "1.0.152", features = [] } serde = { version = "1.0.152", features = [] }
serde_json = { version = "1.0.91", default-features = false, features = ["alloc", "raw_value"] } serde_json = { version = "1.0.92", default-features = false, features = ["alloc", "raw_value"] }
serde_prometheus = "0.1.6" serde_prometheus = "0.2.0"
siwe = "0.5.0" siwe = "0.5.0"
time = "0.3.17" time = "0.3.17"
tokio = { version = "1.25.0", features = ["full"] } tokio = { version = "1.25.0", features = ["full"] }
@ -72,4 +72,4 @@ tower = "0.4.13"
tower-http = { version = "0.3.5", features = ["cors", "sensitive-headers"] } tower-http = { version = "0.3.5", features = ["cors", "sensitive-headers"] }
ulid = { version = "1.0.0", features = ["serde"] } ulid = { version = "1.0.0", features = ["serde"] }
url = "2.3.1" url = "2.3.1"
uuid = "1.2.2" uuid = "1.3.0"

View File

@ -11,8 +11,8 @@ use crate::jsonrpc::{
JsonRpcForwardedResponse, JsonRpcForwardedResponseEnum, JsonRpcRequest, JsonRpcRequestEnum, JsonRpcForwardedResponse, JsonRpcForwardedResponseEnum, JsonRpcRequest, JsonRpcRequestEnum,
}; };
use crate::rpcs::blockchain::{ArcBlock, SavedBlock}; use crate::rpcs::blockchain::{ArcBlock, SavedBlock};
use crate::rpcs::connection::Web3Connection; use crate::rpcs::many::Web3Rpcs;
use crate::rpcs::connections::Web3Connections; use crate::rpcs::one::Web3Rpc;
use crate::rpcs::transactions::TxStatus; use crate::rpcs::transactions::TxStatus;
use crate::user_token::UserBearerToken; use crate::user_token::UserBearerToken;
use anyhow::Context; use anyhow::Context;
@ -198,9 +198,9 @@ impl DatabaseReplica {
// TODO: i'm sure this is more arcs than necessary, but spawning futures makes references hard // TODO: i'm sure this is more arcs than necessary, but spawning futures makes references hard
pub struct Web3ProxyApp { pub struct Web3ProxyApp {
/// Send requests to the best server available /// Send requests to the best server available
pub balanced_rpcs: Arc<Web3Connections>, pub balanced_rpcs: Arc<Web3Rpcs>,
/// Send private requests (like eth_sendRawTransaction) to all these servers /// Send private requests (like eth_sendRawTransaction) to all these servers
pub private_rpcs: Option<Arc<Web3Connections>>, pub private_rpcs: Option<Arc<Web3Rpcs>>,
response_cache: ResponseCache, response_cache: ResponseCache,
// don't drop this or the sender will stop working // don't drop this or the sender will stop working
// TODO: broadcast channel instead? // TODO: broadcast channel instead?
@ -572,7 +572,7 @@ impl Web3ProxyApp {
.build_with_hasher(hashbrown::hash_map::DefaultHashBuilder::default()); .build_with_hasher(hashbrown::hash_map::DefaultHashBuilder::default());
// connect to the load balanced rpcs // connect to the load balanced rpcs
let (balanced_rpcs, balanced_handle) = Web3Connections::spawn( let (balanced_rpcs, balanced_handle) = Web3Rpcs::spawn(
top_config.app.chain_id, top_config.app.chain_id,
db_conn.clone(), db_conn.clone(),
balanced_rpcs, balanced_rpcs,
@ -598,7 +598,7 @@ impl Web3ProxyApp {
warn!("No private relays configured. Any transactions will be broadcast to the public mempool!"); warn!("No private relays configured. Any transactions will be broadcast to the public mempool!");
None None
} else { } else {
let (private_rpcs, private_handle) = Web3Connections::spawn( let (private_rpcs, private_handle) = Web3Rpcs::spawn(
top_config.app.chain_id, top_config.app.chain_id,
db_conn.clone(), db_conn.clone(),
private_rpcs, private_rpcs,
@ -911,6 +911,7 @@ impl Web3ProxyApp {
user_count, user_count,
}; };
// TODO: i don't like this library. it doesn't include HELP or TYPE lines and so our prometheus server fails to parse it
serde_prometheus::to_string(&metrics, Some("web3_proxy"), globals) serde_prometheus::to_string(&metrics, Some("web3_proxy"), globals)
.expect("prometheus metrics should always serialize") .expect("prometheus metrics should always serialize")
} }
@ -921,8 +922,7 @@ impl Web3ProxyApp {
authorization: Arc<Authorization>, authorization: Arc<Authorization>,
request: JsonRpcRequestEnum, request: JsonRpcRequestEnum,
proxy_mode: ProxyMode, proxy_mode: ProxyMode,
) -> Result<(JsonRpcForwardedResponseEnum, Vec<Arc<Web3Connection>>), FrontendErrorResponse> ) -> Result<(JsonRpcForwardedResponseEnum, Vec<Arc<Web3Rpc>>), FrontendErrorResponse> {
{
// trace!(?request, "proxy_web3_rpc"); // trace!(?request, "proxy_web3_rpc");
// even though we have timeouts on the requests to our backend providers, // even though we have timeouts on the requests to our backend providers,
@ -961,8 +961,7 @@ impl Web3ProxyApp {
authorization: &Arc<Authorization>, authorization: &Arc<Authorization>,
requests: Vec<JsonRpcRequest>, requests: Vec<JsonRpcRequest>,
proxy_mode: ProxyMode, proxy_mode: ProxyMode,
) -> Result<(Vec<JsonRpcForwardedResponse>, Vec<Arc<Web3Connection>>), FrontendErrorResponse> ) -> Result<(Vec<JsonRpcForwardedResponse>, Vec<Arc<Web3Rpc>>), FrontendErrorResponse> {
{
// TODO: we should probably change ethers-rs to support this directly. they pushed this off to v2 though // TODO: we should probably change ethers-rs to support this directly. they pushed this off to v2 though
let num_requests = requests.len(); let num_requests = requests.len();
@ -979,7 +978,7 @@ impl Web3ProxyApp {
// TODO: i'm sure this could be done better with iterators // TODO: i'm sure this could be done better with iterators
// TODO: stream the response? // TODO: stream the response?
let mut collected: Vec<JsonRpcForwardedResponse> = Vec::with_capacity(num_requests); let mut collected: Vec<JsonRpcForwardedResponse> = Vec::with_capacity(num_requests);
let mut collected_rpcs: HashSet<Arc<Web3Connection>> = HashSet::new(); let mut collected_rpcs: HashSet<Arc<Web3Rpc>> = HashSet::new();
for response in responses { for response in responses {
// TODO: any way to attach the tried rpcs to the error? it is likely helpful // TODO: any way to attach the tried rpcs to the error? it is likely helpful
let (response, rpcs) = response?; let (response, rpcs) = response?;
@ -1020,7 +1019,7 @@ impl Web3ProxyApp {
authorization: &Arc<Authorization>, authorization: &Arc<Authorization>,
mut request: JsonRpcRequest, mut request: JsonRpcRequest,
proxy_mode: ProxyMode, proxy_mode: ProxyMode,
) -> Result<(JsonRpcForwardedResponse, Vec<Arc<Web3Connection>>), FrontendErrorResponse> { ) -> Result<(JsonRpcForwardedResponse, Vec<Arc<Web3Rpc>>), FrontendErrorResponse> {
// trace!("Received request: {:?}", request); // trace!("Received request: {:?}", request);
let request_metadata = Arc::new(RequestMetadata::new(REQUEST_PERIOD, request.num_bytes())?); let request_metadata = Arc::new(RequestMetadata::new(REQUEST_PERIOD, request.num_bytes())?);
@ -1208,7 +1207,7 @@ impl Web3ProxyApp {
ProxyMode::Fastest(0) => None, ProxyMode::Fastest(0) => None,
// TODO: how many balanced rpcs should we send to? configurable? percentage of total? // TODO: how many balanced rpcs should we send to? configurable? percentage of total?
// TODO: what if we do 2 per tier? we want to blast the third party rpcs // TODO: what if we do 2 per tier? we want to blast the third party rpcs
// TODO: maybe having the third party rpcs in their own Web3Connections would be good for this // TODO: maybe having the third party rpcs in their own Web3Rpcs would be good for this
ProxyMode::Fastest(x) => Some(x * 4), ProxyMode::Fastest(x) => Some(x * 4),
ProxyMode::Versus => None, ProxyMode::Versus => None,
}; };

View File

@ -155,7 +155,7 @@ mod tests {
use std::env; use std::env;
use web3_proxy::{ use web3_proxy::{
config::{AppConfig, Web3ConnectionConfig}, config::{AppConfig, Web3RpcConfig},
rpcs::blockchain::ArcBlock, rpcs::blockchain::ArcBlock,
}; };
@ -204,7 +204,7 @@ mod tests {
balanced_rpcs: HashMap::from([ balanced_rpcs: HashMap::from([
( (
"anvil".to_string(), "anvil".to_string(),
Web3ConnectionConfig { Web3RpcConfig {
disabled: false, disabled: false,
display_name: None, display_name: None,
url: anvil.endpoint(), url: anvil.endpoint(),
@ -219,7 +219,7 @@ mod tests {
), ),
( (
"anvil_ws".to_string(), "anvil_ws".to_string(),
Web3ConnectionConfig { Web3RpcConfig {
disabled: false, disabled: false,
display_name: None, display_name: None,
url: anvil.ws_endpoint(), url: anvil.ws_endpoint(),

View File

@ -2,7 +2,7 @@ use std::collections::BTreeMap;
// show what nodes are used most often // show what nodes are used most often
use argh::FromArgs; use argh::FromArgs;
use log::info; use log::trace;
use prettytable::{row, Table}; use prettytable::{row, Table};
#[derive(FromArgs, PartialEq, Debug)] #[derive(FromArgs, PartialEq, Debug)]
@ -87,10 +87,8 @@ impl PopularityContestSubCommand {
by_tier.entry(tier).or_default().push(rpc_data); by_tier.entry(tier).or_default().push(rpc_data);
} }
// TODO: sort by_tier trace!("tier_requests: {:#?}", tier_requests);
trace!("by_tier: {:#?}", by_tier);
info!("tier_requests: {:#?}", tier_requests);
info!("by_tier: {:#?}", by_tier);
let mut table = Table::new(); let mut table = Table::new();

View File

@ -8,7 +8,7 @@ use log::warn;
use serde_json::json; use serde_json::json;
use std::sync::Arc; use std::sync::Arc;
use crate::{frontend::authorization::Authorization, rpcs::connections::Web3Connections}; use crate::{frontend::authorization::Authorization, rpcs::many::Web3Rpcs};
#[allow(non_snake_case)] #[allow(non_snake_case)]
pub fn block_num_to_U64(block_num: BlockNumber, latest_block: U64) -> (U64, bool) { pub fn block_num_to_U64(block_num: BlockNumber, latest_block: U64) -> (U64, bool) {
@ -45,7 +45,7 @@ pub async fn clean_block_number(
params: &mut serde_json::Value, params: &mut serde_json::Value,
block_param_id: usize, block_param_id: usize,
latest_block: U64, latest_block: U64,
rpcs: &Web3Connections, rpcs: &Web3Rpcs,
) -> anyhow::Result<U64> { ) -> anyhow::Result<U64> {
match params.as_array_mut() { match params.as_array_mut() {
None => { None => {
@ -130,7 +130,7 @@ pub async fn block_needed(
method: &str, method: &str,
params: Option<&mut serde_json::Value>, params: Option<&mut serde_json::Value>,
head_block_num: U64, head_block_num: U64,
rpcs: &Web3Connections, rpcs: &Web3Rpcs,
) -> anyhow::Result<BlockNeeded> { ) -> anyhow::Result<BlockNeeded> {
let params = if let Some(params) = params { let params = if let Some(params) = params {
// grab the params so we can inspect and potentially modify them // grab the params so we can inspect and potentially modify them

View File

@ -1,5 +1,5 @@
use crate::rpcs::blockchain::BlockHashesCache; use crate::rpcs::blockchain::BlockHashesCache;
use crate::rpcs::connection::Web3Connection; use crate::rpcs::one::Web3Rpc;
use crate::{app::AnyhowJoinHandle, rpcs::blockchain::ArcBlock}; use crate::{app::AnyhowJoinHandle, rpcs::blockchain::ArcBlock};
use argh::FromArgs; use argh::FromArgs;
use ethers::prelude::TxHash; use ethers::prelude::TxHash;
@ -11,8 +11,8 @@ use serde::Deserialize;
use std::sync::Arc; use std::sync::Arc;
use tokio::sync::broadcast; use tokio::sync::broadcast;
pub type BlockAndRpc = (Option<ArcBlock>, Arc<Web3Connection>); pub type BlockAndRpc = (Option<ArcBlock>, Arc<Web3Rpc>);
pub type TxHashAndRpc = (TxHash, Arc<Web3Connection>); pub type TxHashAndRpc = (TxHash, Arc<Web3Rpc>);
#[derive(Debug, FromArgs)] #[derive(Debug, FromArgs)]
/// Web3_proxy is a fast caching and load balancing proxy for web3 (Ethereum or similar) JsonRPC servers. /// Web3_proxy is a fast caching and load balancing proxy for web3 (Ethereum or similar) JsonRPC servers.
@ -41,15 +41,15 @@ pub struct CliConfig {
#[derive(Clone, Debug, Deserialize)] #[derive(Clone, Debug, Deserialize)]
pub struct TopConfig { pub struct TopConfig {
pub app: AppConfig, pub app: AppConfig,
pub balanced_rpcs: HashMap<String, Web3ConnectionConfig>, pub balanced_rpcs: HashMap<String, Web3RpcConfig>,
// TODO: instead of an option, give it a default // TODO: instead of an option, give it a default
pub private_rpcs: Option<HashMap<String, Web3ConnectionConfig>>, pub private_rpcs: Option<HashMap<String, Web3RpcConfig>>,
/// unknown config options get put here /// unknown config options get put here
#[serde(flatten, default = "HashMap::default")] #[serde(flatten, default = "HashMap::default")]
pub extra: HashMap<String, serde_json::Value>, pub extra: HashMap<String, serde_json::Value>,
} }
/// shared configuration between Web3Connections /// shared configuration between Web3Rpcs
// TODO: no String, only &str // TODO: no String, only &str
#[derive(Clone, Debug, Default, Deserialize)] #[derive(Clone, Debug, Default, Deserialize)]
pub struct AppConfig { pub struct AppConfig {
@ -198,7 +198,7 @@ fn default_response_cache_max_bytes() -> usize {
/// Configuration for a backend web3 RPC server /// Configuration for a backend web3 RPC server
#[derive(Clone, Debug, Deserialize)] #[derive(Clone, Debug, Deserialize)]
pub struct Web3ConnectionConfig { pub struct Web3RpcConfig {
/// simple way to disable a connection without deleting the row /// simple way to disable a connection without deleting the row
#[serde(default)] #[serde(default)]
pub disabled: bool, pub disabled: bool,
@ -230,9 +230,9 @@ fn default_tier() -> u64 {
0 0
} }
impl Web3ConnectionConfig { impl Web3RpcConfig {
/// Create a Web3Connection from config /// Create a Web3Rpc from config
/// TODO: move this into Web3Connection? (just need to make things pub(crate)) /// TODO: move this into Web3Rpc? (just need to make things pub(crate))
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
pub async fn spawn( pub async fn spawn(
self, self,
@ -245,12 +245,9 @@ impl Web3ConnectionConfig {
block_map: BlockHashesCache, block_map: BlockHashesCache,
block_sender: Option<flume::Sender<BlockAndRpc>>, block_sender: Option<flume::Sender<BlockAndRpc>>,
tx_id_sender: Option<flume::Sender<TxHashAndRpc>>, tx_id_sender: Option<flume::Sender<TxHashAndRpc>>,
) -> anyhow::Result<(Arc<Web3Connection>, AnyhowJoinHandle<()>)> { ) -> anyhow::Result<(Arc<Web3Rpc>, AnyhowJoinHandle<()>)> {
if !self.extra.is_empty() { if !self.extra.is_empty() {
warn!( warn!("unknown Web3RpcConfig fields!: {:?}", self.extra.keys());
"unknown Web3ConnectionConfig fields!: {:?}",
self.extra.keys()
);
} }
let hard_limit = match (self.hard_limit, redis_pool) { let hard_limit = match (self.hard_limit, redis_pool) {
@ -272,7 +269,7 @@ impl Web3ConnectionConfig {
let backup = self.backup.unwrap_or(false); let backup = self.backup.unwrap_or(false);
Web3Connection::spawn( Web3Rpc::spawn(
name, name,
self.display_name, self.display_name,
chain_id, chain_id,

View File

@ -2,7 +2,7 @@
use super::errors::FrontendErrorResponse; use super::errors::FrontendErrorResponse;
use crate::app::{AuthorizationChecks, Web3ProxyApp, APP_USER_AGENT}; use crate::app::{AuthorizationChecks, Web3ProxyApp, APP_USER_AGENT};
use crate::rpcs::connection::Web3Connection; use crate::rpcs::one::Web3Rpc;
use crate::user_token::UserBearerToken; use crate::user_token::UserBearerToken;
use anyhow::Context; use anyhow::Context;
use axum::headers::authorization::Bearer; use axum::headers::authorization::Bearer;
@ -80,7 +80,7 @@ pub struct RequestMetadata {
// TODO: "archive" isn't really a boolean. // TODO: "archive" isn't really a boolean.
pub archive_request: AtomicBool, pub archive_request: AtomicBool,
/// if this is empty, there was a cache_hit /// if this is empty, there was a cache_hit
pub backend_requests: Mutex<Vec<Arc<Web3Connection>>>, pub backend_requests: Mutex<Vec<Arc<Web3Rpc>>>,
pub no_servers: AtomicU64, pub no_servers: AtomicU64,
pub error_response: AtomicBool, pub error_response: AtomicBool,
pub response_bytes: AtomicU64, pub response_bytes: AtomicU64,

View File

@ -1,4 +1,6 @@
//! `frontend` contains HTTP and websocket endpoints for use by users and admins. //! `frontend` contains HTTP and websocket endpoints for use by users and admins.
//!
//! Important reading about axum extractors: https://docs.rs/axum/latest/axum/extract/index.html#the-order-of-extractors
pub mod authorization; pub mod authorization;
pub mod errors; pub mod errors;
@ -196,7 +198,6 @@ pub async fn serve(port: u16, proxy_app: Arc<Web3ProxyApp>) -> anyhow::Result<()
- axum::extract::ConnectInfo (if not behind proxy) - axum::extract::ConnectInfo (if not behind proxy)
*/ */
let service = app.into_make_service_with_connect_info::<SocketAddr>(); let service = app.into_make_service_with_connect_info::<SocketAddr>();
// let service = app.into_make_service();
// `axum::Server` is a re-export of `hyper::Server` // `axum::Server` is a re-export of `hyper::Server`
axum::Server::bind(&addr) axum::Server::bind(&addr)

View File

@ -8,7 +8,7 @@ use axum::extract::Path;
use axum::headers::{Origin, Referer, UserAgent}; use axum::headers::{Origin, Referer, UserAgent};
use axum::TypedHeader; use axum::TypedHeader;
use axum::{response::IntoResponse, Extension, Json}; use axum::{response::IntoResponse, Extension, Json};
use axum_client_ip::ClientIp; use axum_client_ip::InsecureClientIp;
use axum_macros::debug_handler; use axum_macros::debug_handler;
use itertools::Itertools; use itertools::Itertools;
use std::sync::Arc; use std::sync::Arc;
@ -19,7 +19,7 @@ use std::sync::Arc;
#[debug_handler] #[debug_handler]
pub async fn proxy_web3_rpc( pub async fn proxy_web3_rpc(
Extension(app): Extension<Arc<Web3ProxyApp>>, Extension(app): Extension<Arc<Web3ProxyApp>>,
ip: ClientIp, ip: InsecureClientIp,
origin: Option<TypedHeader<Origin>>, origin: Option<TypedHeader<Origin>>,
Json(payload): Json<JsonRpcRequestEnum>, Json(payload): Json<JsonRpcRequestEnum>,
) -> FrontendResult { ) -> FrontendResult {
@ -29,7 +29,7 @@ pub async fn proxy_web3_rpc(
#[debug_handler] #[debug_handler]
pub async fn fastest_proxy_web3_rpc( pub async fn fastest_proxy_web3_rpc(
Extension(app): Extension<Arc<Web3ProxyApp>>, Extension(app): Extension<Arc<Web3ProxyApp>>,
ip: ClientIp, ip: InsecureClientIp,
origin: Option<TypedHeader<Origin>>, origin: Option<TypedHeader<Origin>>,
Json(payload): Json<JsonRpcRequestEnum>, Json(payload): Json<JsonRpcRequestEnum>,
) -> FrontendResult { ) -> FrontendResult {
@ -41,7 +41,7 @@ pub async fn fastest_proxy_web3_rpc(
#[debug_handler] #[debug_handler]
pub async fn versus_proxy_web3_rpc( pub async fn versus_proxy_web3_rpc(
Extension(app): Extension<Arc<Web3ProxyApp>>, Extension(app): Extension<Arc<Web3ProxyApp>>,
ip: ClientIp, ip: InsecureClientIp,
origin: Option<TypedHeader<Origin>>, origin: Option<TypedHeader<Origin>>,
Json(payload): Json<JsonRpcRequestEnum>, Json(payload): Json<JsonRpcRequestEnum>,
) -> FrontendResult { ) -> FrontendResult {
@ -50,7 +50,7 @@ pub async fn versus_proxy_web3_rpc(
async fn _proxy_web3_rpc( async fn _proxy_web3_rpc(
app: Arc<Web3ProxyApp>, app: Arc<Web3ProxyApp>,
ClientIp(ip): ClientIp, InsecureClientIp(ip): InsecureClientIp,
origin: Option<TypedHeader<Origin>>, origin: Option<TypedHeader<Origin>>,
payload: JsonRpcRequestEnum, payload: JsonRpcRequestEnum,
proxy_mode: ProxyMode, proxy_mode: ProxyMode,
@ -91,7 +91,7 @@ async fn _proxy_web3_rpc(
#[debug_handler] #[debug_handler]
pub async fn proxy_web3_rpc_with_key( pub async fn proxy_web3_rpc_with_key(
Extension(app): Extension<Arc<Web3ProxyApp>>, Extension(app): Extension<Arc<Web3ProxyApp>>,
ip: ClientIp, ip: InsecureClientIp,
origin: Option<TypedHeader<Origin>>, origin: Option<TypedHeader<Origin>>,
referer: Option<TypedHeader<Referer>>, referer: Option<TypedHeader<Referer>>,
user_agent: Option<TypedHeader<UserAgent>>, user_agent: Option<TypedHeader<UserAgent>>,
@ -114,7 +114,7 @@ pub async fn proxy_web3_rpc_with_key(
#[debug_handler] #[debug_handler]
pub async fn fastest_proxy_web3_rpc_with_key( pub async fn fastest_proxy_web3_rpc_with_key(
Extension(app): Extension<Arc<Web3ProxyApp>>, Extension(app): Extension<Arc<Web3ProxyApp>>,
ip: ClientIp, ip: InsecureClientIp,
origin: Option<TypedHeader<Origin>>, origin: Option<TypedHeader<Origin>>,
referer: Option<TypedHeader<Referer>>, referer: Option<TypedHeader<Referer>>,
user_agent: Option<TypedHeader<UserAgent>>, user_agent: Option<TypedHeader<UserAgent>>,
@ -137,7 +137,7 @@ pub async fn fastest_proxy_web3_rpc_with_key(
#[debug_handler] #[debug_handler]
pub async fn versus_proxy_web3_rpc_with_key( pub async fn versus_proxy_web3_rpc_with_key(
Extension(app): Extension<Arc<Web3ProxyApp>>, Extension(app): Extension<Arc<Web3ProxyApp>>,
ip: ClientIp, ip: InsecureClientIp,
origin: Option<TypedHeader<Origin>>, origin: Option<TypedHeader<Origin>>,
referer: Option<TypedHeader<Referer>>, referer: Option<TypedHeader<Referer>>,
user_agent: Option<TypedHeader<UserAgent>>, user_agent: Option<TypedHeader<UserAgent>>,
@ -160,7 +160,7 @@ pub async fn versus_proxy_web3_rpc_with_key(
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
async fn _proxy_web3_rpc_with_key( async fn _proxy_web3_rpc_with_key(
app: Arc<Web3ProxyApp>, app: Arc<Web3ProxyApp>,
ClientIp(ip): ClientIp, InsecureClientIp(ip): InsecureClientIp,
origin: Option<TypedHeader<Origin>>, origin: Option<TypedHeader<Origin>>,
referer: Option<TypedHeader<Referer>>, referer: Option<TypedHeader<Referer>>,
user_agent: Option<TypedHeader<UserAgent>>, user_agent: Option<TypedHeader<UserAgent>>,

View File

@ -17,7 +17,7 @@ use axum::{
response::{IntoResponse, Redirect}, response::{IntoResponse, Redirect},
Extension, TypedHeader, Extension, TypedHeader,
}; };
use axum_client_ip::ClientIp; use axum_client_ip::InsecureClientIp;
use axum_macros::debug_handler; use axum_macros::debug_handler;
use futures::SinkExt; use futures::SinkExt;
use futures::{ use futures::{
@ -49,7 +49,7 @@ pub enum ProxyMode {
#[debug_handler] #[debug_handler]
pub async fn websocket_handler( pub async fn websocket_handler(
Extension(app): Extension<Arc<Web3ProxyApp>>, Extension(app): Extension<Arc<Web3ProxyApp>>,
ip: ClientIp, ip: InsecureClientIp,
origin: Option<TypedHeader<Origin>>, origin: Option<TypedHeader<Origin>>,
ws_upgrade: Option<WebSocketUpgrade>, ws_upgrade: Option<WebSocketUpgrade>,
) -> FrontendResult { ) -> FrontendResult {
@ -61,7 +61,7 @@ pub async fn websocket_handler(
#[debug_handler] #[debug_handler]
pub async fn fastest_websocket_handler( pub async fn fastest_websocket_handler(
Extension(app): Extension<Arc<Web3ProxyApp>>, Extension(app): Extension<Arc<Web3ProxyApp>>,
ip: ClientIp, ip: InsecureClientIp,
origin: Option<TypedHeader<Origin>>, origin: Option<TypedHeader<Origin>>,
ws_upgrade: Option<WebSocketUpgrade>, ws_upgrade: Option<WebSocketUpgrade>,
) -> FrontendResult { ) -> FrontendResult {
@ -75,7 +75,7 @@ pub async fn fastest_websocket_handler(
#[debug_handler] #[debug_handler]
pub async fn versus_websocket_handler( pub async fn versus_websocket_handler(
Extension(app): Extension<Arc<Web3ProxyApp>>, Extension(app): Extension<Arc<Web3ProxyApp>>,
ip: ClientIp, ip: InsecureClientIp,
origin: Option<TypedHeader<Origin>>, origin: Option<TypedHeader<Origin>>,
ws_upgrade: Option<WebSocketUpgrade>, ws_upgrade: Option<WebSocketUpgrade>,
) -> FrontendResult { ) -> FrontendResult {
@ -86,7 +86,7 @@ pub async fn versus_websocket_handler(
async fn _websocket_handler( async fn _websocket_handler(
proxy_mode: ProxyMode, proxy_mode: ProxyMode,
app: Arc<Web3ProxyApp>, app: Arc<Web3ProxyApp>,
ClientIp(ip): ClientIp, InsecureClientIp(ip): InsecureClientIp,
origin: Option<TypedHeader<Origin>>, origin: Option<TypedHeader<Origin>>,
ws_upgrade: Option<WebSocketUpgrade>, ws_upgrade: Option<WebSocketUpgrade>,
) -> FrontendResult { ) -> FrontendResult {
@ -121,7 +121,7 @@ async fn _websocket_handler(
#[debug_handler] #[debug_handler]
pub async fn websocket_handler_with_key( pub async fn websocket_handler_with_key(
Extension(app): Extension<Arc<Web3ProxyApp>>, Extension(app): Extension<Arc<Web3ProxyApp>>,
ip: ClientIp, ip: InsecureClientIp,
Path(rpc_key): Path<String>, Path(rpc_key): Path<String>,
origin: Option<TypedHeader<Origin>>, origin: Option<TypedHeader<Origin>>,
referer: Option<TypedHeader<Referer>>, referer: Option<TypedHeader<Referer>>,
@ -144,7 +144,7 @@ pub async fn websocket_handler_with_key(
#[debug_handler] #[debug_handler]
pub async fn fastest_websocket_handler_with_key( pub async fn fastest_websocket_handler_with_key(
Extension(app): Extension<Arc<Web3ProxyApp>>, Extension(app): Extension<Arc<Web3ProxyApp>>,
ip: ClientIp, ip: InsecureClientIp,
Path(rpc_key): Path<String>, Path(rpc_key): Path<String>,
origin: Option<TypedHeader<Origin>>, origin: Option<TypedHeader<Origin>>,
referer: Option<TypedHeader<Referer>>, referer: Option<TypedHeader<Referer>>,
@ -168,7 +168,7 @@ pub async fn fastest_websocket_handler_with_key(
#[debug_handler] #[debug_handler]
pub async fn versus_websocket_handler_with_key( pub async fn versus_websocket_handler_with_key(
Extension(app): Extension<Arc<Web3ProxyApp>>, Extension(app): Extension<Arc<Web3ProxyApp>>,
ip: ClientIp, ip: InsecureClientIp,
Path(rpc_key): Path<String>, Path(rpc_key): Path<String>,
origin: Option<TypedHeader<Origin>>, origin: Option<TypedHeader<Origin>>,
referer: Option<TypedHeader<Referer>>, referer: Option<TypedHeader<Referer>>,
@ -192,7 +192,7 @@ pub async fn versus_websocket_handler_with_key(
async fn _websocket_handler_with_key( async fn _websocket_handler_with_key(
proxy_mode: ProxyMode, proxy_mode: ProxyMode,
app: Arc<Web3ProxyApp>, app: Arc<Web3ProxyApp>,
ClientIp(ip): ClientIp, InsecureClientIp(ip): InsecureClientIp,
rpc_key: String, rpc_key: String,
origin: Option<TypedHeader<Origin>>, origin: Option<TypedHeader<Origin>>,
referer: Option<TypedHeader<Referer>>, referer: Option<TypedHeader<Referer>>,

View File

@ -16,7 +16,7 @@ use axum::{
response::IntoResponse, response::IntoResponse,
Extension, Json, TypedHeader, Extension, Json, TypedHeader,
}; };
use axum_client_ip::ClientIp; use axum_client_ip::InsecureClientIp;
use axum_macros::debug_handler; use axum_macros::debug_handler;
use chrono::{TimeZone, Utc}; use chrono::{TimeZone, Utc};
use entities::sea_orm_active_enums::LogLevel; use entities::sea_orm_active_enums::LogLevel;
@ -61,7 +61,7 @@ use ulid::Ulid;
#[debug_handler] #[debug_handler]
pub async fn user_login_get( pub async fn user_login_get(
Extension(app): Extension<Arc<Web3ProxyApp>>, Extension(app): Extension<Arc<Web3ProxyApp>>,
ClientIp(ip): ClientIp, InsecureClientIp(ip): InsecureClientIp,
// TODO: what does axum's error handling look like if the path fails to parse? // TODO: what does axum's error handling look like if the path fails to parse?
Path(mut params): Path<HashMap<String, String>>, Path(mut params): Path<HashMap<String, String>>,
) -> FrontendResult { ) -> FrontendResult {
@ -178,7 +178,7 @@ pub struct PostLogin {
#[debug_handler] #[debug_handler]
pub async fn user_login_post( pub async fn user_login_post(
Extension(app): Extension<Arc<Web3ProxyApp>>, Extension(app): Extension<Arc<Web3ProxyApp>>,
ClientIp(ip): ClientIp, InsecureClientIp(ip): InsecureClientIp,
Query(query): Query<PostLoginQuery>, Query(query): Query<PostLoginQuery>,
Json(payload): Json<PostLogin>, Json(payload): Json<PostLogin>,
) -> FrontendResult { ) -> FrontendResult {

View File

@ -23,13 +23,14 @@ pub async fn serve(app: Arc<Web3ProxyApp>, port: u16) -> anyhow::Result<()> {
// TODO: into_make_service is enough if we always run behind a proxy. make into_make_service_with_connect_info optional? // TODO: into_make_service is enough if we always run behind a proxy. make into_make_service_with_connect_info optional?
/* /*
It sequentially looks for an IP in: InsecureClientIp sequentially looks for an IP in:
- x-forwarded-for header (de-facto standard) - x-forwarded-for header (de-facto standard)
- x-real-ip header - x-real-ip header
- forwarded header (new standard) - forwarded header (new standard)
- axum::extract::ConnectInfo (if not behind proxy) - axum::extract::ConnectInfo (if not behind proxy)
So we probably won't need into_make_service_with_connect_info, but it shouldn't hurt Since we run behind haproxy, x-forwarded-for will be set.
We probably won't need into_make_service_with_connect_info, but it shouldn't hurt.
*/ */
let service = app.into_make_service_with_connect_info::<SocketAddr>(); let service = app.into_make_service_with_connect_info::<SocketAddr>();
// let service = app.into_make_service(); // let service = app.into_make_service();

View File

@ -1,10 +1,10 @@
///! Keep track of the blockchain as seen by a Web3Connections. use super::many::Web3Rpcs;
use super::connection::Web3Connection; ///! Keep track of the blockchain as seen by a Web3Rpcs.
use super::connections::Web3Connections; use super::one::Web3Rpc;
use super::transactions::TxStatus; use super::transactions::TxStatus;
use crate::frontend::authorization::Authorization; use crate::frontend::authorization::Authorization;
use crate::{ use crate::{
config::BlockAndRpc, jsonrpc::JsonRpcRequest, rpcs::synced_connections::ConsensusConnections, config::BlockAndRpc, jsonrpc::JsonRpcRequest, rpcs::synced_connections::ConsensusWeb3Rpcs,
}; };
use anyhow::Context; use anyhow::Context;
use derive_more::From; use derive_more::From;
@ -92,7 +92,7 @@ impl Display for SavedBlock {
} }
} }
impl Web3Connections { impl Web3Rpcs {
/// add a block to our mappings and track the heaviest chain /// add a block to our mappings and track the heaviest chain
pub async fn save_block( pub async fn save_block(
&self, &self,
@ -135,7 +135,7 @@ impl Web3Connections {
&self, &self,
authorization: &Arc<Authorization>, authorization: &Arc<Authorization>,
hash: &H256, hash: &H256,
rpc: Option<&Arc<Web3Connection>>, rpc: Option<&Arc<Web3Rpc>>,
) -> anyhow::Result<ArcBlock> { ) -> anyhow::Result<ArcBlock> {
// first, try to get the hash from our cache // first, try to get the hash from our cache
// the cache is set last, so if its here, its everywhere // the cache is set last, so if its here, its everywhere
@ -322,7 +322,7 @@ impl Web3Connections {
authorization: &Arc<Authorization>, authorization: &Arc<Authorization>,
consensus_finder: &mut ConsensusFinder, consensus_finder: &mut ConsensusFinder,
rpc_head_block: Option<SavedBlock>, rpc_head_block: Option<SavedBlock>,
rpc: Arc<Web3Connection>, rpc: Arc<Web3Rpc>,
head_block_sender: &watch::Sender<ArcBlock>, head_block_sender: &watch::Sender<ArcBlock>,
pending_tx_sender: &Option<broadcast::Sender<TxStatus>>, pending_tx_sender: &Option<broadcast::Sender<TxStatus>>,
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {
@ -550,11 +550,11 @@ impl ConnectionsGroup {
Self::new(true) Self::new(true)
} }
fn remove(&mut self, rpc: &Web3Connection) -> Option<H256> { fn remove(&mut self, rpc: &Web3Rpc) -> Option<H256> {
self.rpc_name_to_hash.remove(rpc.name.as_str()) self.rpc_name_to_hash.remove(rpc.name.as_str())
} }
fn insert(&mut self, rpc: &Web3Connection, block_hash: H256) -> Option<H256> { fn insert(&mut self, rpc: &Web3Rpc, block_hash: H256) -> Option<H256> {
self.rpc_name_to_hash.insert(rpc.name.clone(), block_hash) self.rpc_name_to_hash.insert(rpc.name.clone(), block_hash)
} }
@ -564,7 +564,7 @@ impl ConnectionsGroup {
rpc_name: &str, rpc_name: &str,
hash: &H256, hash: &H256,
authorization: &Arc<Authorization>, authorization: &Arc<Authorization>,
web3_connections: &Web3Connections, web3_rpcs: &Web3Rpcs,
) -> anyhow::Result<ArcBlock> { ) -> anyhow::Result<ArcBlock> {
// // TODO: why does this happen?!?! seems to only happen with uncled blocks // // TODO: why does this happen?!?! seems to only happen with uncled blocks
// // TODO: maybe we should do try_get_with? // // TODO: maybe we should do try_get_with?
@ -575,16 +575,17 @@ impl ConnectionsGroup {
// ); // );
// this option should almost always be populated. if the connection reconnects at a bad time it might not be available though // this option should almost always be populated. if the connection reconnects at a bad time it might not be available though
let rpc = web3_connections.conns.get(rpc_name); // TODO: if this is None, I think we should error.
let rpc = web3_rpcs.conns.get(rpc_name);
web3_connections.block(authorization, hash, rpc).await web3_rpcs.block(authorization, hash, rpc).await
} }
// TODO: do this during insert/remove? // TODO: do this during insert/remove?
pub(self) async fn highest_block( pub(self) async fn highest_block(
&self, &self,
authorization: &Arc<Authorization>, authorization: &Arc<Authorization>,
web3_connections: &Web3Connections, web3_rpcs: &Web3Rpcs,
) -> Option<ArcBlock> { ) -> Option<ArcBlock> {
let mut checked_heads = HashSet::with_capacity(self.rpc_name_to_hash.len()); let mut checked_heads = HashSet::with_capacity(self.rpc_name_to_hash.len());
let mut highest_block = None::<ArcBlock>; let mut highest_block = None::<ArcBlock>;
@ -596,7 +597,7 @@ impl ConnectionsGroup {
} }
let rpc_block = match self let rpc_block = match self
.get_block_from_rpc(rpc_name, rpc_head_hash, authorization, web3_connections) .get_block_from_rpc(rpc_name, rpc_head_hash, authorization, web3_rpcs)
.await .await
{ {
Ok(x) => x, Ok(x) => x,
@ -631,9 +632,9 @@ impl ConnectionsGroup {
pub(self) async fn consensus_head_connections( pub(self) async fn consensus_head_connections(
&self, &self,
authorization: &Arc<Authorization>, authorization: &Arc<Authorization>,
web3_connections: &Web3Connections, web3_rpcs: &Web3Rpcs,
) -> anyhow::Result<ConsensusConnections> { ) -> anyhow::Result<ConsensusWeb3Rpcs> {
let mut maybe_head_block = match self.highest_block(authorization, web3_connections).await { let mut maybe_head_block = match self.highest_block(authorization, web3_rpcs).await {
None => return Err(anyhow::anyhow!("No blocks known")), None => return Err(anyhow::anyhow!("No blocks known")),
Some(x) => x, Some(x) => x,
}; };
@ -667,7 +668,7 @@ impl ConnectionsGroup {
continue; continue;
} }
if let Some(rpc) = web3_connections.conns.get(rpc_name.as_str()) { if let Some(rpc) = web3_rpcs.conns.get(rpc_name.as_str()) {
highest_rpcs.insert(rpc_name); highest_rpcs.insert(rpc_name);
highest_rpcs_sum_soft_limit += rpc.soft_limit; highest_rpcs_sum_soft_limit += rpc.soft_limit;
} else { } else {
@ -676,18 +677,15 @@ impl ConnectionsGroup {
} }
} }
if highest_rpcs_sum_soft_limit >= web3_connections.min_sum_soft_limit if highest_rpcs_sum_soft_limit >= web3_rpcs.min_sum_soft_limit
&& highest_rpcs.len() >= web3_connections.min_head_rpcs && highest_rpcs.len() >= web3_rpcs.min_head_rpcs
{ {
// we have enough servers with enough requests // we have enough servers with enough requests
break; break;
} }
// not enough rpcs yet. check the parent block // not enough rpcs yet. check the parent block
if let Some(parent_block) = web3_connections if let Some(parent_block) = web3_rpcs.block_hashes.get(&maybe_head_block.parent_hash) {
.block_hashes
.get(&maybe_head_block.parent_hash)
{
// trace!( // trace!(
// child=%maybe_head_hash, parent=%parent_block.hash.unwrap(), "avoiding thundering herd", // child=%maybe_head_hash, parent=%parent_block.hash.unwrap(), "avoiding thundering herd",
// ); // );
@ -695,25 +693,25 @@ impl ConnectionsGroup {
maybe_head_block = parent_block; maybe_head_block = parent_block;
continue; continue;
} else { } else {
if num_known < web3_connections.min_head_rpcs { if num_known < web3_rpcs.min_head_rpcs {
return Err(anyhow::anyhow!( return Err(anyhow::anyhow!(
"not enough rpcs connected: {}/{}/{}", "not enough rpcs connected: {}/{}/{}",
highest_rpcs.len(), highest_rpcs.len(),
num_known, num_known,
web3_connections.min_head_rpcs, web3_rpcs.min_head_rpcs,
)); ));
} else { } else {
let soft_limit_percent = (highest_rpcs_sum_soft_limit as f32 let soft_limit_percent = (highest_rpcs_sum_soft_limit as f32
/ web3_connections.min_sum_soft_limit as f32) / web3_rpcs.min_sum_soft_limit as f32)
* 100.0; * 100.0;
return Err(anyhow::anyhow!( return Err(anyhow::anyhow!(
"ran out of parents to check. rpcs {}/{}/{}. soft limit: {:.2}% ({}/{})", "ran out of parents to check. rpcs {}/{}/{}. soft limit: {:.2}% ({}/{})",
highest_rpcs.len(), highest_rpcs.len(),
num_known, num_known,
web3_connections.min_head_rpcs, web3_rpcs.min_head_rpcs,
highest_rpcs_sum_soft_limit, highest_rpcs_sum_soft_limit,
web3_connections.min_sum_soft_limit, web3_rpcs.min_sum_soft_limit,
soft_limit_percent, soft_limit_percent,
)); ));
} }
@ -723,29 +721,28 @@ impl ConnectionsGroup {
// TODO: if consensus_head_rpcs.is_empty, try another method of finding the head block. will need to change the return Err above into breaks. // TODO: if consensus_head_rpcs.is_empty, try another method of finding the head block. will need to change the return Err above into breaks.
// we've done all the searching for the heaviest block that we can // we've done all the searching for the heaviest block that we can
if highest_rpcs.len() < web3_connections.min_head_rpcs if highest_rpcs.len() < web3_rpcs.min_head_rpcs
|| highest_rpcs_sum_soft_limit < web3_connections.min_sum_soft_limit || highest_rpcs_sum_soft_limit < web3_rpcs.min_sum_soft_limit
{ {
// if we get here, not enough servers are synced. return an error // if we get here, not enough servers are synced. return an error
let soft_limit_percent = (highest_rpcs_sum_soft_limit as f32 let soft_limit_percent =
/ web3_connections.min_sum_soft_limit as f32) (highest_rpcs_sum_soft_limit as f32 / web3_rpcs.min_sum_soft_limit as f32) * 100.0;
* 100.0;
return Err(anyhow::anyhow!( return Err(anyhow::anyhow!(
"Not enough resources. rpcs {}/{}/{}. soft limit: {:.2}% ({}/{})", "Not enough resources. rpcs {}/{}/{}. soft limit: {:.2}% ({}/{})",
highest_rpcs.len(), highest_rpcs.len(),
num_known, num_known,
web3_connections.min_head_rpcs, web3_rpcs.min_head_rpcs,
highest_rpcs_sum_soft_limit, highest_rpcs_sum_soft_limit,
web3_connections.min_sum_soft_limit, web3_rpcs.min_sum_soft_limit,
soft_limit_percent, soft_limit_percent,
)); ));
} }
// success! this block has enough soft limit and nodes on it (or on later blocks) // success! this block has enough soft limit and nodes on it (or on later blocks)
let conns: Vec<Arc<Web3Connection>> = highest_rpcs let conns: Vec<Arc<Web3Rpc>> = highest_rpcs
.into_iter() .into_iter()
.filter_map(|conn_name| web3_connections.conns.get(conn_name).cloned()) .filter_map(|conn_name| web3_rpcs.conns.get(conn_name).cloned())
.collect(); .collect();
// TODO: DEBUG only check // TODO: DEBUG only check
@ -758,7 +755,7 @@ impl ConnectionsGroup {
let consensus_head_block: SavedBlock = maybe_head_block.into(); let consensus_head_block: SavedBlock = maybe_head_block.into();
Ok(ConsensusConnections { Ok(ConsensusWeb3Rpcs {
head_block: Some(consensus_head_block), head_block: Some(consensus_head_block),
conns, conns,
num_checked_conns: self.rpc_name_to_hash.len(), num_checked_conns: self.rpc_name_to_hash.len(),
@ -785,7 +782,7 @@ impl Default for ConsensusFinder {
} }
impl ConsensusFinder { impl ConsensusFinder {
fn remove(&mut self, rpc: &Web3Connection) -> Option<H256> { fn remove(&mut self, rpc: &Web3Rpc) -> Option<H256> {
// TODO: should we have multiple backup tiers? (remote datacenters vs third party) // TODO: should we have multiple backup tiers? (remote datacenters vs third party)
if !rpc.backup { if !rpc.backup {
self.main.remove(rpc); self.main.remove(rpc);
@ -793,7 +790,7 @@ impl ConsensusFinder {
self.all.remove(rpc) self.all.remove(rpc)
} }
fn insert(&mut self, rpc: &Web3Connection, new_hash: H256) -> Option<H256> { fn insert(&mut self, rpc: &Web3Rpc, new_hash: H256) -> Option<H256> {
// TODO: should we have multiple backup tiers? (remote datacenters vs third party) // TODO: should we have multiple backup tiers? (remote datacenters vs third party)
if !rpc.backup { if !rpc.backup {
self.main.insert(rpc, new_hash); self.main.insert(rpc, new_hash);
@ -805,9 +802,9 @@ impl ConsensusFinder {
async fn update_rpc( async fn update_rpc(
&mut self, &mut self,
rpc_head_block: Option<SavedBlock>, rpc_head_block: Option<SavedBlock>,
rpc: Arc<Web3Connection>, rpc: Arc<Web3Rpc>,
// we need this so we can save the block to caches. i don't like it though. maybe we should use a lazy_static Cache wrapper that has a "save_block" method?. i generally dislike globals but i also dislike all the types having to pass eachother around // we need this so we can save the block to caches. i don't like it though. maybe we should use a lazy_static Cache wrapper that has a "save_block" method?. i generally dislike globals but i also dislike all the types having to pass eachother around
web3_connections: &Web3Connections, web3_connections: &Web3Rpcs,
) -> anyhow::Result<bool> { ) -> anyhow::Result<bool> {
// add the rpc's block to connection_heads, or remove the rpc from connection_heads // add the rpc's block to connection_heads, or remove the rpc from connection_heads
let changed = match rpc_head_block { let changed = match rpc_head_block {
@ -852,15 +849,15 @@ impl ConsensusFinder {
async fn best_consensus_connections( async fn best_consensus_connections(
&mut self, &mut self,
authorization: &Arc<Authorization>, authorization: &Arc<Authorization>,
web3_connections: &Web3Connections, web3_connections: &Web3Rpcs,
) -> ConsensusConnections { ) -> ConsensusWeb3Rpcs {
let highest_block_num = match self let highest_block_num = match self
.all .all
.highest_block(authorization, web3_connections) .highest_block(authorization, web3_connections)
.await .await
{ {
None => { None => {
return ConsensusConnections::default(); return ConsensusWeb3Rpcs::default();
} }
Some(x) => x.number.expect("blocks here should always have a number"), Some(x) => x.number.expect("blocks here should always have a number"),
}; };
@ -901,7 +898,7 @@ impl ConsensusFinder {
if self.all.rpc_name_to_hash.len() < web3_connections.min_head_rpcs { if self.all.rpc_name_to_hash.len() < web3_connections.min_head_rpcs {
debug!("No consensus head yet: {}", err); debug!("No consensus head yet: {}", err);
} }
return ConsensusConnections::default(); return ConsensusWeb3Rpcs::default();
} }
Ok(x) => x, Ok(x) => x,
}; };
@ -924,7 +921,7 @@ impl ConsensusFinder {
} else { } else {
// TODO: i don't think we need this error. and i doublt we'll ever even get here // TODO: i don't think we need this error. and i doublt we'll ever even get here
error!("NO CONSENSUS HEAD!"); error!("NO CONSENSUS HEAD!");
ConsensusConnections::default() ConsensusWeb3Rpcs::default()
} }
} }
} }

View File

@ -1,12 +1,10 @@
///! Load balanced communication with a group of web3 providers ///! Load balanced communication with a group of web3 rpc providers
use super::blockchain::{ArcBlock, BlockHashesCache}; use super::blockchain::{ArcBlock, BlockHashesCache};
use super::connection::Web3Connection; use super::one::Web3Rpc;
use super::request::{ use super::request::{OpenRequestHandle, OpenRequestResult, RequestRevertHandler};
OpenRequestHandle, OpenRequestResult, RequestRevertHandler, use super::synced_connections::ConsensusWeb3Rpcs;
};
use super::synced_connections::ConsensusConnections;
use crate::app::{flatten_handle, AnyhowJoinHandle}; use crate::app::{flatten_handle, AnyhowJoinHandle};
use crate::config::{BlockAndRpc, TxHashAndRpc, Web3ConnectionConfig}; use crate::config::{BlockAndRpc, TxHashAndRpc, Web3RpcConfig};
use crate::frontend::authorization::{Authorization, RequestMetadata}; use crate::frontend::authorization::{Authorization, RequestMetadata};
use crate::frontend::rpc_proxy_ws::ProxyMode; use crate::frontend::rpc_proxy_ws::ProxyMode;
use crate::jsonrpc::{JsonRpcForwardedResponse, JsonRpcRequest}; use crate::jsonrpc::{JsonRpcForwardedResponse, JsonRpcRequest};
@ -36,11 +34,11 @@ use tokio::time::{interval, sleep, sleep_until, Duration, Instant, MissedTickBeh
/// A collection of web3 connections. Sends requests either the current best server or all servers. /// A collection of web3 connections. Sends requests either the current best server or all servers.
#[derive(From)] #[derive(From)]
pub struct Web3Connections { pub struct Web3Rpcs {
/// any requests will be forwarded to one (or more) of these connections /// any requests will be forwarded to one (or more) of these connections
pub(crate) conns: HashMap<String, Arc<Web3Connection>>, pub(crate) conns: HashMap<String, Arc<Web3Rpc>>,
/// all providers with the same consensus head block. won't update if there is no `self.watch_consensus_head_sender` /// all providers with the same consensus head block. won't update if there is no `self.watch_consensus_head_sender`
pub(super) watch_consensus_connections_sender: watch::Sender<Arc<ConsensusConnections>>, pub(super) watch_consensus_connections_sender: watch::Sender<Arc<ConsensusWeb3Rpcs>>,
/// this head receiver makes it easy to wait until there is a new block /// this head receiver makes it easy to wait until there is a new block
pub(super) watch_consensus_head_receiver: Option<watch::Receiver<ArcBlock>>, pub(super) watch_consensus_head_receiver: Option<watch::Receiver<ArcBlock>>,
pub(super) pending_transactions: pub(super) pending_transactions:
@ -54,13 +52,13 @@ pub struct Web3Connections {
pub(super) min_sum_soft_limit: u32, pub(super) min_sum_soft_limit: u32,
} }
impl Web3Connections { impl Web3Rpcs {
/// Spawn durable connections to multiple Web3 providers. /// Spawn durable connections to multiple Web3 providers.
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
pub async fn spawn( pub async fn spawn(
chain_id: u64, chain_id: u64,
db_conn: Option<DatabaseConnection>, db_conn: Option<DatabaseConnection>,
server_configs: HashMap<String, Web3ConnectionConfig>, server_configs: HashMap<String, Web3RpcConfig>,
http_client: Option<reqwest::Client>, http_client: Option<reqwest::Client>,
redis_pool: Option<redis_rate_limiter::RedisPool>, redis_pool: Option<redis_rate_limiter::RedisPool>,
block_map: BlockHashesCache, block_map: BlockHashesCache,
@ -242,13 +240,13 @@ impl Web3Connections {
Ok((connections, handle)) Ok((connections, handle))
} }
pub fn get(&self, conn_name: &str) -> Option<&Arc<Web3Connection>> { pub fn get(&self, conn_name: &str) -> Option<&Arc<Web3Rpc>> {
self.conns.get(conn_name) self.conns.get(conn_name)
} }
/// subscribe to blocks and transactions from all the backend rpcs. /// subscribe to blocks and transactions from all the backend rpcs.
/// blocks are processed by all the `Web3Connection`s and then sent to the `block_receiver` /// blocks are processed by all the `Web3Rpc`s and then sent to the `block_receiver`
/// transaction ids from all the `Web3Connection`s are deduplicated and forwarded to `pending_tx_sender` /// transaction ids from all the `Web3Rpc`s are deduplicated and forwarded to `pending_tx_sender`
async fn subscribe( async fn subscribe(
self: Arc<Self>, self: Arc<Self>,
authorization: Arc<Authorization>, authorization: Arc<Authorization>,
@ -412,7 +410,7 @@ impl Web3Connections {
&self, &self,
authorization: &Arc<Authorization>, authorization: &Arc<Authorization>,
request_metadata: Option<&Arc<RequestMetadata>>, request_metadata: Option<&Arc<RequestMetadata>>,
skip: &[Arc<Web3Connection>], skip: &[Arc<Web3Rpc>],
min_block_needed: Option<&U64>, min_block_needed: Option<&U64>,
) -> anyhow::Result<OpenRequestResult> { ) -> anyhow::Result<OpenRequestResult> {
if let Ok(without_backups) = self if let Ok(without_backups) = self
@ -447,13 +445,10 @@ impl Web3Connections {
allow_backups: bool, allow_backups: bool,
authorization: &Arc<Authorization>, authorization: &Arc<Authorization>,
request_metadata: Option<&Arc<RequestMetadata>>, request_metadata: Option<&Arc<RequestMetadata>>,
skip: &[Arc<Web3Connection>], skip: &[Arc<Web3Rpc>],
min_block_needed: Option<&U64>, min_block_needed: Option<&U64>,
) -> anyhow::Result<OpenRequestResult> { ) -> anyhow::Result<OpenRequestResult> {
let usable_rpcs_by_head_num_and_weight: BTreeMap< let usable_rpcs_by_head_num_and_weight: BTreeMap<(Option<U64>, u64), Vec<Arc<Web3Rpc>>> = {
(Option<U64>, u64),
Vec<Arc<Web3Connection>>,
> = {
let synced_connections = self.watch_consensus_connections_sender.borrow().clone(); let synced_connections = self.watch_consensus_connections_sender.borrow().clone();
let head_block_num = if let Some(head_block) = synced_connections.head_block.as_ref() { let head_block_num = if let Some(head_block) = synced_connections.head_block.as_ref() {
@ -1113,23 +1108,23 @@ impl Web3Connections {
} }
} }
impl fmt::Debug for Web3Connections { impl fmt::Debug for Web3Rpcs {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// TODO: the default formatter takes forever to write. this is too quiet though // TODO: the default formatter takes forever to write. this is too quiet though
f.debug_struct("Web3Connections") f.debug_struct("Web3Rpcs")
.field("conns", &self.conns) .field("conns", &self.conns)
.finish_non_exhaustive() .finish_non_exhaustive()
} }
} }
impl Serialize for Web3Connections { impl Serialize for Web3Rpcs {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where where
S: Serializer, S: Serializer,
{ {
let mut state = serializer.serialize_struct("Web3Connections", 6)?; let mut state = serializer.serialize_struct("Web3Rpcs", 6)?;
let conns: Vec<&Web3Connection> = self.conns.values().map(|x| x.as_ref()).collect(); let conns: Vec<&Web3Rpc> = self.conns.values().map(|x| x.as_ref()).collect();
state.serialize_field("conns", &conns)?; state.serialize_field("conns", &conns)?;
{ {
@ -1149,7 +1144,7 @@ impl Serialize for Web3Connections {
} }
/// sort by block number (descending) and tier (ascending) /// sort by block number (descending) and tier (ascending)
fn sort_connections_by_sync_status(rpcs: &mut Vec<Arc<Web3Connection>>) { fn sort_connections_by_sync_status(rpcs: &mut Vec<Arc<Web3Rpc>>) {
rpcs.sort_by_cached_key(|x| { rpcs.sort_by_cached_key(|x| {
let reversed_head_block = u64::MAX let reversed_head_block = u64::MAX
- x.head_block - x.head_block
@ -1170,7 +1165,7 @@ mod tests {
use super::*; use super::*;
use crate::rpcs::{ use crate::rpcs::{
blockchain::{ConsensusFinder, SavedBlock}, blockchain::{ConsensusFinder, SavedBlock},
connection::ProviderState, one::ProviderState,
provider::Web3Provider, provider::Web3Provider,
}; };
use ethers::types::{Block, U256}; use ethers::types::{Block, U256};
@ -1205,37 +1200,37 @@ mod tests {
.collect(); .collect();
let mut rpcs = [ let mut rpcs = [
Web3Connection { Web3Rpc {
name: "a".to_string(), name: "a".to_string(),
tier: 0, tier: 0,
head_block: RwLock::new(None), head_block: RwLock::new(None),
..Default::default() ..Default::default()
}, },
Web3Connection { Web3Rpc {
name: "b".to_string(), name: "b".to_string(),
tier: 0, tier: 0,
head_block: RwLock::new(blocks.get(1).cloned()), head_block: RwLock::new(blocks.get(1).cloned()),
..Default::default() ..Default::default()
}, },
Web3Connection { Web3Rpc {
name: "c".to_string(), name: "c".to_string(),
tier: 0, tier: 0,
head_block: RwLock::new(blocks.get(2).cloned()), head_block: RwLock::new(blocks.get(2).cloned()),
..Default::default() ..Default::default()
}, },
Web3Connection { Web3Rpc {
name: "d".to_string(), name: "d".to_string(),
tier: 1, tier: 1,
head_block: RwLock::new(None), head_block: RwLock::new(None),
..Default::default() ..Default::default()
}, },
Web3Connection { Web3Rpc {
name: "e".to_string(), name: "e".to_string(),
tier: 1, tier: 1,
head_block: RwLock::new(blocks.get(1).cloned()), head_block: RwLock::new(blocks.get(1).cloned()),
..Default::default() ..Default::default()
}, },
Web3Connection { Web3Rpc {
name: "f".to_string(), name: "f".to_string(),
tier: 1, tier: 1,
head_block: RwLock::new(blocks.get(2).cloned()), head_block: RwLock::new(blocks.get(2).cloned()),
@ -1292,48 +1287,32 @@ mod tests {
let block_data_limit = u64::MAX; let block_data_limit = u64::MAX;
let head_rpc = Web3Connection { let head_rpc = Web3Rpc {
name: "synced".to_string(), name: "synced".to_string(),
db_conn: None,
display_name: None,
url: "ws://example.com/synced".to_string(),
http_client: None,
active_requests: 0.into(),
frontend_requests: 0.into(),
internal_requests: 0.into(),
provider_state: AsyncRwLock::new(ProviderState::Connected(Arc::new( provider_state: AsyncRwLock::new(ProviderState::Connected(Arc::new(
Web3Provider::Mock, Web3Provider::Mock,
))), ))),
hard_limit: None,
hard_limit_until: None,
soft_limit: 1_000, soft_limit: 1_000,
automatic_block_limit: true, automatic_block_limit: false,
backup: false, backup: false,
block_data_limit: block_data_limit.into(), block_data_limit: block_data_limit.into(),
tier: 0, tier: 0,
head_block: RwLock::new(Some(head_block.clone())), head_block: RwLock::new(Some(head_block.clone())),
..Default::default()
}; };
let lagged_rpc = Web3Connection { let lagged_rpc = Web3Rpc {
name: "lagged".to_string(), name: "lagged".to_string(),
db_conn: None,
display_name: None,
url: "ws://example.com/lagged".to_string(),
http_client: None,
active_requests: 0.into(),
frontend_requests: 0.into(),
internal_requests: 0.into(),
provider_state: AsyncRwLock::new(ProviderState::Connected(Arc::new( provider_state: AsyncRwLock::new(ProviderState::Connected(Arc::new(
Web3Provider::Mock, Web3Provider::Mock,
))), ))),
hard_limit: None,
hard_limit_until: None,
soft_limit: 1_000, soft_limit: 1_000,
automatic_block_limit: false, automatic_block_limit: false,
backup: false, backup: false,
block_data_limit: block_data_limit.into(), block_data_limit: block_data_limit.into(),
tier: 0, tier: 0,
head_block: RwLock::new(Some(lagged_block.clone())), head_block: RwLock::new(Some(lagged_block.clone())),
..Default::default()
}; };
assert!(head_rpc.has_block_data(&lagged_block.number())); assert!(head_rpc.has_block_data(&lagged_block.number()));
@ -1352,8 +1331,8 @@ mod tests {
let (watch_consensus_connections_sender, _) = watch::channel(Default::default()); let (watch_consensus_connections_sender, _) = watch::channel(Default::default());
// TODO: make a Web3Connections::new // TODO: make a Web3Rpcs::new
let conns = Web3Connections { let conns = Web3Rpcs {
conns, conns,
watch_consensus_head_receiver: None, watch_consensus_head_receiver: None,
watch_consensus_connections_sender, watch_consensus_connections_sender,
@ -1523,48 +1502,32 @@ mod tests {
let head_block: SavedBlock = Arc::new(head_block).into(); let head_block: SavedBlock = Arc::new(head_block).into();
let pruned_rpc = Web3Connection { let pruned_rpc = Web3Rpc {
name: "pruned".to_string(), name: "pruned".to_string(),
db_conn: None,
display_name: None,
url: "ws://example.com/pruned".to_string(),
http_client: None,
active_requests: 0.into(),
frontend_requests: 0.into(),
internal_requests: 0.into(),
provider_state: AsyncRwLock::new(ProviderState::Connected(Arc::new( provider_state: AsyncRwLock::new(ProviderState::Connected(Arc::new(
Web3Provider::Mock, Web3Provider::Mock,
))), ))),
hard_limit: None,
hard_limit_until: None,
soft_limit: 3_000, soft_limit: 3_000,
automatic_block_limit: false, automatic_block_limit: false,
backup: false, backup: false,
block_data_limit: 64.into(), block_data_limit: 64.into(),
tier: 1, tier: 1,
head_block: RwLock::new(Some(head_block.clone())), head_block: RwLock::new(Some(head_block.clone())),
..Default::default()
}; };
let archive_rpc = Web3Connection { let archive_rpc = Web3Rpc {
name: "archive".to_string(), name: "archive".to_string(),
db_conn: None,
display_name: None,
url: "ws://example.com/archive".to_string(),
http_client: None,
active_requests: 0.into(),
frontend_requests: 0.into(),
internal_requests: 0.into(),
provider_state: AsyncRwLock::new(ProviderState::Connected(Arc::new( provider_state: AsyncRwLock::new(ProviderState::Connected(Arc::new(
Web3Provider::Mock, Web3Provider::Mock,
))), ))),
hard_limit: None,
hard_limit_until: None,
soft_limit: 1_000, soft_limit: 1_000,
automatic_block_limit: false, automatic_block_limit: false,
backup: false, backup: false,
block_data_limit: u64::MAX.into(), block_data_limit: u64::MAX.into(),
tier: 2, tier: 2,
head_block: RwLock::new(Some(head_block.clone())), head_block: RwLock::new(Some(head_block.clone())),
..Default::default()
}; };
assert!(pruned_rpc.has_block_data(&head_block.number())); assert!(pruned_rpc.has_block_data(&head_block.number()));
@ -1582,8 +1545,8 @@ mod tests {
let (watch_consensus_connections_sender, _) = watch::channel(Default::default()); let (watch_consensus_connections_sender, _) = watch::channel(Default::default());
// TODO: make a Web3Connections::new // TODO: make a Web3Rpcs::new
let conns = Web3Connections { let conns = Web3Rpcs {
conns, conns,
watch_consensus_head_receiver: None, watch_consensus_head_receiver: None,
watch_consensus_connections_sender, watch_consensus_connections_sender,

View File

@ -1,7 +1,7 @@
// TODO: all pub, or export useful things here instead? // TODO: all pub, or export useful things here instead?
pub mod blockchain; pub mod blockchain;
pub mod connection; pub mod many;
pub mod connections; pub mod one;
pub mod provider; pub mod provider;
pub mod request; pub mod request;
pub mod synced_connections; pub mod synced_connections;

View File

@ -10,6 +10,7 @@ use ethers::prelude::{Bytes, Middleware, ProviderError, TxHash, H256, U64};
use ethers::types::U256; use ethers::types::U256;
use futures::future::try_join_all; use futures::future::try_join_all;
use futures::StreamExt; use futures::StreamExt;
use hdrhistogram::Histogram;
use log::{debug, error, info, trace, warn, Level}; use log::{debug, error, info, trace, warn, Level};
use migration::sea_orm::DatabaseConnection; use migration::sea_orm::DatabaseConnection;
use parking_lot::RwLock; use parking_lot::RwLock;
@ -64,9 +65,31 @@ impl ProviderState {
} }
} }
pub struct Web3RpcLatencies {
/// Traack how far behind the fastest node we are
new_head: Histogram<u64>,
/// exponentially weighted moving average of how far behind the fastest node we are
new_head_ewma: u32,
/// Track how long an rpc call takes on average
request: Histogram<u64>,
/// exponentially weighted moving average of how far behind the fastest node we are
request_ewma: u32,
}
impl Default for Web3RpcLatencies {
fn default() -> Self {
Self {
new_head: Histogram::new(3).unwrap(),
new_head_ewma: 0,
request: Histogram::new(3).unwrap(),
request_ewma: 0,
}
}
}
/// An active connection to a Web3 RPC server like geth or erigon. /// An active connection to a Web3 RPC server like geth or erigon.
#[derive(Default)] #[derive(Default)]
pub struct Web3Connection { pub struct Web3Rpc {
pub name: String, pub name: String,
pub display_name: Option<String>, pub display_name: Option<String>,
pub db_conn: Option<DatabaseConnection>, pub db_conn: Option<DatabaseConnection>,
@ -100,9 +123,11 @@ pub struct Web3Connection {
pub(super) tier: u64, pub(super) tier: u64,
/// TODO: change this to a watch channel so that http providers can subscribe and take action on change /// TODO: change this to a watch channel so that http providers can subscribe and take action on change
pub(super) head_block: RwLock<Option<SavedBlock>>, pub(super) head_block: RwLock<Option<SavedBlock>>,
/// Track how fast this RPC is
pub(super) latency: Web3RpcLatencies,
} }
impl Web3Connection { impl Web3Rpc {
/// Connect to a web3 rpc /// Connect to a web3 rpc
// TODO: have this take a builder (which will have channels attached). or maybe just take the config and give the config public fields // TODO: have this take a builder (which will have channels attached). or maybe just take the config and give the config public fields
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
@ -126,7 +151,7 @@ impl Web3Connection {
tx_id_sender: Option<flume::Sender<(TxHash, Arc<Self>)>>, tx_id_sender: Option<flume::Sender<(TxHash, Arc<Self>)>>,
reconnect: bool, reconnect: bool,
tier: u64, tier: u64,
) -> anyhow::Result<(Arc<Web3Connection>, AnyhowJoinHandle<()>)> { ) -> anyhow::Result<(Arc<Web3Rpc>, AnyhowJoinHandle<()>)> {
let hard_limit = hard_limit.map(|(hard_rate_limit, redis_pool)| { let hard_limit = hard_limit.map(|(hard_rate_limit, redis_pool)| {
// TODO: is cache size 1 okay? i think we need // TODO: is cache size 1 okay? i think we need
RedisRateLimiter::new( RedisRateLimiter::new(
@ -159,18 +184,14 @@ impl Web3Connection {
display_name, display_name,
http_client, http_client,
url: url_str, url: url_str,
active_requests: 0.into(),
frontend_requests: 0.into(),
internal_requests: 0.into(),
provider_state: AsyncRwLock::new(ProviderState::None),
hard_limit, hard_limit,
hard_limit_until, hard_limit_until,
soft_limit, soft_limit,
automatic_block_limit, automatic_block_limit,
backup, backup,
block_data_limit, block_data_limit,
head_block: RwLock::new(Default::default()),
tier, tier,
..Default::default()
}; };
let new_connection = Arc::new(new_connection); let new_connection = Arc::new(new_connection);
@ -1068,40 +1089,40 @@ impl fmt::Debug for Web3Provider {
} }
} }
impl Hash for Web3Connection { impl Hash for Web3Rpc {
fn hash<H: Hasher>(&self, state: &mut H) { fn hash<H: Hasher>(&self, state: &mut H) {
// TODO: is this enough? // TODO: is this enough?
self.name.hash(state); self.name.hash(state);
} }
} }
impl Eq for Web3Connection {} impl Eq for Web3Rpc {}
impl Ord for Web3Connection { impl Ord for Web3Rpc {
fn cmp(&self, other: &Self) -> std::cmp::Ordering { fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.name.cmp(&other.name) self.name.cmp(&other.name)
} }
} }
impl PartialOrd for Web3Connection { impl PartialOrd for Web3Rpc {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> { fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other)) Some(self.cmp(other))
} }
} }
impl PartialEq for Web3Connection { impl PartialEq for Web3Rpc {
fn eq(&self, other: &Self) -> bool { fn eq(&self, other: &Self) -> bool {
self.name == other.name self.name == other.name
} }
} }
impl Serialize for Web3Connection { impl Serialize for Web3Rpc {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where where
S: Serializer, S: Serializer,
{ {
// 3 is the number of fields in the struct. // 3 is the number of fields in the struct.
let mut state = serializer.serialize_struct("Web3Connection", 9)?; let mut state = serializer.serialize_struct("Web3Rpc", 9)?;
// the url is excluded because it likely includes private information. just show the name that we use in keys // the url is excluded because it likely includes private information. just show the name that we use in keys
state.serialize_field("name", &self.name)?; state.serialize_field("name", &self.name)?;
@ -1143,9 +1164,9 @@ impl Serialize for Web3Connection {
} }
} }
impl fmt::Debug for Web3Connection { impl fmt::Debug for Web3Rpc {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut f = f.debug_struct("Web3Connection"); let mut f = f.debug_struct("Web3Rpc");
f.field("name", &self.name); f.field("name", &self.name);
@ -1160,7 +1181,7 @@ impl fmt::Debug for Web3Connection {
} }
} }
impl fmt::Display for Web3Connection { impl fmt::Display for Web3Rpc {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// TODO: filter basic auth and api keys // TODO: filter basic auth and api keys
write!(f, "{}", &self.name) write!(f, "{}", &self.name)
@ -1193,24 +1214,16 @@ mod tests {
let head_block = SavedBlock::new(random_block); let head_block = SavedBlock::new(random_block);
let block_data_limit = u64::MAX; let block_data_limit = u64::MAX;
let x = Web3Connection { let x = Web3Rpc {
name: "name".to_string(), name: "name".to_string(),
db_conn: None,
display_name: None,
url: "ws://example.com".to_string(), url: "ws://example.com".to_string(),
http_client: None,
active_requests: 0.into(),
frontend_requests: 0.into(),
internal_requests: 0.into(),
provider_state: AsyncRwLock::new(ProviderState::None),
hard_limit: None,
hard_limit_until: None,
soft_limit: 1_000, soft_limit: 1_000,
automatic_block_limit: false, automatic_block_limit: false,
backup: false, backup: false,
block_data_limit: block_data_limit.into(), block_data_limit: block_data_limit.into(),
tier: 0, tier: 0,
head_block: RwLock::new(Some(head_block.clone())), head_block: RwLock::new(Some(head_block.clone())),
..Default::default()
}; };
assert!(x.has_block_data(&0.into())); assert!(x.has_block_data(&0.into()));
@ -1239,24 +1252,15 @@ mod tests {
let block_data_limit = 64; let block_data_limit = 64;
// TODO: this is getting long. have a `impl Default` // TODO: this is getting long. have a `impl Default`
let x = Web3Connection { let x = Web3Rpc {
name: "name".to_string(), name: "name".to_string(),
db_conn: None,
display_name: None,
url: "ws://example.com".to_string(),
http_client: None,
active_requests: 0.into(),
frontend_requests: 0.into(),
internal_requests: 0.into(),
provider_state: AsyncRwLock::new(ProviderState::None),
hard_limit: None,
hard_limit_until: None,
soft_limit: 1_000, soft_limit: 1_000,
automatic_block_limit: false, automatic_block_limit: false,
backup: false, backup: false,
block_data_limit: block_data_limit.into(), block_data_limit: block_data_limit.into(),
tier: 0, tier: 0,
head_block: RwLock::new(Some(head_block.clone())), head_block: RwLock::new(Some(head_block.clone())),
..Default::default()
}; };
assert!(!x.has_block_data(&0.into())); assert!(!x.has_block_data(&0.into()));
@ -1293,7 +1297,7 @@ mod tests {
let metrics = OpenRequestHandleMetrics::default(); let metrics = OpenRequestHandleMetrics::default();
let x = Web3Connection { let x = Web3Rpc {
name: "name".to_string(), name: "name".to_string(),
db_conn: None, db_conn: None,
display_name: None, display_name: None,

View File

@ -1,4 +1,4 @@
use super::connection::Web3Connection; use super::one::Web3Rpc;
use super::provider::Web3Provider; use super::provider::Web3Provider;
use crate::frontend::authorization::{Authorization, AuthorizationType}; use crate::frontend::authorization::{Authorization, AuthorizationType};
use anyhow::Context; use anyhow::Context;
@ -30,7 +30,7 @@ pub enum OpenRequestResult {
#[derive(Debug)] #[derive(Debug)]
pub struct OpenRequestHandle { pub struct OpenRequestHandle {
authorization: Arc<Authorization>, authorization: Arc<Authorization>,
conn: Arc<Web3Connection>, conn: Arc<Web3Rpc>,
provider: Arc<Web3Provider>, provider: Arc<Web3Provider>,
} }
@ -122,7 +122,7 @@ impl Authorization {
} }
impl OpenRequestHandle { impl OpenRequestHandle {
pub async fn new(authorization: Arc<Authorization>, conn: Arc<Web3Connection>) -> Self { pub async fn new(authorization: Arc<Authorization>, conn: Arc<Web3Rpc>) -> Self {
// TODO: take request_id as an argument? // TODO: take request_id as an argument?
// TODO: attach a unique id to this? customer requests have one, but not internal queries // TODO: attach a unique id to this? customer requests have one, but not internal queries
// TODO: what ordering?! // TODO: what ordering?!
@ -185,7 +185,7 @@ impl OpenRequestHandle {
} }
#[inline] #[inline]
pub fn clone_connection(&self) -> Arc<Web3Connection> { pub fn clone_connection(&self) -> Arc<Web3Rpc> {
self.conn.clone() self.conn.clone()
} }

View File

@ -1,25 +1,25 @@
use super::blockchain::{ArcBlock, SavedBlock}; use super::blockchain::{ArcBlock, SavedBlock};
use super::connection::Web3Connection; use super::many::Web3Rpcs;
use super::connections::Web3Connections; use super::one::Web3Rpc;
use ethers::prelude::{H256, U64}; use ethers::prelude::{H256, U64};
use serde::Serialize; use serde::Serialize;
use std::fmt; use std::fmt;
use std::sync::Arc; use std::sync::Arc;
/// A collection of Web3Connections that are on the same block. /// A collection of Web3Rpcs that are on the same block.
/// Serialize is so we can print it on our debug endpoint /// Serialize is so we can print it on our debug endpoint
#[derive(Clone, Default, Serialize)] #[derive(Clone, Default, Serialize)]
pub struct ConsensusConnections { pub struct ConsensusWeb3Rpcs {
// TODO: store ArcBlock instead? // TODO: store ArcBlock instead?
pub(super) head_block: Option<SavedBlock>, pub(super) head_block: Option<SavedBlock>,
// TODO: this should be able to serialize, but it isn't // TODO: this should be able to serialize, but it isn't
#[serde(skip_serializing)] #[serde(skip_serializing)]
pub(super) conns: Vec<Arc<Web3Connection>>, pub(super) conns: Vec<Arc<Web3Rpc>>,
pub(super) num_checked_conns: usize, pub(super) num_checked_conns: usize,
pub(super) includes_backups: bool, pub(super) includes_backups: bool,
} }
impl ConsensusConnections { impl ConsensusWeb3Rpcs {
pub fn num_conns(&self) -> usize { pub fn num_conns(&self) -> usize {
self.conns.len() self.conns.len()
} }
@ -31,7 +31,7 @@ impl ConsensusConnections {
// TODO: sum_hard_limit? // TODO: sum_hard_limit?
} }
impl fmt::Debug for ConsensusConnections { impl fmt::Debug for ConsensusWeb3Rpcs {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// TODO: the default formatter takes forever to write. this is too quiet though // TODO: the default formatter takes forever to write. this is too quiet though
// TODO: print the actual conns? // TODO: print the actual conns?
@ -42,7 +42,7 @@ impl fmt::Debug for ConsensusConnections {
} }
} }
impl Web3Connections { impl Web3Rpcs {
pub fn head_block(&self) -> Option<ArcBlock> { pub fn head_block(&self) -> Option<ArcBlock> {
self.watch_consensus_head_receiver self.watch_consensus_head_receiver
.as_ref() .as_ref()

View File

@ -1,8 +1,8 @@
use crate::frontend::authorization::Authorization; use crate::frontend::authorization::Authorization;
use super::many::Web3Rpcs;
///! Load balanced communication with a group of web3 providers ///! Load balanced communication with a group of web3 providers
use super::connection::Web3Connection; use super::one::Web3Rpc;
use super::connections::Web3Connections;
use super::request::OpenRequestResult; use super::request::OpenRequestResult;
use ethers::prelude::{ProviderError, Transaction, TxHash}; use ethers::prelude::{ProviderError, Transaction, TxHash};
use log::{debug, trace, Level}; use log::{debug, trace, Level};
@ -17,11 +17,11 @@ pub enum TxStatus {
Orphaned(Transaction), Orphaned(Transaction),
} }
impl Web3Connections { impl Web3Rpcs {
async fn query_transaction_status( async fn query_transaction_status(
&self, &self,
authorization: &Arc<Authorization>, authorization: &Arc<Authorization>,
rpc: Arc<Web3Connection>, rpc: Arc<Web3Rpc>,
pending_tx_id: TxHash, pending_tx_id: TxHash,
) -> Result<Option<TxStatus>, ProviderError> { ) -> Result<Option<TxStatus>, ProviderError> {
// TODO: there is a race here on geth. sometimes the rpc isn't yet ready to serve the transaction (even though they told us about it!) // TODO: there is a race here on geth. sometimes the rpc isn't yet ready to serve the transaction (even though they told us about it!)
@ -66,7 +66,7 @@ impl Web3Connections {
pub(super) async fn process_incoming_tx_id( pub(super) async fn process_incoming_tx_id(
self: Arc<Self>, self: Arc<Self>,
authorization: Arc<Authorization>, authorization: Arc<Authorization>,
rpc: Arc<Web3Connection>, rpc: Arc<Web3Rpc>,
pending_tx_id: TxHash, pending_tx_id: TxHash,
pending_tx_sender: broadcast::Sender<TxStatus>, pending_tx_sender: broadcast::Sender<TxStatus>,
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {